1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <stdarg.h> 23 #include <string.h> 24 #include <elf.h> 25 #include <endian.h> 26 #include <errno.h> 27 #include <unistd.h> 28 #include <fcntl.h> 29 #include <time.h> 30 #include <limits.h> 31 #include <grp.h> 32 #include <sys/types.h> 33 #include <sys/ipc.h> 34 #include <sys/msg.h> 35 #include <sys/wait.h> 36 #include <sys/time.h> 37 #include <sys/stat.h> 38 #include <sys/mount.h> 39 #include <sys/file.h> 40 #include <sys/fsuid.h> 41 #include <sys/personality.h> 42 #include <sys/prctl.h> 43 #include <sys/resource.h> 44 #include <sys/mman.h> 45 #include <sys/swap.h> 46 #include <linux/capability.h> 47 #include <signal.h> 48 #include <sched.h> 49 #ifdef __ia64__ 50 int __clone2(int (*fn)(void *), void *child_stack_base, 51 size_t stack_size, int flags, void *arg, ...); 52 #endif 53 #include <sys/socket.h> 54 #include <sys/un.h> 55 #include <sys/uio.h> 56 #include <sys/poll.h> 57 #include <sys/times.h> 58 #include <sys/shm.h> 59 #include <sys/sem.h> 60 #include <sys/statfs.h> 61 #include <utime.h> 62 #include <sys/sysinfo.h> 63 //#include <sys/user.h> 64 #include <netinet/ip.h> 65 #include <netinet/tcp.h> 66 #include <linux/wireless.h> 67 #include 
<linux/icmp.h> 68 #include "qemu-common.h" 69 #ifdef TARGET_GPROF 70 #include <sys/gmon.h> 71 #endif 72 #ifdef CONFIG_EVENTFD 73 #include <sys/eventfd.h> 74 #endif 75 #ifdef CONFIG_EPOLL 76 #include <sys/epoll.h> 77 #endif 78 #ifdef CONFIG_ATTR 79 #include "qemu/xattr.h" 80 #endif 81 #ifdef CONFIG_SENDFILE 82 #include <sys/sendfile.h> 83 #endif 84 85 #define termios host_termios 86 #define winsize host_winsize 87 #define termio host_termio 88 #define sgttyb host_sgttyb /* same as target */ 89 #define tchars host_tchars /* same as target */ 90 #define ltchars host_ltchars /* same as target */ 91 92 #include <linux/termios.h> 93 #include <linux/unistd.h> 94 #include <linux/cdrom.h> 95 #include <linux/hdreg.h> 96 #include <linux/soundcard.h> 97 #include <linux/kd.h> 98 #include <linux/mtio.h> 99 #include <linux/fs.h> 100 #if defined(CONFIG_FIEMAP) 101 #include <linux/fiemap.h> 102 #endif 103 #include <linux/fb.h> 104 #include <linux/vt.h> 105 #include <linux/dm-ioctl.h> 106 #include <linux/reboot.h> 107 #include <linux/route.h> 108 #include <linux/filter.h> 109 #include <linux/blkpg.h> 110 #include "linux_loop.h" 111 #include "uname.h" 112 113 #include "qemu.h" 114 115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ 116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) 117 118 //#define DEBUG 119 120 //#include <linux/msdos_fs.h> 121 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 122 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 123 124 125 #undef _syscall0 126 #undef _syscall1 127 #undef _syscall2 128 #undef _syscall3 129 #undef _syscall4 130 #undef _syscall5 131 #undef _syscall6 132 133 #define _syscall0(type,name) \ 134 static type name (void) \ 135 { \ 136 return syscall(__NR_##name); \ 137 } 138 139 #define _syscall1(type,name,type1,arg1) \ 140 static type name (type1 arg1) \ 141 { \ 142 return syscall(__NR_##name, arg1); \ 143 } 144 145 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 146 static type 
name (type1 arg1,type2 arg2) \ 147 { \ 148 return syscall(__NR_##name, arg1, arg2); \ 149 } 150 151 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 152 static type name (type1 arg1,type2 arg2,type3 arg3) \ 153 { \ 154 return syscall(__NR_##name, arg1, arg2, arg3); \ 155 } 156 157 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 159 { \ 160 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 161 } 162 163 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 164 type5,arg5) \ 165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 166 { \ 167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 168 } 169 170 171 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 172 type5,arg5,type6,arg6) \ 173 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 174 type6 arg6) \ 175 { \ 176 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 177 } 178 179 180 #define __NR_sys_uname __NR_uname 181 #define __NR_sys_getcwd1 __NR_getcwd 182 #define __NR_sys_getdents __NR_getdents 183 #define __NR_sys_getdents64 __NR_getdents64 184 #define __NR_sys_getpriority __NR_getpriority 185 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 186 #define __NR_sys_syslog __NR_syslog 187 #define __NR_sys_tgkill __NR_tgkill 188 #define __NR_sys_tkill __NR_tkill 189 #define __NR_sys_futex __NR_futex 190 #define __NR_sys_inotify_init __NR_inotify_init 191 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 192 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 193 194 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 195 defined(__s390x__) 196 #define __NR__llseek __NR_lseek 197 #endif 198 199 /* Newer kernel ports have llseek() instead of _llseek() */ 200 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 201 #define 
TARGET_NR__llseek TARGET_NR_llseek 202 #endif 203 204 #ifdef __NR_gettid 205 _syscall0(int, gettid) 206 #else 207 /* This is a replacement for the host gettid() and must return a host 208 errno. */ 209 static int gettid(void) { 210 return -ENOSYS; 211 } 212 #endif 213 #ifdef __NR_getdents 214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 215 #endif 216 #if !defined(__NR_getdents) || \ 217 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 218 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 219 #endif 220 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 221 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 222 loff_t *, res, uint, wh); 223 #endif 224 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo) 225 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 226 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 227 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig) 228 #endif 229 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 230 _syscall2(int,sys_tkill,int,tid,int,sig) 231 #endif 232 #ifdef __NR_exit_group 233 _syscall1(int,exit_group,int,error_code) 234 #endif 235 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 236 _syscall1(int,set_tid_address,int *,tidptr) 237 #endif 238 #if defined(TARGET_NR_futex) && defined(__NR_futex) 239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 240 const struct timespec *,timeout,int *,uaddr2,int,val3) 241 #endif 242 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 243 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 244 unsigned long *, user_mask_ptr); 245 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 246 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 247 unsigned long *, user_mask_ptr); 248 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 249 void *, arg); 250 _syscall2(int, capget, struct 
__user_cap_header_struct *, header, 251 struct __user_cap_data_struct *, data); 252 _syscall2(int, capset, struct __user_cap_header_struct *, header, 253 struct __user_cap_data_struct *, data); 254 255 static bitmask_transtbl fcntl_flags_tbl[] = { 256 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 257 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 258 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 259 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 260 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 261 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 262 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 263 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 264 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 265 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 266 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 267 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 268 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 269 #if defined(O_DIRECT) 270 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 271 #endif 272 #if defined(O_NOATIME) 273 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 274 #endif 275 #if defined(O_CLOEXEC) 276 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 277 #endif 278 #if defined(O_PATH) 279 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 280 #endif 281 /* Don't terminate the list prematurely on 64-bit host+guest. 
*/ 282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 283 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 284 #endif 285 { 0, 0, 0, 0 } 286 }; 287 288 static int sys_getcwd1(char *buf, size_t size) 289 { 290 if (getcwd(buf, size) == NULL) { 291 /* getcwd() sets errno */ 292 return (-1); 293 } 294 return strlen(buf)+1; 295 } 296 297 #ifdef TARGET_NR_openat 298 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode) 299 { 300 /* 301 * open(2) has extra parameter 'mode' when called with 302 * flag O_CREAT. 303 */ 304 if ((flags & O_CREAT) != 0) { 305 return (openat(dirfd, pathname, flags, mode)); 306 } 307 return (openat(dirfd, pathname, flags)); 308 } 309 #endif 310 311 #ifdef TARGET_NR_utimensat 312 #ifdef CONFIG_UTIMENSAT 313 static int sys_utimensat(int dirfd, const char *pathname, 314 const struct timespec times[2], int flags) 315 { 316 if (pathname == NULL) 317 return futimens(dirfd, times); 318 else 319 return utimensat(dirfd, pathname, times, flags); 320 } 321 #elif defined(__NR_utimensat) 322 #define __NR_sys_utimensat __NR_utimensat 323 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 324 const struct timespec *,tsp,int,flags) 325 #else 326 static int sys_utimensat(int dirfd, const char *pathname, 327 const struct timespec times[2], int flags) 328 { 329 errno = ENOSYS; 330 return -1; 331 } 332 #endif 333 #endif /* TARGET_NR_utimensat */ 334 335 #ifdef CONFIG_INOTIFY 336 #include <sys/inotify.h> 337 338 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 339 static int sys_inotify_init(void) 340 { 341 return (inotify_init()); 342 } 343 #endif 344 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 345 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 346 { 347 return (inotify_add_watch(fd, pathname, mask)); 348 } 349 #endif 350 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 351 static int 
sys_inotify_rm_watch(int fd, int32_t wd) 352 { 353 return (inotify_rm_watch(fd, wd)); 354 } 355 #endif 356 #ifdef CONFIG_INOTIFY1 357 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 358 static int sys_inotify_init1(int flags) 359 { 360 return (inotify_init1(flags)); 361 } 362 #endif 363 #endif 364 #else 365 /* Userspace can usually survive runtime without inotify */ 366 #undef TARGET_NR_inotify_init 367 #undef TARGET_NR_inotify_init1 368 #undef TARGET_NR_inotify_add_watch 369 #undef TARGET_NR_inotify_rm_watch 370 #endif /* CONFIG_INOTIFY */ 371 372 #if defined(TARGET_NR_ppoll) 373 #ifndef __NR_ppoll 374 # define __NR_ppoll -1 375 #endif 376 #define __NR_sys_ppoll __NR_ppoll 377 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds, 378 struct timespec *, timeout, const sigset_t *, sigmask, 379 size_t, sigsetsize) 380 #endif 381 382 #if defined(TARGET_NR_pselect6) 383 #ifndef __NR_pselect6 384 # define __NR_pselect6 -1 385 #endif 386 #define __NR_sys_pselect6 __NR_pselect6 387 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, 388 fd_set *, exceptfds, struct timespec *, timeout, void *, sig); 389 #endif 390 391 #if defined(TARGET_NR_prlimit64) 392 #ifndef __NR_prlimit64 393 # define __NR_prlimit64 -1 394 #endif 395 #define __NR_sys_prlimit64 __NR_prlimit64 396 /* The glibc rlimit structure may not be that used by the underlying syscall */ 397 struct host_rlimit64 { 398 uint64_t rlim_cur; 399 uint64_t rlim_max; 400 }; 401 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 402 const struct host_rlimit64 *, new_limit, 403 struct host_rlimit64 *, old_limit) 404 #endif 405 406 407 #if defined(TARGET_NR_timer_create) 408 /* Maxiumum of 32 active POSIX timers allowed at any one time. */ 409 static timer_t g_posix_timers[32] = { 0, } ; 410 411 static inline int next_free_host_timer(void) 412 { 413 int k ; 414 /* FIXME: Does finding the next free slot require a lock? 
*/ 415 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) { 416 if (g_posix_timers[k] == 0) { 417 g_posix_timers[k] = (timer_t) 1; 418 return k; 419 } 420 } 421 return -1; 422 } 423 #endif 424 425 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ 426 #ifdef TARGET_ARM 427 static inline int regpairs_aligned(void *cpu_env) { 428 return ((((CPUARMState *)cpu_env)->eabi) == 1) ; 429 } 430 #elif defined(TARGET_MIPS) 431 static inline int regpairs_aligned(void *cpu_env) { return 1; } 432 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64) 433 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs 434 * of registers which translates to the same as ARM/MIPS, because we start with 435 * r3 as arg1 */ 436 static inline int regpairs_aligned(void *cpu_env) { return 1; } 437 #else 438 static inline int regpairs_aligned(void *cpu_env) { return 0; } 439 #endif 440 441 #define ERRNO_TABLE_SIZE 1200 442 443 /* target_to_host_errno_table[] is initialized from 444 * host_to_target_errno_table[] in syscall_init(). */ 445 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 446 }; 447 448 /* 449 * This list is the union of errno values overridden in asm-<arch>/errno.h 450 * minus the errnos that are not actually generic to all archs. 
451 */ 452 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 453 [EIDRM] = TARGET_EIDRM, 454 [ECHRNG] = TARGET_ECHRNG, 455 [EL2NSYNC] = TARGET_EL2NSYNC, 456 [EL3HLT] = TARGET_EL3HLT, 457 [EL3RST] = TARGET_EL3RST, 458 [ELNRNG] = TARGET_ELNRNG, 459 [EUNATCH] = TARGET_EUNATCH, 460 [ENOCSI] = TARGET_ENOCSI, 461 [EL2HLT] = TARGET_EL2HLT, 462 [EDEADLK] = TARGET_EDEADLK, 463 [ENOLCK] = TARGET_ENOLCK, 464 [EBADE] = TARGET_EBADE, 465 [EBADR] = TARGET_EBADR, 466 [EXFULL] = TARGET_EXFULL, 467 [ENOANO] = TARGET_ENOANO, 468 [EBADRQC] = TARGET_EBADRQC, 469 [EBADSLT] = TARGET_EBADSLT, 470 [EBFONT] = TARGET_EBFONT, 471 [ENOSTR] = TARGET_ENOSTR, 472 [ENODATA] = TARGET_ENODATA, 473 [ETIME] = TARGET_ETIME, 474 [ENOSR] = TARGET_ENOSR, 475 [ENONET] = TARGET_ENONET, 476 [ENOPKG] = TARGET_ENOPKG, 477 [EREMOTE] = TARGET_EREMOTE, 478 [ENOLINK] = TARGET_ENOLINK, 479 [EADV] = TARGET_EADV, 480 [ESRMNT] = TARGET_ESRMNT, 481 [ECOMM] = TARGET_ECOMM, 482 [EPROTO] = TARGET_EPROTO, 483 [EDOTDOT] = TARGET_EDOTDOT, 484 [EMULTIHOP] = TARGET_EMULTIHOP, 485 [EBADMSG] = TARGET_EBADMSG, 486 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 487 [EOVERFLOW] = TARGET_EOVERFLOW, 488 [ENOTUNIQ] = TARGET_ENOTUNIQ, 489 [EBADFD] = TARGET_EBADFD, 490 [EREMCHG] = TARGET_EREMCHG, 491 [ELIBACC] = TARGET_ELIBACC, 492 [ELIBBAD] = TARGET_ELIBBAD, 493 [ELIBSCN] = TARGET_ELIBSCN, 494 [ELIBMAX] = TARGET_ELIBMAX, 495 [ELIBEXEC] = TARGET_ELIBEXEC, 496 [EILSEQ] = TARGET_EILSEQ, 497 [ENOSYS] = TARGET_ENOSYS, 498 [ELOOP] = TARGET_ELOOP, 499 [ERESTART] = TARGET_ERESTART, 500 [ESTRPIPE] = TARGET_ESTRPIPE, 501 [ENOTEMPTY] = TARGET_ENOTEMPTY, 502 [EUSERS] = TARGET_EUSERS, 503 [ENOTSOCK] = TARGET_ENOTSOCK, 504 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 505 [EMSGSIZE] = TARGET_EMSGSIZE, 506 [EPROTOTYPE] = TARGET_EPROTOTYPE, 507 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 508 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 509 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 510 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 511 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 
512 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 513 [EADDRINUSE] = TARGET_EADDRINUSE, 514 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 515 [ENETDOWN] = TARGET_ENETDOWN, 516 [ENETUNREACH] = TARGET_ENETUNREACH, 517 [ENETRESET] = TARGET_ENETRESET, 518 [ECONNABORTED] = TARGET_ECONNABORTED, 519 [ECONNRESET] = TARGET_ECONNRESET, 520 [ENOBUFS] = TARGET_ENOBUFS, 521 [EISCONN] = TARGET_EISCONN, 522 [ENOTCONN] = TARGET_ENOTCONN, 523 [EUCLEAN] = TARGET_EUCLEAN, 524 [ENOTNAM] = TARGET_ENOTNAM, 525 [ENAVAIL] = TARGET_ENAVAIL, 526 [EISNAM] = TARGET_EISNAM, 527 [EREMOTEIO] = TARGET_EREMOTEIO, 528 [ESHUTDOWN] = TARGET_ESHUTDOWN, 529 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 530 [ETIMEDOUT] = TARGET_ETIMEDOUT, 531 [ECONNREFUSED] = TARGET_ECONNREFUSED, 532 [EHOSTDOWN] = TARGET_EHOSTDOWN, 533 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 534 [EALREADY] = TARGET_EALREADY, 535 [EINPROGRESS] = TARGET_EINPROGRESS, 536 [ESTALE] = TARGET_ESTALE, 537 [ECANCELED] = TARGET_ECANCELED, 538 [ENOMEDIUM] = TARGET_ENOMEDIUM, 539 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 540 #ifdef ENOKEY 541 [ENOKEY] = TARGET_ENOKEY, 542 #endif 543 #ifdef EKEYEXPIRED 544 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 545 #endif 546 #ifdef EKEYREVOKED 547 [EKEYREVOKED] = TARGET_EKEYREVOKED, 548 #endif 549 #ifdef EKEYREJECTED 550 [EKEYREJECTED] = TARGET_EKEYREJECTED, 551 #endif 552 #ifdef EOWNERDEAD 553 [EOWNERDEAD] = TARGET_EOWNERDEAD, 554 #endif 555 #ifdef ENOTRECOVERABLE 556 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 557 #endif 558 }; 559 560 static inline int host_to_target_errno(int err) 561 { 562 if(host_to_target_errno_table[err]) 563 return host_to_target_errno_table[err]; 564 return err; 565 } 566 567 static inline int target_to_host_errno(int err) 568 { 569 if (target_to_host_errno_table[err]) 570 return target_to_host_errno_table[err]; 571 return err; 572 } 573 574 static inline abi_long get_errno(abi_long ret) 575 { 576 if (ret == -1) 577 return -host_to_target_errno(errno); 578 else 579 return ret; 580 } 581 582 static inline int 
is_error(abi_long ret) 583 { 584 return (abi_ulong)ret >= (abi_ulong)(-4096); 585 } 586 587 char *target_strerror(int err) 588 { 589 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) { 590 return NULL; 591 } 592 return strerror(target_to_host_errno(err)); 593 } 594 595 static inline int host_to_target_sock_type(int host_type) 596 { 597 int target_type; 598 599 switch (host_type & 0xf /* SOCK_TYPE_MASK */) { 600 case SOCK_DGRAM: 601 target_type = TARGET_SOCK_DGRAM; 602 break; 603 case SOCK_STREAM: 604 target_type = TARGET_SOCK_STREAM; 605 break; 606 default: 607 target_type = host_type & 0xf /* SOCK_TYPE_MASK */; 608 break; 609 } 610 611 #if defined(SOCK_CLOEXEC) 612 if (host_type & SOCK_CLOEXEC) { 613 target_type |= TARGET_SOCK_CLOEXEC; 614 } 615 #endif 616 617 #if defined(SOCK_NONBLOCK) 618 if (host_type & SOCK_NONBLOCK) { 619 target_type |= TARGET_SOCK_NONBLOCK; 620 } 621 #endif 622 623 return target_type; 624 } 625 626 static abi_ulong target_brk; 627 static abi_ulong target_original_brk; 628 static abi_ulong brk_page; 629 630 void target_set_brk(abi_ulong new_brk) 631 { 632 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); 633 brk_page = HOST_PAGE_ALIGN(target_brk); 634 } 635 636 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) 637 #define DEBUGF_BRK(message, args...) 638 639 /* do_brk() must return target values and target errnos. */ 640 abi_long do_brk(abi_ulong new_brk) 641 { 642 abi_long mapped_addr; 643 int new_alloc_size; 644 645 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk); 646 647 if (!new_brk) { 648 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk); 649 return target_brk; 650 } 651 if (new_brk < target_original_brk) { 652 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n", 653 target_brk); 654 return target_brk; 655 } 656 657 /* If the new brk is less than the highest page reserved to the 658 * target heap allocation, set it and we're almost done... 
*/ 659 if (new_brk <= brk_page) { 660 /* Heap contents are initialized to zero, as for anonymous 661 * mapped pages. */ 662 if (new_brk > target_brk) { 663 memset(g2h(target_brk), 0, new_brk - target_brk); 664 } 665 target_brk = new_brk; 666 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk); 667 return target_brk; 668 } 669 670 /* We need to allocate more memory after the brk... Note that 671 * we don't use MAP_FIXED because that will map over the top of 672 * any existing mapping (like the one with the host libc or qemu 673 * itself); instead we treat "mapped but at wrong address" as 674 * a failure and unmap again. 675 */ 676 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page); 677 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, 678 PROT_READ|PROT_WRITE, 679 MAP_ANON|MAP_PRIVATE, 0, 0)); 680 681 if (mapped_addr == brk_page) { 682 /* Heap contents are initialized to zero, as for anonymous 683 * mapped pages. Technically the new pages are already 684 * initialized to zero since they *are* anonymous mapped 685 * pages, however we have to take care with the contents that 686 * come from the remaining part of the previous page: it may 687 * contains garbage data due to a previous heap usage (grown 688 * then shrunken). */ 689 memset(g2h(target_brk), 0, brk_page - target_brk); 690 691 target_brk = new_brk; 692 brk_page = HOST_PAGE_ALIGN(target_brk); 693 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n", 694 target_brk); 695 return target_brk; 696 } else if (mapped_addr != -1) { 697 /* Mapped but at wrong address, meaning there wasn't actually 698 * enough space for this brk. 
699 */ 700 target_munmap(mapped_addr, new_alloc_size); 701 mapped_addr = -1; 702 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk); 703 } 704 else { 705 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk); 706 } 707 708 #if defined(TARGET_ALPHA) 709 /* We (partially) emulate OSF/1 on Alpha, which requires we 710 return a proper errno, not an unchanged brk value. */ 711 return -TARGET_ENOMEM; 712 #endif 713 /* For everything else, return the previous break. */ 714 return target_brk; 715 } 716 717 static inline abi_long copy_from_user_fdset(fd_set *fds, 718 abi_ulong target_fds_addr, 719 int n) 720 { 721 int i, nw, j, k; 722 abi_ulong b, *target_fds; 723 724 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 725 if (!(target_fds = lock_user(VERIFY_READ, 726 target_fds_addr, 727 sizeof(abi_ulong) * nw, 728 1))) 729 return -TARGET_EFAULT; 730 731 FD_ZERO(fds); 732 k = 0; 733 for (i = 0; i < nw; i++) { 734 /* grab the abi_ulong */ 735 __get_user(b, &target_fds[i]); 736 for (j = 0; j < TARGET_ABI_BITS; j++) { 737 /* check the bit inside the abi_ulong */ 738 if ((b >> j) & 1) 739 FD_SET(k, fds); 740 k++; 741 } 742 } 743 744 unlock_user(target_fds, target_fds_addr, 0); 745 746 return 0; 747 } 748 749 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr, 750 abi_ulong target_fds_addr, 751 int n) 752 { 753 if (target_fds_addr) { 754 if (copy_from_user_fdset(fds, target_fds_addr, n)) 755 return -TARGET_EFAULT; 756 *fds_ptr = fds; 757 } else { 758 *fds_ptr = NULL; 759 } 760 return 0; 761 } 762 763 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr, 764 const fd_set *fds, 765 int n) 766 { 767 int i, nw, j, k; 768 abi_long v; 769 abi_ulong *target_fds; 770 771 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 772 if (!(target_fds = lock_user(VERIFY_WRITE, 773 target_fds_addr, 774 sizeof(abi_ulong) * nw, 775 0))) 776 return -TARGET_EFAULT; 777 778 k = 0; 779 for (i = 0; i < nw; i++) { 780 v = 0; 781 for (j = 0; j < 
TARGET_ABI_BITS; j++) { 782 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j); 783 k++; 784 } 785 __put_user(v, &target_fds[i]); 786 } 787 788 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw); 789 790 return 0; 791 } 792 793 #if defined(__alpha__) 794 #define HOST_HZ 1024 795 #else 796 #define HOST_HZ 100 797 #endif 798 799 static inline abi_long host_to_target_clock_t(long ticks) 800 { 801 #if HOST_HZ == TARGET_HZ 802 return ticks; 803 #else 804 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ; 805 #endif 806 } 807 808 static inline abi_long host_to_target_rusage(abi_ulong target_addr, 809 const struct rusage *rusage) 810 { 811 struct target_rusage *target_rusage; 812 813 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) 814 return -TARGET_EFAULT; 815 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec); 816 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec); 817 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec); 818 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec); 819 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss); 820 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss); 821 target_rusage->ru_idrss = tswapal(rusage->ru_idrss); 822 target_rusage->ru_isrss = tswapal(rusage->ru_isrss); 823 target_rusage->ru_minflt = tswapal(rusage->ru_minflt); 824 target_rusage->ru_majflt = tswapal(rusage->ru_majflt); 825 target_rusage->ru_nswap = tswapal(rusage->ru_nswap); 826 target_rusage->ru_inblock = tswapal(rusage->ru_inblock); 827 target_rusage->ru_oublock = tswapal(rusage->ru_oublock); 828 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd); 829 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv); 830 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals); 831 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw); 832 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw); 833 unlock_user_struct(target_rusage, target_addr, 1); 834 835 return 0; 836 } 837 838 static inline 
rlim_t target_to_host_rlim(abi_ulong target_rlim) 839 { 840 abi_ulong target_rlim_swap; 841 rlim_t result; 842 843 target_rlim_swap = tswapal(target_rlim); 844 if (target_rlim_swap == TARGET_RLIM_INFINITY) 845 return RLIM_INFINITY; 846 847 result = target_rlim_swap; 848 if (target_rlim_swap != (rlim_t)result) 849 return RLIM_INFINITY; 850 851 return result; 852 } 853 854 static inline abi_ulong host_to_target_rlim(rlim_t rlim) 855 { 856 abi_ulong target_rlim_swap; 857 abi_ulong result; 858 859 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) 860 target_rlim_swap = TARGET_RLIM_INFINITY; 861 else 862 target_rlim_swap = rlim; 863 result = tswapal(target_rlim_swap); 864 865 return result; 866 } 867 868 static inline int target_to_host_resource(int code) 869 { 870 switch (code) { 871 case TARGET_RLIMIT_AS: 872 return RLIMIT_AS; 873 case TARGET_RLIMIT_CORE: 874 return RLIMIT_CORE; 875 case TARGET_RLIMIT_CPU: 876 return RLIMIT_CPU; 877 case TARGET_RLIMIT_DATA: 878 return RLIMIT_DATA; 879 case TARGET_RLIMIT_FSIZE: 880 return RLIMIT_FSIZE; 881 case TARGET_RLIMIT_LOCKS: 882 return RLIMIT_LOCKS; 883 case TARGET_RLIMIT_MEMLOCK: 884 return RLIMIT_MEMLOCK; 885 case TARGET_RLIMIT_MSGQUEUE: 886 return RLIMIT_MSGQUEUE; 887 case TARGET_RLIMIT_NICE: 888 return RLIMIT_NICE; 889 case TARGET_RLIMIT_NOFILE: 890 return RLIMIT_NOFILE; 891 case TARGET_RLIMIT_NPROC: 892 return RLIMIT_NPROC; 893 case TARGET_RLIMIT_RSS: 894 return RLIMIT_RSS; 895 case TARGET_RLIMIT_RTPRIO: 896 return RLIMIT_RTPRIO; 897 case TARGET_RLIMIT_SIGPENDING: 898 return RLIMIT_SIGPENDING; 899 case TARGET_RLIMIT_STACK: 900 return RLIMIT_STACK; 901 default: 902 return code; 903 } 904 } 905 906 static inline abi_long copy_from_user_timeval(struct timeval *tv, 907 abi_ulong target_tv_addr) 908 { 909 struct target_timeval *target_tv; 910 911 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) 912 return -TARGET_EFAULT; 913 914 __get_user(tv->tv_sec, &target_tv->tv_sec); 915 __get_user(tv->tv_usec, 
&target_tv->tv_usec); 916 917 unlock_user_struct(target_tv, target_tv_addr, 0); 918 919 return 0; 920 } 921 922 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 923 const struct timeval *tv) 924 { 925 struct target_timeval *target_tv; 926 927 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) 928 return -TARGET_EFAULT; 929 930 __put_user(tv->tv_sec, &target_tv->tv_sec); 931 __put_user(tv->tv_usec, &target_tv->tv_usec); 932 933 unlock_user_struct(target_tv, target_tv_addr, 1); 934 935 return 0; 936 } 937 938 static inline abi_long copy_from_user_timezone(struct timezone *tz, 939 abi_ulong target_tz_addr) 940 { 941 struct target_timezone *target_tz; 942 943 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) { 944 return -TARGET_EFAULT; 945 } 946 947 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest); 948 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime); 949 950 unlock_user_struct(target_tz, target_tz_addr, 0); 951 952 return 0; 953 } 954 955 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 956 #include <mqueue.h> 957 958 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr, 959 abi_ulong target_mq_attr_addr) 960 { 961 struct target_mq_attr *target_mq_attr; 962 963 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 964 target_mq_attr_addr, 1)) 965 return -TARGET_EFAULT; 966 967 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 968 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 969 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 970 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 971 972 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 973 974 return 0; 975 } 976 977 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 978 const struct mq_attr *attr) 979 { 980 struct target_mq_attr *target_mq_attr; 981 982 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 983 target_mq_attr_addr, 0)) 984 return -TARGET_EFAULT; 985 
986 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 987 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 988 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 989 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 990 991 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 992 993 return 0; 994 } 995 #endif 996 997 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 998 /* do_select() must return target values and target errnos. */ 999 static abi_long do_select(int n, 1000 abi_ulong rfd_addr, abi_ulong wfd_addr, 1001 abi_ulong efd_addr, abi_ulong target_tv_addr) 1002 { 1003 fd_set rfds, wfds, efds; 1004 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 1005 struct timeval tv, *tv_ptr; 1006 abi_long ret; 1007 1008 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 1009 if (ret) { 1010 return ret; 1011 } 1012 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 1013 if (ret) { 1014 return ret; 1015 } 1016 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 1017 if (ret) { 1018 return ret; 1019 } 1020 1021 if (target_tv_addr) { 1022 if (copy_from_user_timeval(&tv, target_tv_addr)) 1023 return -TARGET_EFAULT; 1024 tv_ptr = &tv; 1025 } else { 1026 tv_ptr = NULL; 1027 } 1028 1029 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr)); 1030 1031 if (!is_error(ret)) { 1032 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1033 return -TARGET_EFAULT; 1034 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1035 return -TARGET_EFAULT; 1036 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1037 return -TARGET_EFAULT; 1038 1039 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv)) 1040 return -TARGET_EFAULT; 1041 } 1042 1043 return ret; 1044 } 1045 #endif 1046 1047 static abi_long do_pipe2(int host_pipe[], int flags) 1048 { 1049 #ifdef CONFIG_PIPE2 1050 return pipe2(host_pipe, flags); 1051 #else 1052 return -ENOSYS; 1053 #endif 1054 } 1055 1056 static abi_long 
do_pipe(void *cpu_env, abi_ulong pipedes,
        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd is returned in a second register, not in memory. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic path: store both fds into the guest's int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/* Convert a guest ip_mreq/ip_mreqn to the host struct ip_mreqn.
 * The multicast addresses stay in network byte order; only the
 * ifindex (present when len covers a full ip_mreqn) is byte-swapped.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

/* Copy a guest sockaddr of the given length into a host sockaddr,
 * fixing up the family field and AF_UNIX sun_path termination.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte nonzero but the following byte is a NUL:
             * extend the length to include that terminator. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/* Copy a host sockaddr back out to guest format (byte-swapping the
 * family field).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* Convert the control-message (cmsg) chain of a guest msghdr into the
 * host msghdr: headers are byte-swapped and SCM_RIGHTS fd arrays are
 * translated.  Any other ancillary type is copied through verbatim
 * with a warning. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* No room for even one header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length, excluding the (target) header. */
        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        /* Stop rather than overflow the host control buffer. */
        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            /* Unknown type: pass the payload through unchanged. */
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            /* SCM_RIGHTS: byte-swap each file descriptor. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* Convert the cmsg chain of a host msghdr back out to the guest:
 * SCM_RIGHTS fd arrays, SO_TIMESTAMP timevals and SCM_CREDENTIALS are
 * translated; anything else is copied through verbatim with a warning. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* No room for even one header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length, excluding the (host) header. */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        /* Stop rather than overflow the guest control buffer. */
        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Byte-swap each passed file descriptor. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = len / sizeof(int);

                for (i = 0; i < numfds; i++)
                    target_fd[i] = tswap32(fd[i]);
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval))
                    goto unimplemented;

                /* copy struct timeval to target */
                target_tv->tv_sec = tswapal(tv->tv_sec);
                target_tv->tv_usec = tswapal(tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown type: pass the payload through unchanged. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh,
cmsg); 1313 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1314 } 1315 unlock_user(target_cmsg, target_cmsg_addr, space); 1316 the_end: 1317 target_msgh->msg_controllen = tswapal(space); 1318 return 0; 1319 } 1320 1321 /* do_setsockopt() Must return target values and target errnos. */ 1322 static abi_long do_setsockopt(int sockfd, int level, int optname, 1323 abi_ulong optval_addr, socklen_t optlen) 1324 { 1325 abi_long ret; 1326 int val; 1327 struct ip_mreqn *ip_mreq; 1328 struct ip_mreq_source *ip_mreq_source; 1329 1330 switch(level) { 1331 case SOL_TCP: 1332 /* TCP options all take an 'int' value. */ 1333 if (optlen < sizeof(uint32_t)) 1334 return -TARGET_EINVAL; 1335 1336 if (get_user_u32(val, optval_addr)) 1337 return -TARGET_EFAULT; 1338 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1339 break; 1340 case SOL_IP: 1341 switch(optname) { 1342 case IP_TOS: 1343 case IP_TTL: 1344 case IP_HDRINCL: 1345 case IP_ROUTER_ALERT: 1346 case IP_RECVOPTS: 1347 case IP_RETOPTS: 1348 case IP_PKTINFO: 1349 case IP_MTU_DISCOVER: 1350 case IP_RECVERR: 1351 case IP_RECVTOS: 1352 #ifdef IP_FREEBIND 1353 case IP_FREEBIND: 1354 #endif 1355 case IP_MULTICAST_TTL: 1356 case IP_MULTICAST_LOOP: 1357 val = 0; 1358 if (optlen >= sizeof(uint32_t)) { 1359 if (get_user_u32(val, optval_addr)) 1360 return -TARGET_EFAULT; 1361 } else if (optlen >= 1) { 1362 if (get_user_u8(val, optval_addr)) 1363 return -TARGET_EFAULT; 1364 } 1365 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1366 break; 1367 case IP_ADD_MEMBERSHIP: 1368 case IP_DROP_MEMBERSHIP: 1369 if (optlen < sizeof (struct target_ip_mreq) || 1370 optlen > sizeof (struct target_ip_mreqn)) 1371 return -TARGET_EINVAL; 1372 1373 ip_mreq = (struct ip_mreqn *) alloca(optlen); 1374 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1375 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1376 break; 1377 1378 case IP_BLOCK_SOURCE: 1379 case IP_UNBLOCK_SOURCE: 
1380 case IP_ADD_SOURCE_MEMBERSHIP: 1381 case IP_DROP_SOURCE_MEMBERSHIP: 1382 if (optlen != sizeof (struct target_ip_mreq_source)) 1383 return -TARGET_EINVAL; 1384 1385 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1386 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1387 unlock_user (ip_mreq_source, optval_addr, 0); 1388 break; 1389 1390 default: 1391 goto unimplemented; 1392 } 1393 break; 1394 case SOL_IPV6: 1395 switch (optname) { 1396 case IPV6_MTU_DISCOVER: 1397 case IPV6_MTU: 1398 case IPV6_V6ONLY: 1399 case IPV6_RECVPKTINFO: 1400 val = 0; 1401 if (optlen < sizeof(uint32_t)) { 1402 return -TARGET_EINVAL; 1403 } 1404 if (get_user_u32(val, optval_addr)) { 1405 return -TARGET_EFAULT; 1406 } 1407 ret = get_errno(setsockopt(sockfd, level, optname, 1408 &val, sizeof(val))); 1409 break; 1410 default: 1411 goto unimplemented; 1412 } 1413 break; 1414 case SOL_RAW: 1415 switch (optname) { 1416 case ICMP_FILTER: 1417 /* struct icmp_filter takes an u32 value */ 1418 if (optlen < sizeof(uint32_t)) { 1419 return -TARGET_EINVAL; 1420 } 1421 1422 if (get_user_u32(val, optval_addr)) { 1423 return -TARGET_EFAULT; 1424 } 1425 ret = get_errno(setsockopt(sockfd, level, optname, 1426 &val, sizeof(val))); 1427 break; 1428 1429 default: 1430 goto unimplemented; 1431 } 1432 break; 1433 case TARGET_SOL_SOCKET: 1434 switch (optname) { 1435 case TARGET_SO_RCVTIMEO: 1436 { 1437 struct timeval tv; 1438 1439 optname = SO_RCVTIMEO; 1440 1441 set_timeout: 1442 if (optlen != sizeof(struct target_timeval)) { 1443 return -TARGET_EINVAL; 1444 } 1445 1446 if (copy_from_user_timeval(&tv, optval_addr)) { 1447 return -TARGET_EFAULT; 1448 } 1449 1450 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 1451 &tv, sizeof(tv))); 1452 return ret; 1453 } 1454 case TARGET_SO_SNDTIMEO: 1455 optname = SO_SNDTIMEO; 1456 goto set_timeout; 1457 case TARGET_SO_ATTACH_FILTER: 1458 { 1459 struct target_sock_fprog *tfprog; 1460 struct target_sock_filter *tfilter; 
1461 struct sock_fprog fprog; 1462 struct sock_filter *filter; 1463 int i; 1464 1465 if (optlen != sizeof(*tfprog)) { 1466 return -TARGET_EINVAL; 1467 } 1468 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 1469 return -TARGET_EFAULT; 1470 } 1471 if (!lock_user_struct(VERIFY_READ, tfilter, 1472 tswapal(tfprog->filter), 0)) { 1473 unlock_user_struct(tfprog, optval_addr, 1); 1474 return -TARGET_EFAULT; 1475 } 1476 1477 fprog.len = tswap16(tfprog->len); 1478 filter = malloc(fprog.len * sizeof(*filter)); 1479 if (filter == NULL) { 1480 unlock_user_struct(tfilter, tfprog->filter, 1); 1481 unlock_user_struct(tfprog, optval_addr, 1); 1482 return -TARGET_ENOMEM; 1483 } 1484 for (i = 0; i < fprog.len; i++) { 1485 filter[i].code = tswap16(tfilter[i].code); 1486 filter[i].jt = tfilter[i].jt; 1487 filter[i].jf = tfilter[i].jf; 1488 filter[i].k = tswap32(tfilter[i].k); 1489 } 1490 fprog.filter = filter; 1491 1492 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 1493 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 1494 free(filter); 1495 1496 unlock_user_struct(tfilter, tfprog->filter, 1); 1497 unlock_user_struct(tfprog, optval_addr, 1); 1498 return ret; 1499 } 1500 /* Options with 'int' argument. 
*/ 1501 case TARGET_SO_DEBUG: 1502 optname = SO_DEBUG; 1503 break; 1504 case TARGET_SO_REUSEADDR: 1505 optname = SO_REUSEADDR; 1506 break; 1507 case TARGET_SO_TYPE: 1508 optname = SO_TYPE; 1509 break; 1510 case TARGET_SO_ERROR: 1511 optname = SO_ERROR; 1512 break; 1513 case TARGET_SO_DONTROUTE: 1514 optname = SO_DONTROUTE; 1515 break; 1516 case TARGET_SO_BROADCAST: 1517 optname = SO_BROADCAST; 1518 break; 1519 case TARGET_SO_SNDBUF: 1520 optname = SO_SNDBUF; 1521 break; 1522 case TARGET_SO_SNDBUFFORCE: 1523 optname = SO_SNDBUFFORCE; 1524 break; 1525 case TARGET_SO_RCVBUF: 1526 optname = SO_RCVBUF; 1527 break; 1528 case TARGET_SO_RCVBUFFORCE: 1529 optname = SO_RCVBUFFORCE; 1530 break; 1531 case TARGET_SO_KEEPALIVE: 1532 optname = SO_KEEPALIVE; 1533 break; 1534 case TARGET_SO_OOBINLINE: 1535 optname = SO_OOBINLINE; 1536 break; 1537 case TARGET_SO_NO_CHECK: 1538 optname = SO_NO_CHECK; 1539 break; 1540 case TARGET_SO_PRIORITY: 1541 optname = SO_PRIORITY; 1542 break; 1543 #ifdef SO_BSDCOMPAT 1544 case TARGET_SO_BSDCOMPAT: 1545 optname = SO_BSDCOMPAT; 1546 break; 1547 #endif 1548 case TARGET_SO_PASSCRED: 1549 optname = SO_PASSCRED; 1550 break; 1551 case TARGET_SO_PASSSEC: 1552 optname = SO_PASSSEC; 1553 break; 1554 case TARGET_SO_TIMESTAMP: 1555 optname = SO_TIMESTAMP; 1556 break; 1557 case TARGET_SO_RCVLOWAT: 1558 optname = SO_RCVLOWAT; 1559 break; 1560 break; 1561 default: 1562 goto unimplemented; 1563 } 1564 if (optlen < sizeof(uint32_t)) 1565 return -TARGET_EINVAL; 1566 1567 if (get_user_u32(val, optval_addr)) 1568 return -TARGET_EFAULT; 1569 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 1570 break; 1571 default: 1572 unimplemented: 1573 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname); 1574 ret = -TARGET_ENOPROTOOPT; 1575 } 1576 return ret; 1577 } 1578 1579 /* do_getsockopt() Must return target values and target errnos. 
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy out at most what the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Pass unknown option names straight through to the host. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): lv is initialised from sizeof(lv); presumably
         * sizeof(val) was intended — same size on these hosts. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Guest may ask for a single byte instead of a full int. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Kernel-style single-byte result for short lengths. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* Lock a guest iovec array and each of its buffers into host memory,
 * returning a freshly allocated host iovec array (release with
 * unlock_iovec()).  On failure returns NULL with errno set; count == 0
 * returns NULL with errno == 0. */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.
*/ 1812 vec[i].iov_base = 0; 1813 } else { 1814 vec[i].iov_base = lock_user(type, base, len, copy); 1815 if (!vec[i].iov_base) { 1816 err = EFAULT; 1817 goto fail; 1818 } 1819 if (len > max_len - total_len) { 1820 len = max_len - total_len; 1821 } 1822 } 1823 vec[i].iov_len = len; 1824 total_len += len; 1825 } 1826 1827 unlock_user(target_vec, target_addr, 0); 1828 return vec; 1829 1830 fail: 1831 unlock_user(target_vec, target_addr, 0); 1832 fail2: 1833 free(vec); 1834 errno = err; 1835 return NULL; 1836 } 1837 1838 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 1839 int count, int copy) 1840 { 1841 struct target_iovec *target_vec; 1842 int i; 1843 1844 target_vec = lock_user(VERIFY_READ, target_addr, 1845 count * sizeof(struct target_iovec), 1); 1846 if (target_vec) { 1847 for (i = 0; i < count; i++) { 1848 abi_ulong base = tswapal(target_vec[i].iov_base); 1849 abi_long len = tswapal(target_vec[i].iov_base); 1850 if (len < 0) { 1851 break; 1852 } 1853 unlock_user(vec[i].iov_base, base, copy ? 
vec[i].iov_len : 0); 1854 } 1855 unlock_user(target_vec, target_addr, 0); 1856 } 1857 1858 free(vec); 1859 } 1860 1861 static inline int target_to_host_sock_type(int *type) 1862 { 1863 int host_type = 0; 1864 int target_type = *type; 1865 1866 switch (target_type & TARGET_SOCK_TYPE_MASK) { 1867 case TARGET_SOCK_DGRAM: 1868 host_type = SOCK_DGRAM; 1869 break; 1870 case TARGET_SOCK_STREAM: 1871 host_type = SOCK_STREAM; 1872 break; 1873 default: 1874 host_type = target_type & TARGET_SOCK_TYPE_MASK; 1875 break; 1876 } 1877 if (target_type & TARGET_SOCK_CLOEXEC) { 1878 #if defined(SOCK_CLOEXEC) 1879 host_type |= SOCK_CLOEXEC; 1880 #else 1881 return -TARGET_EINVAL; 1882 #endif 1883 } 1884 if (target_type & TARGET_SOCK_NONBLOCK) { 1885 #if defined(SOCK_NONBLOCK) 1886 host_type |= SOCK_NONBLOCK; 1887 #elif !defined(O_NONBLOCK) 1888 return -TARGET_EINVAL; 1889 #endif 1890 } 1891 *type = host_type; 1892 return 0; 1893 } 1894 1895 /* Try to emulate socket type flags after socket creation. */ 1896 static int sock_flags_fixup(int fd, int target_type) 1897 { 1898 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 1899 if (target_type & TARGET_SOCK_NONBLOCK) { 1900 int flags = fcntl(fd, F_GETFL); 1901 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 1902 close(fd); 1903 return -TARGET_EINVAL; 1904 } 1905 } 1906 #endif 1907 return fd; 1908 } 1909 1910 /* do_socket() Must return target values and target errnos. */ 1911 static abi_long do_socket(int domain, int type, int protocol) 1912 { 1913 int target_type = type; 1914 int ret; 1915 1916 ret = target_to_host_sock_type(&type); 1917 if (ret) { 1918 return ret; 1919 } 1920 1921 if (domain == PF_NETLINK) 1922 return -TARGET_EAFNOSUPPORT; 1923 ret = get_errno(socket(domain, type, protocol)); 1924 if (ret >= 0) { 1925 ret = sock_flags_fixup(ret, target_type); 1926 } 1927 return ret; 1928 } 1929 1930 /* do_bind() Must return target values and target errnos. 
 */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 leaves room for the sun_path NUL fixup done by
     * target_to_host_sockaddr(). */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg_locked() Must return target values and target errnos.
 * Shared sendmsg/recvmsg implementation; 'send' selects the direction.
 * msgp must already be locked by the caller. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg headers may be larger than the target's; double the
     * buffer so the converted chain always fits. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count while converting cmsgs and the
             * peer address back out to the guest. */
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

/* Lock the guest msghdr and dispatch to do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

#ifdef TARGET_NR_sendmmsg
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg as a loop over do_sendrecvmsg_locked().
 * Returns the number of datagrams processed, or a target errno if
 * none were. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
#endif

/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif

/* do_accept4() Must return target values and target errnos.  */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* Guest passed no address buffer: don't report the peer address. */
    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos.  */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        /* Store both fds into the guest's int[2] array. */
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_sendto() Must return target values and target errnos.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /* With no destination address this degenerates to send(2). */
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    /* With no address buffer this degenerates to recv(2). */
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received data back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}

#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i <
ac[num]; ++i) { 2334 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 2335 return -TARGET_EFAULT; 2336 } 2337 } 2338 } 2339 2340 /* now when we have the args, actually handle the call */ 2341 switch (num) { 2342 case SOCKOP_socket: /* domain, type, protocol */ 2343 return do_socket(a[0], a[1], a[2]); 2344 case SOCKOP_bind: /* sockfd, addr, addrlen */ 2345 return do_bind(a[0], a[1], a[2]); 2346 case SOCKOP_connect: /* sockfd, addr, addrlen */ 2347 return do_connect(a[0], a[1], a[2]); 2348 case SOCKOP_listen: /* sockfd, backlog */ 2349 return get_errno(listen(a[0], a[1])); 2350 case SOCKOP_accept: /* sockfd, addr, addrlen */ 2351 return do_accept4(a[0], a[1], a[2], 0); 2352 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */ 2353 return do_accept4(a[0], a[1], a[2], a[3]); 2354 case SOCKOP_getsockname: /* sockfd, addr, addrlen */ 2355 return do_getsockname(a[0], a[1], a[2]); 2356 case SOCKOP_getpeername: /* sockfd, addr, addrlen */ 2357 return do_getpeername(a[0], a[1], a[2]); 2358 case SOCKOP_socketpair: /* domain, type, protocol, tab */ 2359 return do_socketpair(a[0], a[1], a[2], a[3]); 2360 case SOCKOP_send: /* sockfd, msg, len, flags */ 2361 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 2362 case SOCKOP_recv: /* sockfd, msg, len, flags */ 2363 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 2364 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */ 2365 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 2366 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */ 2367 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 2368 case SOCKOP_shutdown: /* sockfd, how */ 2369 return get_errno(shutdown(a[0], a[1])); 2370 case SOCKOP_sendmsg: /* sockfd, msg, flags */ 2371 return do_sendrecvmsg(a[0], a[1], a[2], 1); 2372 case SOCKOP_recvmsg: /* sockfd, msg, flags */ 2373 return do_sendrecvmsg(a[0], a[1], a[2], 0); 2374 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */ 2375 return do_setsockopt(a[0], a[1], 
a[2], a[3], a[4]); 2376 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */ 2377 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 2378 default: 2379 gemu_log("Unsupported socketcall: %d\n", num); 2380 return -TARGET_ENOSYS; 2381 } 2382 } 2383 #endif 2384 2385 #define N_SHM_REGIONS 32 2386 2387 static struct shm_region { 2388 abi_ulong start; 2389 abi_ulong size; 2390 } shm_regions[N_SHM_REGIONS]; 2391 2392 struct target_semid_ds 2393 { 2394 struct target_ipc_perm sem_perm; 2395 abi_ulong sem_otime; 2396 abi_ulong __unused1; 2397 abi_ulong sem_ctime; 2398 abi_ulong __unused2; 2399 abi_ulong sem_nsems; 2400 abi_ulong __unused3; 2401 abi_ulong __unused4; 2402 }; 2403 2404 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2405 abi_ulong target_addr) 2406 { 2407 struct target_ipc_perm *target_ip; 2408 struct target_semid_ds *target_sd; 2409 2410 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2411 return -TARGET_EFAULT; 2412 target_ip = &(target_sd->sem_perm); 2413 host_ip->__key = tswap32(target_ip->__key); 2414 host_ip->uid = tswap32(target_ip->uid); 2415 host_ip->gid = tswap32(target_ip->gid); 2416 host_ip->cuid = tswap32(target_ip->cuid); 2417 host_ip->cgid = tswap32(target_ip->cgid); 2418 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2419 host_ip->mode = tswap32(target_ip->mode); 2420 #else 2421 host_ip->mode = tswap16(target_ip->mode); 2422 #endif 2423 #if defined(TARGET_PPC) 2424 host_ip->__seq = tswap32(target_ip->__seq); 2425 #else 2426 host_ip->__seq = tswap16(target_ip->__seq); 2427 #endif 2428 unlock_user_struct(target_sd, target_addr, 0); 2429 return 0; 2430 } 2431 2432 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2433 struct ipc_perm *host_ip) 2434 { 2435 struct target_ipc_perm *target_ip; 2436 struct target_semid_ds *target_sd; 2437 2438 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2439 return -TARGET_EFAULT; 2440 target_ip = 
&(target_sd->sem_perm); 2441 target_ip->__key = tswap32(host_ip->__key); 2442 target_ip->uid = tswap32(host_ip->uid); 2443 target_ip->gid = tswap32(host_ip->gid); 2444 target_ip->cuid = tswap32(host_ip->cuid); 2445 target_ip->cgid = tswap32(host_ip->cgid); 2446 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2447 target_ip->mode = tswap32(host_ip->mode); 2448 #else 2449 target_ip->mode = tswap16(host_ip->mode); 2450 #endif 2451 #if defined(TARGET_PPC) 2452 target_ip->__seq = tswap32(host_ip->__seq); 2453 #else 2454 target_ip->__seq = tswap16(host_ip->__seq); 2455 #endif 2456 unlock_user_struct(target_sd, target_addr, 1); 2457 return 0; 2458 } 2459 2460 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2461 abi_ulong target_addr) 2462 { 2463 struct target_semid_ds *target_sd; 2464 2465 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2466 return -TARGET_EFAULT; 2467 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2468 return -TARGET_EFAULT; 2469 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2470 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2471 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2472 unlock_user_struct(target_sd, target_addr, 0); 2473 return 0; 2474 } 2475 2476 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2477 struct semid_ds *host_sd) 2478 { 2479 struct target_semid_ds *target_sd; 2480 2481 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2482 return -TARGET_EFAULT; 2483 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2484 return -TARGET_EFAULT; 2485 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2486 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2487 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2488 unlock_user_struct(target_sd, target_addr, 1); 2489 return 0; 2490 } 2491 2492 struct target_seminfo { 2493 int semmap; 2494 int semmni; 2495 int semmns; 2496 int semmnu; 2497 int semmsl; 
2498 int semopm; 2499 int semume; 2500 int semusz; 2501 int semvmx; 2502 int semaem; 2503 }; 2504 2505 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2506 struct seminfo *host_seminfo) 2507 { 2508 struct target_seminfo *target_seminfo; 2509 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2510 return -TARGET_EFAULT; 2511 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2512 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2513 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2514 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2515 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2516 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2517 __put_user(host_seminfo->semume, &target_seminfo->semume); 2518 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2519 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2520 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2521 unlock_user_struct(target_seminfo, target_addr, 1); 2522 return 0; 2523 } 2524 2525 union semun { 2526 int val; 2527 struct semid_ds *buf; 2528 unsigned short *array; 2529 struct seminfo *__buf; 2530 }; 2531 2532 union target_semun { 2533 int val; 2534 abi_ulong buf; 2535 abi_ulong array; 2536 abi_ulong __buf; 2537 }; 2538 2539 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2540 abi_ulong target_addr) 2541 { 2542 int nsems; 2543 unsigned short *array; 2544 union semun semun; 2545 struct semid_ds semid_ds; 2546 int i, ret; 2547 2548 semun.buf = &semid_ds; 2549 2550 ret = semctl(semid, 0, IPC_STAT, semun); 2551 if (ret == -1) 2552 return get_errno(ret); 2553 2554 nsems = semid_ds.sem_nsems; 2555 2556 *host_array = malloc(nsems*sizeof(unsigned short)); 2557 if (!*host_array) { 2558 return -TARGET_ENOMEM; 2559 } 2560 array = lock_user(VERIFY_READ, target_addr, 2561 nsems*sizeof(unsigned short), 1); 2562 if (!array) { 2563 free(*host_array); 
2564 return -TARGET_EFAULT; 2565 } 2566 2567 for(i=0; i<nsems; i++) { 2568 __get_user((*host_array)[i], &array[i]); 2569 } 2570 unlock_user(array, target_addr, 0); 2571 2572 return 0; 2573 } 2574 2575 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2576 unsigned short **host_array) 2577 { 2578 int nsems; 2579 unsigned short *array; 2580 union semun semun; 2581 struct semid_ds semid_ds; 2582 int i, ret; 2583 2584 semun.buf = &semid_ds; 2585 2586 ret = semctl(semid, 0, IPC_STAT, semun); 2587 if (ret == -1) 2588 return get_errno(ret); 2589 2590 nsems = semid_ds.sem_nsems; 2591 2592 array = lock_user(VERIFY_WRITE, target_addr, 2593 nsems*sizeof(unsigned short), 0); 2594 if (!array) 2595 return -TARGET_EFAULT; 2596 2597 for(i=0; i<nsems; i++) { 2598 __put_user((*host_array)[i], &array[i]); 2599 } 2600 free(*host_array); 2601 unlock_user(array, target_addr, 1); 2602 2603 return 0; 2604 } 2605 2606 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2607 union target_semun target_su) 2608 { 2609 union semun arg; 2610 struct semid_ds dsarg; 2611 unsigned short *array = NULL; 2612 struct seminfo seminfo; 2613 abi_long ret = -TARGET_EINVAL; 2614 abi_long err; 2615 cmd &= 0xff; 2616 2617 switch( cmd ) { 2618 case GETVAL: 2619 case SETVAL: 2620 arg.val = tswap32(target_su.val); 2621 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2622 target_su.val = tswap32(arg.val); 2623 break; 2624 case GETALL: 2625 case SETALL: 2626 err = target_to_host_semarray(semid, &array, target_su.array); 2627 if (err) 2628 return err; 2629 arg.array = array; 2630 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2631 err = host_to_target_semarray(semid, target_su.array, &array); 2632 if (err) 2633 return err; 2634 break; 2635 case IPC_STAT: 2636 case IPC_SET: 2637 case SEM_STAT: 2638 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2639 if (err) 2640 return err; 2641 arg.buf = &dsarg; 2642 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2643 err 
= host_to_target_semid_ds(target_su.buf, &dsarg); 2644 if (err) 2645 return err; 2646 break; 2647 case IPC_INFO: 2648 case SEM_INFO: 2649 arg.__buf = &seminfo; 2650 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2651 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2652 if (err) 2653 return err; 2654 break; 2655 case IPC_RMID: 2656 case GETPID: 2657 case GETNCNT: 2658 case GETZCNT: 2659 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2660 break; 2661 } 2662 2663 return ret; 2664 } 2665 2666 struct target_sembuf { 2667 unsigned short sem_num; 2668 short sem_op; 2669 short sem_flg; 2670 }; 2671 2672 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2673 abi_ulong target_addr, 2674 unsigned nsops) 2675 { 2676 struct target_sembuf *target_sembuf; 2677 int i; 2678 2679 target_sembuf = lock_user(VERIFY_READ, target_addr, 2680 nsops*sizeof(struct target_sembuf), 1); 2681 if (!target_sembuf) 2682 return -TARGET_EFAULT; 2683 2684 for(i=0; i<nsops; i++) { 2685 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2686 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2687 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2688 } 2689 2690 unlock_user(target_sembuf, target_addr, 0); 2691 2692 return 0; 2693 } 2694 2695 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2696 { 2697 struct sembuf sops[nsops]; 2698 2699 if (target_to_host_sembuf(sops, ptr, nsops)) 2700 return -TARGET_EFAULT; 2701 2702 return get_errno(semop(semid, sops, nsops)); 2703 } 2704 2705 struct target_msqid_ds 2706 { 2707 struct target_ipc_perm msg_perm; 2708 abi_ulong msg_stime; 2709 #if TARGET_ABI_BITS == 32 2710 abi_ulong __unused1; 2711 #endif 2712 abi_ulong msg_rtime; 2713 #if TARGET_ABI_BITS == 32 2714 abi_ulong __unused2; 2715 #endif 2716 abi_ulong msg_ctime; 2717 #if TARGET_ABI_BITS == 32 2718 abi_ulong __unused3; 2719 #endif 2720 abi_ulong __msg_cbytes; 2721 abi_ulong msg_qnum; 2722 abi_ulong msg_qbytes; 
2723 abi_ulong msg_lspid; 2724 abi_ulong msg_lrpid; 2725 abi_ulong __unused4; 2726 abi_ulong __unused5; 2727 }; 2728 2729 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2730 abi_ulong target_addr) 2731 { 2732 struct target_msqid_ds *target_md; 2733 2734 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2735 return -TARGET_EFAULT; 2736 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2737 return -TARGET_EFAULT; 2738 host_md->msg_stime = tswapal(target_md->msg_stime); 2739 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2740 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2741 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2742 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2743 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2744 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2745 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2746 unlock_user_struct(target_md, target_addr, 0); 2747 return 0; 2748 } 2749 2750 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2751 struct msqid_ds *host_md) 2752 { 2753 struct target_msqid_ds *target_md; 2754 2755 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2756 return -TARGET_EFAULT; 2757 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2758 return -TARGET_EFAULT; 2759 target_md->msg_stime = tswapal(host_md->msg_stime); 2760 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2761 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2762 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2763 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2764 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2765 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2766 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2767 unlock_user_struct(target_md, target_addr, 1); 2768 return 0; 2769 } 2770 2771 struct target_msginfo { 2772 int msgpool; 2773 int msgmap; 2774 int msgmax; 2775 int msgmnb; 
2776 int msgmni; 2777 int msgssz; 2778 int msgtql; 2779 unsigned short int msgseg; 2780 }; 2781 2782 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2783 struct msginfo *host_msginfo) 2784 { 2785 struct target_msginfo *target_msginfo; 2786 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2787 return -TARGET_EFAULT; 2788 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2789 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2790 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2791 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2792 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2793 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2794 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2795 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2796 unlock_user_struct(target_msginfo, target_addr, 1); 2797 return 0; 2798 } 2799 2800 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2801 { 2802 struct msqid_ds dsarg; 2803 struct msginfo msginfo; 2804 abi_long ret = -TARGET_EINVAL; 2805 2806 cmd &= 0xff; 2807 2808 switch (cmd) { 2809 case IPC_STAT: 2810 case IPC_SET: 2811 case MSG_STAT: 2812 if (target_to_host_msqid_ds(&dsarg,ptr)) 2813 return -TARGET_EFAULT; 2814 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2815 if (host_to_target_msqid_ds(ptr,&dsarg)) 2816 return -TARGET_EFAULT; 2817 break; 2818 case IPC_RMID: 2819 ret = get_errno(msgctl(msgid, cmd, NULL)); 2820 break; 2821 case IPC_INFO: 2822 case MSG_INFO: 2823 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2824 if (host_to_target_msginfo(ptr, &msginfo)) 2825 return -TARGET_EFAULT; 2826 break; 2827 } 2828 2829 return ret; 2830 } 2831 2832 struct target_msgbuf { 2833 abi_long mtype; 2834 char mtext[1]; 2835 }; 2836 2837 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2838 unsigned int msgsz, int msgflg) 2839 { 2840 struct target_msgbuf *target_mb; 2841 
struct msgbuf *host_mb; 2842 abi_long ret = 0; 2843 2844 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2845 return -TARGET_EFAULT; 2846 host_mb = malloc(msgsz+sizeof(long)); 2847 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2848 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2849 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2850 free(host_mb); 2851 unlock_user_struct(target_mb, msgp, 0); 2852 2853 return ret; 2854 } 2855 2856 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2857 unsigned int msgsz, abi_long msgtyp, 2858 int msgflg) 2859 { 2860 struct target_msgbuf *target_mb; 2861 char *target_mtext; 2862 struct msgbuf *host_mb; 2863 abi_long ret = 0; 2864 2865 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2866 return -TARGET_EFAULT; 2867 2868 host_mb = g_malloc(msgsz+sizeof(long)); 2869 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2870 2871 if (ret > 0) { 2872 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2873 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2874 if (!target_mtext) { 2875 ret = -TARGET_EFAULT; 2876 goto end; 2877 } 2878 memcpy(target_mb->mtext, host_mb->mtext, ret); 2879 unlock_user(target_mtext, target_mtext_addr, ret); 2880 } 2881 2882 target_mb->mtype = tswapal(host_mb->mtype); 2883 2884 end: 2885 if (target_mb) 2886 unlock_user_struct(target_mb, msgp, 1); 2887 g_free(host_mb); 2888 return ret; 2889 } 2890 2891 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2892 abi_ulong target_addr) 2893 { 2894 struct target_shmid_ds *target_sd; 2895 2896 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2897 return -TARGET_EFAULT; 2898 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2899 return -TARGET_EFAULT; 2900 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2901 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2902 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2903 
__get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2904 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2905 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2906 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2907 unlock_user_struct(target_sd, target_addr, 0); 2908 return 0; 2909 } 2910 2911 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2912 struct shmid_ds *host_sd) 2913 { 2914 struct target_shmid_ds *target_sd; 2915 2916 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2917 return -TARGET_EFAULT; 2918 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2919 return -TARGET_EFAULT; 2920 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2921 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2922 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2923 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2924 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2925 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2926 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2927 unlock_user_struct(target_sd, target_addr, 1); 2928 return 0; 2929 } 2930 2931 struct target_shminfo { 2932 abi_ulong shmmax; 2933 abi_ulong shmmin; 2934 abi_ulong shmmni; 2935 abi_ulong shmseg; 2936 abi_ulong shmall; 2937 }; 2938 2939 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2940 struct shminfo *host_shminfo) 2941 { 2942 struct target_shminfo *target_shminfo; 2943 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 2944 return -TARGET_EFAULT; 2945 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 2946 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 2947 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 2948 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 2949 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 2950 unlock_user_struct(target_shminfo, target_addr, 1); 2951 return 0; 2952 } 2953 2954 struct 
target_shm_info { 2955 int used_ids; 2956 abi_ulong shm_tot; 2957 abi_ulong shm_rss; 2958 abi_ulong shm_swp; 2959 abi_ulong swap_attempts; 2960 abi_ulong swap_successes; 2961 }; 2962 2963 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 2964 struct shm_info *host_shm_info) 2965 { 2966 struct target_shm_info *target_shm_info; 2967 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 2968 return -TARGET_EFAULT; 2969 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 2970 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 2971 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 2972 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 2973 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 2974 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 2975 unlock_user_struct(target_shm_info, target_addr, 1); 2976 return 0; 2977 } 2978 2979 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 2980 { 2981 struct shmid_ds dsarg; 2982 struct shminfo shminfo; 2983 struct shm_info shm_info; 2984 abi_long ret = -TARGET_EINVAL; 2985 2986 cmd &= 0xff; 2987 2988 switch(cmd) { 2989 case IPC_STAT: 2990 case IPC_SET: 2991 case SHM_STAT: 2992 if (target_to_host_shmid_ds(&dsarg, buf)) 2993 return -TARGET_EFAULT; 2994 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 2995 if (host_to_target_shmid_ds(buf, &dsarg)) 2996 return -TARGET_EFAULT; 2997 break; 2998 case IPC_INFO: 2999 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 3000 if (host_to_target_shminfo(buf, &shminfo)) 3001 return -TARGET_EFAULT; 3002 break; 3003 case SHM_INFO: 3004 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 3005 if (host_to_target_shm_info(buf, &shm_info)) 3006 return -TARGET_EFAULT; 3007 break; 3008 case IPC_RMID: 3009 case SHM_LOCK: 3010 case SHM_UNLOCK: 3011 ret = get_errno(shmctl(shmid, cmd, NULL)); 3012 break; 3013 } 3014 3015 return 
ret; 3016 } 3017 3018 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 3019 { 3020 abi_long raddr; 3021 void *host_raddr; 3022 struct shmid_ds shm_info; 3023 int i,ret; 3024 3025 /* find out the length of the shared memory segment */ 3026 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 3027 if (is_error(ret)) { 3028 /* can't get length, bail out */ 3029 return ret; 3030 } 3031 3032 mmap_lock(); 3033 3034 if (shmaddr) 3035 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 3036 else { 3037 abi_ulong mmap_start; 3038 3039 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 3040 3041 if (mmap_start == -1) { 3042 errno = ENOMEM; 3043 host_raddr = (void *)-1; 3044 } else 3045 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 3046 } 3047 3048 if (host_raddr == (void *)-1) { 3049 mmap_unlock(); 3050 return get_errno((long)host_raddr); 3051 } 3052 raddr=h2g((unsigned long)host_raddr); 3053 3054 page_set_flags(raddr, raddr + shm_info.shm_segsz, 3055 PAGE_VALID | PAGE_READ | 3056 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3057 3058 for (i = 0; i < N_SHM_REGIONS; i++) { 3059 if (shm_regions[i].start == 0) { 3060 shm_regions[i].start = raddr; 3061 shm_regions[i].size = shm_info.shm_segsz; 3062 break; 3063 } 3064 } 3065 3066 mmap_unlock(); 3067 return raddr; 3068 3069 } 3070 3071 static inline abi_long do_shmdt(abi_ulong shmaddr) 3072 { 3073 int i; 3074 3075 for (i = 0; i < N_SHM_REGIONS; ++i) { 3076 if (shm_regions[i].start == shmaddr) { 3077 shm_regions[i].start = 0; 3078 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3079 break; 3080 } 3081 } 3082 3083 return get_errno(shmdt(g2h(shmaddr))); 3084 } 3085 3086 #ifdef TARGET_NR_ipc 3087 /* ??? This only works with linear mappings. */ 3088 /* do_ipc() must return target values and target errnos. 
*/ 3089 static abi_long do_ipc(unsigned int call, int first, 3090 int second, int third, 3091 abi_long ptr, abi_long fifth) 3092 { 3093 int version; 3094 abi_long ret = 0; 3095 3096 version = call >> 16; 3097 call &= 0xffff; 3098 3099 switch (call) { 3100 case IPCOP_semop: 3101 ret = do_semop(first, ptr, second); 3102 break; 3103 3104 case IPCOP_semget: 3105 ret = get_errno(semget(first, second, third)); 3106 break; 3107 3108 case IPCOP_semctl: 3109 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3110 break; 3111 3112 case IPCOP_msgget: 3113 ret = get_errno(msgget(first, second)); 3114 break; 3115 3116 case IPCOP_msgsnd: 3117 ret = do_msgsnd(first, ptr, second, third); 3118 break; 3119 3120 case IPCOP_msgctl: 3121 ret = do_msgctl(first, second, ptr); 3122 break; 3123 3124 case IPCOP_msgrcv: 3125 switch (version) { 3126 case 0: 3127 { 3128 struct target_ipc_kludge { 3129 abi_long msgp; 3130 abi_long msgtyp; 3131 } *tmp; 3132 3133 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3134 ret = -TARGET_EFAULT; 3135 break; 3136 } 3137 3138 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3139 3140 unlock_user_struct(tmp, ptr, 0); 3141 break; 3142 } 3143 default: 3144 ret = do_msgrcv(first, ptr, second, fifth, third); 3145 } 3146 break; 3147 3148 case IPCOP_shmat: 3149 switch (version) { 3150 default: 3151 { 3152 abi_ulong raddr; 3153 raddr = do_shmat(first, ptr, second); 3154 if (is_error(raddr)) 3155 return get_errno(raddr); 3156 if (put_user_ual(raddr, third)) 3157 return -TARGET_EFAULT; 3158 break; 3159 } 3160 case 1: 3161 ret = -TARGET_EINVAL; 3162 break; 3163 } 3164 break; 3165 case IPCOP_shmdt: 3166 ret = do_shmdt(ptr); 3167 break; 3168 3169 case IPCOP_shmget: 3170 /* IPC_* flag values are the same on all linux platforms */ 3171 ret = get_errno(shmget(first, second, third)); 3172 break; 3173 3174 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3175 case IPCOP_shmctl: 3176 ret = 
do_shmctl(first, second, ptr); 3177 break; 3178 default: 3179 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3180 ret = -TARGET_ENOSYS; 3181 break; 3182 } 3183 return ret; 3184 } 3185 #endif 3186 3187 /* kernel structure types definitions */ 3188 3189 #define STRUCT(name, ...) STRUCT_ ## name, 3190 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3191 enum { 3192 #include "syscall_types.h" 3193 }; 3194 #undef STRUCT 3195 #undef STRUCT_SPECIAL 3196 3197 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3198 #define STRUCT_SPECIAL(name) 3199 #include "syscall_types.h" 3200 #undef STRUCT 3201 #undef STRUCT_SPECIAL 3202 3203 typedef struct IOCTLEntry IOCTLEntry; 3204 3205 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3206 int fd, abi_long cmd, abi_long arg); 3207 3208 struct IOCTLEntry { 3209 unsigned int target_cmd; 3210 unsigned int host_cmd; 3211 const char *name; 3212 int access; 3213 do_ioctl_fn *do_ioctl; 3214 const argtype arg_type[5]; 3215 }; 3216 3217 #define IOC_R 0x0001 3218 #define IOC_W 0x0002 3219 #define IOC_RW (IOC_R | IOC_W) 3220 3221 #define MAX_STRUCT_SIZE 4096 3222 3223 #ifdef CONFIG_FIEMAP 3224 /* So fiemap access checks don't overflow on 32 bit systems. 3225 * This is very slightly smaller than the limit imposed by 3226 * the underlying kernel. 3227 */ 3228 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3229 / sizeof(struct fiemap_extent)) 3230 3231 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3232 int fd, abi_long cmd, abi_long arg) 3233 { 3234 /* The parameter for this ioctl is a struct fiemap followed 3235 * by an array of struct fiemap_extent whose size is set 3236 * in fiemap->fm_extent_count. The array is filled in by the 3237 * ioctl. 
3238 */ 3239 int target_size_in, target_size_out; 3240 struct fiemap *fm; 3241 const argtype *arg_type = ie->arg_type; 3242 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3243 void *argptr, *p; 3244 abi_long ret; 3245 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3246 uint32_t outbufsz; 3247 int free_fm = 0; 3248 3249 assert(arg_type[0] == TYPE_PTR); 3250 assert(ie->access == IOC_RW); 3251 arg_type++; 3252 target_size_in = thunk_type_size(arg_type, 0); 3253 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3254 if (!argptr) { 3255 return -TARGET_EFAULT; 3256 } 3257 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3258 unlock_user(argptr, arg, 0); 3259 fm = (struct fiemap *)buf_temp; 3260 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3261 return -TARGET_EINVAL; 3262 } 3263 3264 outbufsz = sizeof (*fm) + 3265 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3266 3267 if (outbufsz > MAX_STRUCT_SIZE) { 3268 /* We can't fit all the extents into the fixed size buffer. 3269 * Allocate one that is large enough and use it instead. 
3270 */ 3271 fm = malloc(outbufsz); 3272 if (!fm) { 3273 return -TARGET_ENOMEM; 3274 } 3275 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3276 free_fm = 1; 3277 } 3278 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3279 if (!is_error(ret)) { 3280 target_size_out = target_size_in; 3281 /* An extent_count of 0 means we were only counting the extents 3282 * so there are no structs to copy 3283 */ 3284 if (fm->fm_extent_count != 0) { 3285 target_size_out += fm->fm_mapped_extents * extent_size; 3286 } 3287 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3288 if (!argptr) { 3289 ret = -TARGET_EFAULT; 3290 } else { 3291 /* Convert the struct fiemap */ 3292 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3293 if (fm->fm_extent_count != 0) { 3294 p = argptr + target_size_in; 3295 /* ...and then all the struct fiemap_extents */ 3296 for (i = 0; i < fm->fm_mapped_extents; i++) { 3297 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3298 THUNK_TARGET); 3299 p += extent_size; 3300 } 3301 } 3302 unlock_user(argptr, arg, target_size_out); 3303 } 3304 } 3305 if (free_fm) { 3306 free(fm); 3307 } 3308 return ret; 3309 } 3310 #endif 3311 3312 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3313 int fd, abi_long cmd, abi_long arg) 3314 { 3315 const argtype *arg_type = ie->arg_type; 3316 int target_size; 3317 void *argptr; 3318 int ret; 3319 struct ifconf *host_ifconf; 3320 uint32_t outbufsz; 3321 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3322 int target_ifreq_size; 3323 int nb_ifreq; 3324 int free_buf = 0; 3325 int i; 3326 int target_ifc_len; 3327 abi_long target_ifc_buf; 3328 int host_ifc_len; 3329 char *host_ifc_buf; 3330 3331 assert(arg_type[0] == TYPE_PTR); 3332 assert(ie->access == IOC_RW); 3333 3334 arg_type++; 3335 target_size = thunk_type_size(arg_type, 0); 3336 3337 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3338 if (!argptr) 3339 return -TARGET_EFAULT; 3340 thunk_convert(buf_temp, argptr, 
arg_type, THUNK_HOST); 3341 unlock_user(argptr, arg, 0); 3342 3343 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3344 target_ifc_len = host_ifconf->ifc_len; 3345 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3346 3347 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3348 nb_ifreq = target_ifc_len / target_ifreq_size; 3349 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3350 3351 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3352 if (outbufsz > MAX_STRUCT_SIZE) { 3353 /* We can't fit all the extents into the fixed size buffer. 3354 * Allocate one that is large enough and use it instead. 3355 */ 3356 host_ifconf = malloc(outbufsz); 3357 if (!host_ifconf) { 3358 return -TARGET_ENOMEM; 3359 } 3360 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3361 free_buf = 1; 3362 } 3363 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3364 3365 host_ifconf->ifc_len = host_ifc_len; 3366 host_ifconf->ifc_buf = host_ifc_buf; 3367 3368 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3369 if (!is_error(ret)) { 3370 /* convert host ifc_len to target ifc_len */ 3371 3372 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3373 target_ifc_len = nb_ifreq * target_ifreq_size; 3374 host_ifconf->ifc_len = target_ifc_len; 3375 3376 /* restore target ifc_buf */ 3377 3378 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3379 3380 /* copy struct ifconf to target user */ 3381 3382 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3383 if (!argptr) 3384 return -TARGET_EFAULT; 3385 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3386 unlock_user(argptr, arg, target_size); 3387 3388 /* copy ifreq[] to target user */ 3389 3390 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3391 for (i = 0; i < nb_ifreq ; i++) { 3392 thunk_convert(argptr + i * target_ifreq_size, 3393 host_ifc_buf + i * sizeof(struct ifreq), 3394 ifreq_arg_type, THUNK_TARGET); 3395 } 3396 unlock_user(argptr, target_ifc_buf, 
target_ifc_len); 3397 } 3398 3399 if (free_buf) { 3400 free(host_ifconf); 3401 } 3402 3403 return ret; 3404 } 3405 3406 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3407 abi_long cmd, abi_long arg) 3408 { 3409 void *argptr; 3410 struct dm_ioctl *host_dm; 3411 abi_long guest_data; 3412 uint32_t guest_data_size; 3413 int target_size; 3414 const argtype *arg_type = ie->arg_type; 3415 abi_long ret; 3416 void *big_buf = NULL; 3417 char *host_data; 3418 3419 arg_type++; 3420 target_size = thunk_type_size(arg_type, 0); 3421 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3422 if (!argptr) { 3423 ret = -TARGET_EFAULT; 3424 goto out; 3425 } 3426 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3427 unlock_user(argptr, arg, 0); 3428 3429 /* buf_temp is too small, so fetch things into a bigger buffer */ 3430 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3431 memcpy(big_buf, buf_temp, target_size); 3432 buf_temp = big_buf; 3433 host_dm = big_buf; 3434 3435 guest_data = arg + host_dm->data_start; 3436 if ((guest_data - arg) < 0) { 3437 ret = -EINVAL; 3438 goto out; 3439 } 3440 guest_data_size = host_dm->data_size - host_dm->data_start; 3441 host_data = (char*)host_dm + host_dm->data_start; 3442 3443 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3444 switch (ie->host_cmd) { 3445 case DM_REMOVE_ALL: 3446 case DM_LIST_DEVICES: 3447 case DM_DEV_CREATE: 3448 case DM_DEV_REMOVE: 3449 case DM_DEV_SUSPEND: 3450 case DM_DEV_STATUS: 3451 case DM_DEV_WAIT: 3452 case DM_TABLE_STATUS: 3453 case DM_TABLE_CLEAR: 3454 case DM_TABLE_DEPS: 3455 case DM_LIST_VERSIONS: 3456 /* no input data */ 3457 break; 3458 case DM_DEV_RENAME: 3459 case DM_DEV_SET_GEOMETRY: 3460 /* data contains only strings */ 3461 memcpy(host_data, argptr, guest_data_size); 3462 break; 3463 case DM_TARGET_MSG: 3464 memcpy(host_data, argptr, guest_data_size); 3465 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3466 break; 3467 case 
DM_TABLE_LOAD: 3468 { 3469 void *gspec = argptr; 3470 void *cur_data = host_data; 3471 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3472 int spec_size = thunk_type_size(arg_type, 0); 3473 int i; 3474 3475 for (i = 0; i < host_dm->target_count; i++) { 3476 struct dm_target_spec *spec = cur_data; 3477 uint32_t next; 3478 int slen; 3479 3480 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3481 slen = strlen((char*)gspec + spec_size) + 1; 3482 next = spec->next; 3483 spec->next = sizeof(*spec) + slen; 3484 strcpy((char*)&spec[1], gspec + spec_size); 3485 gspec += next; 3486 cur_data += spec->next; 3487 } 3488 break; 3489 } 3490 default: 3491 ret = -TARGET_EINVAL; 3492 goto out; 3493 } 3494 unlock_user(argptr, guest_data, 0); 3495 3496 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3497 if (!is_error(ret)) { 3498 guest_data = arg + host_dm->data_start; 3499 guest_data_size = host_dm->data_size - host_dm->data_start; 3500 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3501 switch (ie->host_cmd) { 3502 case DM_REMOVE_ALL: 3503 case DM_DEV_CREATE: 3504 case DM_DEV_REMOVE: 3505 case DM_DEV_RENAME: 3506 case DM_DEV_SUSPEND: 3507 case DM_DEV_STATUS: 3508 case DM_TABLE_LOAD: 3509 case DM_TABLE_CLEAR: 3510 case DM_TARGET_MSG: 3511 case DM_DEV_SET_GEOMETRY: 3512 /* no return data */ 3513 break; 3514 case DM_LIST_DEVICES: 3515 { 3516 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3517 uint32_t remaining_data = guest_data_size; 3518 void *cur_data = argptr; 3519 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3520 int nl_size = 12; /* can't use thunk_size due to alignment */ 3521 3522 while (1) { 3523 uint32_t next = nl->next; 3524 if (next) { 3525 nl->next = nl_size + (strlen(nl->name) + 1); 3526 } 3527 if (remaining_data < nl->next) { 3528 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3529 break; 3530 } 3531 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3532 strcpy(cur_data + nl_size, nl->name); 3533 
cur_data += nl->next; 3534 remaining_data -= nl->next; 3535 if (!next) { 3536 break; 3537 } 3538 nl = (void*)nl + next; 3539 } 3540 break; 3541 } 3542 case DM_DEV_WAIT: 3543 case DM_TABLE_STATUS: 3544 { 3545 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3546 void *cur_data = argptr; 3547 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3548 int spec_size = thunk_type_size(arg_type, 0); 3549 int i; 3550 3551 for (i = 0; i < host_dm->target_count; i++) { 3552 uint32_t next = spec->next; 3553 int slen = strlen((char*)&spec[1]) + 1; 3554 spec->next = (cur_data - argptr) + spec_size + slen; 3555 if (guest_data_size < spec->next) { 3556 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3557 break; 3558 } 3559 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3560 strcpy(cur_data + spec_size, (char*)&spec[1]); 3561 cur_data = argptr + spec->next; 3562 spec = (void*)host_dm + host_dm->data_start + next; 3563 } 3564 break; 3565 } 3566 case DM_TABLE_DEPS: 3567 { 3568 void *hdata = (void*)host_dm + host_dm->data_start; 3569 int count = *(uint32_t*)hdata; 3570 uint64_t *hdev = hdata + 8; 3571 uint64_t *gdev = argptr + 8; 3572 int i; 3573 3574 *(uint32_t*)argptr = tswap32(count); 3575 for (i = 0; i < count; i++) { 3576 *gdev = tswap64(*hdev); 3577 gdev++; 3578 hdev++; 3579 } 3580 break; 3581 } 3582 case DM_LIST_VERSIONS: 3583 { 3584 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3585 uint32_t remaining_data = guest_data_size; 3586 void *cur_data = argptr; 3587 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3588 int vers_size = thunk_type_size(arg_type, 0); 3589 3590 while (1) { 3591 uint32_t next = vers->next; 3592 if (next) { 3593 vers->next = vers_size + (strlen(vers->name) + 1); 3594 } 3595 if (remaining_data < vers->next) { 3596 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3597 break; 3598 } 3599 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3600 strcpy(cur_data + vers_size, vers->name); 3601 
cur_data += vers->next; 3602 remaining_data -= vers->next; 3603 if (!next) { 3604 break; 3605 } 3606 vers = (void*)vers + next; 3607 } 3608 break; 3609 } 3610 default: 3611 ret = -TARGET_EINVAL; 3612 goto out; 3613 } 3614 unlock_user(argptr, guest_data, guest_data_size); 3615 3616 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3617 if (!argptr) { 3618 ret = -TARGET_EFAULT; 3619 goto out; 3620 } 3621 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3622 unlock_user(argptr, arg, target_size); 3623 } 3624 out: 3625 g_free(big_buf); 3626 return ret; 3627 } 3628 3629 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3630 int fd, abi_long cmd, abi_long arg) 3631 { 3632 const argtype *arg_type = ie->arg_type; 3633 const StructEntry *se; 3634 const argtype *field_types; 3635 const int *dst_offsets, *src_offsets; 3636 int target_size; 3637 void *argptr; 3638 abi_ulong *target_rt_dev_ptr; 3639 unsigned long *host_rt_dev_ptr; 3640 abi_long ret; 3641 int i; 3642 3643 assert(ie->access == IOC_W); 3644 assert(*arg_type == TYPE_PTR); 3645 arg_type++; 3646 assert(*arg_type == TYPE_STRUCT); 3647 target_size = thunk_type_size(arg_type, 0); 3648 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3649 if (!argptr) { 3650 return -TARGET_EFAULT; 3651 } 3652 arg_type++; 3653 assert(*arg_type == (int)STRUCT_rtentry); 3654 se = struct_entries + *arg_type++; 3655 assert(se->convert[0] == NULL); 3656 /* convert struct here to be able to catch rt_dev string */ 3657 field_types = se->field_types; 3658 dst_offsets = se->field_offsets[THUNK_HOST]; 3659 src_offsets = se->field_offsets[THUNK_TARGET]; 3660 for (i = 0; i < se->nb_fields; i++) { 3661 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3662 assert(*field_types == TYPE_PTRVOID); 3663 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3664 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3665 if (*target_rt_dev_ptr != 0) { 3666 *host_rt_dev_ptr = (unsigned 
long)lock_user_string( 3667 tswapal(*target_rt_dev_ptr)); 3668 if (!*host_rt_dev_ptr) { 3669 unlock_user(argptr, arg, 0); 3670 return -TARGET_EFAULT; 3671 } 3672 } else { 3673 *host_rt_dev_ptr = 0; 3674 } 3675 field_types++; 3676 continue; 3677 } 3678 field_types = thunk_convert(buf_temp + dst_offsets[i], 3679 argptr + src_offsets[i], 3680 field_types, THUNK_HOST); 3681 } 3682 unlock_user(argptr, arg, 0); 3683 3684 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3685 if (*host_rt_dev_ptr != 0) { 3686 unlock_user((void *)*host_rt_dev_ptr, 3687 *target_rt_dev_ptr, 0); 3688 } 3689 return ret; 3690 } 3691 3692 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 3693 int fd, abi_long cmd, abi_long arg) 3694 { 3695 int sig = target_to_host_signal(arg); 3696 return get_errno(ioctl(fd, ie->host_cmd, sig)); 3697 } 3698 3699 static IOCTLEntry ioctl_entries[] = { 3700 #define IOCTL(cmd, access, ...) \ 3701 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3702 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3703 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3704 #include "ioctls.h" 3705 { 0, 0, }, 3706 }; 3707 3708 /* ??? Implement proper locking for ioctls. */ 3709 /* do_ioctl() Must return target values and target errnos. 
/* ??? Implement proper locking for ioctls.  */
/* Dispatch a guest ioctl: find the target command in ioctl_entries, then
 * either hand off to a command-specific do_ioctl handler or convert the
 * argument with the generic thunk machinery according to its declared
 * type and access mode.  do_ioctl() Must return target values and target
 * errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table is terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Command has a dedicated handler (fiemap, ifconf, dm, ...). */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* integer argument (or pointer passed through unconverted) */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes only: run the ioctl on the host temp buffer,
             * then convert the result out to guest memory */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads only: convert guest memory into the host temp
             * buffer before issuing the ioctl */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, run the ioctl, convert back out on success */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

/* Translation table for termios c_iflag bits (target <-> host).
 * Columns: target mask, target bits, host mask, host bits. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};

/* Translation table for termios c_oflag bits; the multi-bit delay
 * fields (NLDLY, CRDLY, ...) get one row per value. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* Translation table for termios c_cflag bits; CBAUD and CSIZE are
 * multi-bit fields with one row per value. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};

/* Translation table for termios c_lflag bits (target <-> host). */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};

/* Convert a target struct termios to host layout: translate each flag
 * word through its bitmask table (with byte swapping) and copy the
 * control characters index by index via the TARGET_V* mapping. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* zero first: target and host may define different sets of entries */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Convert a host struct termios to target layout: the exact inverse of
 * target_to_host_termios(). */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* zero first: target and host may define different sets of entries */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

/* Thunk descriptor so the generic ioctl path can convert termios
 * structures via the two converters above. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};

/* Translation table for mmap() flag bits (target <-> host). */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
	  MAP_NORESERVE },
	{ 0, 0, 0, 0 }
};

#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT; lazily allocated by write_ldt(). */
static uint8_t *ldt_table;

/* modify_ldt func 0: copy up to bytecount bytes of the emulated LDT back
 * to guest memory at ptr.  Returns the number of bytes copied, 0 if no
 * LDT has been allocated yet, or -TARGET_EFAULT. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/* modify_ldt funcs 1 and 0x11: install (or clear) one LDT descriptor for
 * the guest.  The descriptor-encoding logic mirrors the Linux kernel's
 * write_ldt(); oldmode selects the legacy modify_ldt ABI, which rejects
 * contents==3 and does not honor the 'useable' bit.  Allocates the
 * emulated LDT backing store on first use. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    /* byteswap the guest's user_desc into ldt_info */
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the flag bits (same layout as the kernel's user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                        TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                        PROT_READ|PROT_WRITE,
                        MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* encode base/limit/flags into the two 32-bit descriptor words */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
/* Dispatch the modify_ldt syscall: func 0 reads the LDT, 1 writes in the
 * legacy format, 0x11 writes in the new format. */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Emulate set_thread_area: install a TLS descriptor in the guest's GDT.
 * If the guest passes entry_number == -1, a free slot in the TLS range is
 * chosen and written back to the guest's user_desc.  Encoding logic
 * mirrors the Linux kernel (see write_ldt above). */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: we may write the chosen entry_number back */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* pick the first empty slot in the TLS range of the GDT */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* unpack the flag bits (same layout as the kernel's user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* encode base/limit/flags into the two 32-bit descriptor words */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* Emulate get_thread_area: decode the GDT descriptor selected by the
 * guest's entry_number back into a user_desc and write it to guest
 * memory.  Inverse of do_set_thread_area's encoding. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* decode the descriptor words back into user_desc flag bits */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#ifdef TARGET_ABI32 4240 lm = 0; 4241 #else 4242 lm = (entry_2 >> 21) & 1; 4243 #endif 4244 flags = (seg_32bit << 0) | (contents << 1) | 4245 (read_exec_only << 3) | (limit_in_pages << 4) | 4246 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4247 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4248 base_addr = (entry_1 >> 16) | 4249 (entry_2 & 0xff000000) | 4250 ((entry_2 & 0xff) << 16); 4251 target_ldt_info->base_addr = tswapal(base_addr); 4252 target_ldt_info->limit = tswap32(limit); 4253 target_ldt_info->flags = tswap32(flags); 4254 unlock_user_struct(target_ldt_info, ptr, 1); 4255 return 0; 4256 } 4257 #endif /* TARGET_I386 && TARGET_ABI32 */ 4258 4259 #ifndef TARGET_ABI32 4260 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4261 { 4262 abi_long ret = 0; 4263 abi_ulong val; 4264 int idx; 4265 4266 switch(code) { 4267 case TARGET_ARCH_SET_GS: 4268 case TARGET_ARCH_SET_FS: 4269 if (code == TARGET_ARCH_SET_GS) 4270 idx = R_GS; 4271 else 4272 idx = R_FS; 4273 cpu_x86_load_seg(env, idx, 0); 4274 env->segs[idx].base = addr; 4275 break; 4276 case TARGET_ARCH_GET_GS: 4277 case TARGET_ARCH_GET_FS: 4278 if (code == TARGET_ARCH_GET_GS) 4279 idx = R_GS; 4280 else 4281 idx = R_FS; 4282 val = env->segs[idx].base; 4283 if (put_user(val, addr, abi_ulong)) 4284 ret = -TARGET_EFAULT; 4285 break; 4286 default: 4287 ret = -TARGET_EINVAL; 4288 break; 4289 } 4290 return ret; 4291 } 4292 #endif 4293 4294 #endif /* defined(TARGET_I386) */ 4295 4296 #define NEW_STACK_SIZE 0x40000 4297 4298 4299 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 4300 typedef struct { 4301 CPUArchState *env; 4302 pthread_mutex_t mutex; 4303 pthread_cond_t cond; 4304 pthread_t thread; 4305 uint32_t tid; 4306 abi_ulong child_tidptr; 4307 abi_ulong parent_tidptr; 4308 sigset_t sigmask; 4309 } new_thread_info; 4310 4311 static void *clone_func(void *arg) 4312 { 4313 new_thread_info *info = arg; 4314 CPUArchState *env; 4315 CPUState *cpu; 4316 TaskState *ts; 4317 4318 
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Write the new tid wherever the clone flags asked for it. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.
       (clone_lock is held by do_fork() across thread setup.) */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   CLONE_VM clones become host threads; anything else becomes a host fork().
   CLONE_VFORK is emulated as a plain fork. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        /* Keep the NPTL flags for local handling but strip them from what
           the rest of the code sees. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): each of these three assignments overwrites ret, so
           failures of attr_init/setstacksize/setdetachstate go unchecked;
           only pthread_create's result is acted upon. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            return -EINVAL;
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}

/* warning : doesn't handle linux specific flags...
 */
/* Map a target fcntl command number to the host's equivalent, or
 * -TARGET_EINVAL for commands this layer does not translate. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        /* These command numbers are assumed identical on host and target. */
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK;
    case TARGET_F_SETLK:
        return F_SETLK;
    case TARGET_F_SETLKW:
        return F_SETLKW;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
    default:
        return -TARGET_EINVAL;
    }
    return -TARGET_EINVAL;   /* not reached */
}

/* Translation table for struct flock l_type values between target and host. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};

/* Emulate fcntl(2): convert struct flock / f_owner_ex arguments between
 * guest and host representations and forward to the host fcntl().
 * Returns the host result converted via get_errno(), or -TARGET_Exxx. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            /* Copy the lock description the kernel filled in back out. */
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the >> 1 applied to the converted l_type here (and
           in the other 64-bit cases below) has no counterpart in the 32-bit
           paths above — verify against the target_flock64 l_type encoding. */
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Convert the O_* flag bits back to the target's encoding. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret =
 get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Commands whose argument is a plain integer: pass it through. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Untranslated command: forward the raw target command number. */
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit ABI range; out-of-range ids map to
 * the overflow id 65534. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* Same clamping for gids. */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* Widen a 16-bit gid, preserving the -1 "no change" sentinel. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
/* Byte-swap an id at the ABI's id width (16 bits here). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* One-time setup: register all thunk struct descriptions, build the
 * target->host errno table, and patch/validate the ioctl table. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* Replace the all-ones size field with the real thunk size. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#if TARGET_ABI_BITS == 32
/* Combine a register pair into a 64-bit value, respecting the target's
 * word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already fits in one register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /*
 TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs where 64-bit syscall arguments must start on an even
 * register, the offset halves arrive in arg3/arg4 instead of arg2/arg3. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64 above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

/* Copy a struct timespec from guest memory into *host_ts.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

/* Copy *host_ts out to a struct timespec in guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

/* Copy a struct itimerspec from guest memory into *host_itspec.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
        tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
        tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    /* NOTE(review): copy flag 1 on a VERIFY_READ lock is inconsistent with
       target_to_host_timespec above, which unlocks with 0. */
    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}

/* Copy *host_its out to a struct itimerspec in guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    /* NOTE(review): copy flag 0 on a VERIFY_WRITE lock — compare with
       host_to_target_timespec above which unlocks with 1; verify the data
       actually reaches guest memory on targets where the flag matters. */
    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}

#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Convert a host struct stat into the guest's stat64 layout at target_addr.
 * ARM EABI guests use a distinct padded layout.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.
 However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
/* Emulate futex(2) by forwarding to the host futex on the guest's memory
 * (translated with g2h()).  Returns a get_errno() result or -TARGET_ENOSYS
 * for unknown operations. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /* NOTE(review): return value ignored — an unreadable timeout
               address silently waits with an uninitialized ts. */
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory, so swap it to match. */
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler. We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}

/* Fake /proc/self/cmdline: copy the real one into FD, skipping the first
 * argv entry (the qemu binary path) so the guest sees its own command. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            fd_orig = close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* NOTE(review): scans sizeof(buf) bytes even when the read
               returned fewer — should be bounded by nb_read. */
            cp_buf = memchr(buf, 0, sizeof(buf));
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                /* NOTE(review): fd_orig is leaked on this error path. */
                return -1;
            }
        }
    }

    return close(fd_orig);
}

/* Fake /proc/self/maps: rewrite the host's maps into guest addresses,
 * dropping the host [stack] entry and (on some targets) appending a
 * synthetic guest stack line. */
static int open_self_maps(void *cpu_env, int fd)
{
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
#endif
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -EACCES;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        /* NOTE(review): "%512s" can write 513 bytes (512 + NUL) into a
           512-byte buffer — verify against the sscanf width semantics. */
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (!strncmp(path, "[stack]", 7)) {
            continue;
        }
        /* Only emit mappings that fall inside the guest address space. */
        if (h2g_valid(min) && h2g_valid(max)) {
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max), flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);
        }
    }

    free(line);
    fclose(fp);

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
            (unsigned long long)ts->info->stack_limit,
            (unsigned long long)(ts->info->start_stack +
                                 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
            (unsigned long long)0);
#endif

    return 0;
}

/* Fake /proc/self/stat: only pid, comm and the stack-bottom field carry
 * real values; the other 41 fields are written as zero. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ?
 '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}

/* Fake /proc/self/auxv: copy the auxv image saved on the guest stack at
 * load time into FD, then rewind FD to the start. */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len have been advanced/decremented by the
           loop, so this unlock does not see the original values — confirm
           lock_user/unlock_user tolerate that. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}

/* Return non-zero when FILENAME names /proc/<self-or-own-pid>/<entry>. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used for cross-endian fake /proc entries. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Fake /proc/net/route for cross-endian guests: byte-swap the address
 * columns so the guest sees them in its own byte order. */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -EACCES;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

/* open(2) front end: intercept the /proc entries we emulate (serving them
 * from an unlinked temp file) and forward everything else to the host. */
static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: prefer the fd the loader already has for the binary. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ?
 execfd : get_errno(open(exec_path, flags, mode));
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the anonymous file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            close(fd);
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return get_errno(open(path(pathname), flags, mode));
}

/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_long ret;
    struct stat st;
    struct statfs stfs;
    void *p;

#ifdef DEBUG
    gemu_log("syscall %d", num);
#endif
    if(do_strace)
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread. */
        /* FIXME: This probably breaks if a signal arrives. We should probably
           be disabling signals.
*/ 5342 if (CPU_NEXT(first_cpu)) { 5343 TaskState *ts; 5344 5345 cpu_list_lock(); 5346 /* Remove the CPU from the list. */ 5347 QTAILQ_REMOVE(&cpus, cpu, node); 5348 cpu_list_unlock(); 5349 ts = cpu->opaque; 5350 if (ts->child_tidptr) { 5351 put_user_u32(0, ts->child_tidptr); 5352 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5353 NULL, NULL, 0); 5354 } 5355 thread_cpu = NULL; 5356 object_unref(OBJECT(cpu)); 5357 g_free(ts); 5358 pthread_exit(NULL); 5359 } 5360 #ifdef TARGET_GPROF 5361 _mcleanup(); 5362 #endif 5363 gdb_exit(cpu_env, arg1); 5364 _exit(arg1); 5365 ret = 0; /* avoid warning */ 5366 break; 5367 case TARGET_NR_read: 5368 if (arg3 == 0) 5369 ret = 0; 5370 else { 5371 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5372 goto efault; 5373 ret = get_errno(read(arg1, p, arg3)); 5374 unlock_user(p, arg2, ret); 5375 } 5376 break; 5377 case TARGET_NR_write: 5378 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5379 goto efault; 5380 ret = get_errno(write(arg1, p, arg3)); 5381 unlock_user(p, arg2, 0); 5382 break; 5383 case TARGET_NR_open: 5384 if (!(p = lock_user_string(arg1))) 5385 goto efault; 5386 ret = get_errno(do_open(cpu_env, p, 5387 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5388 arg3)); 5389 unlock_user(p, arg1, 0); 5390 break; 5391 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5392 case TARGET_NR_openat: 5393 if (!(p = lock_user_string(arg2))) 5394 goto efault; 5395 ret = get_errno(sys_openat(arg1, 5396 path(p), 5397 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5398 arg4)); 5399 unlock_user(p, arg2, 0); 5400 break; 5401 #endif 5402 case TARGET_NR_close: 5403 ret = get_errno(close(arg1)); 5404 break; 5405 case TARGET_NR_brk: 5406 ret = do_brk(arg1); 5407 break; 5408 case TARGET_NR_fork: 5409 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5410 break; 5411 #ifdef TARGET_NR_waitpid 5412 case TARGET_NR_waitpid: 5413 { 5414 int status; 5415 ret = get_errno(waitpid(arg1, &status, arg3)); 5416 if (!is_error(ret) && arg2 && 
ret 5417 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5418 goto efault; 5419 } 5420 break; 5421 #endif 5422 #ifdef TARGET_NR_waitid 5423 case TARGET_NR_waitid: 5424 { 5425 siginfo_t info; 5426 info.si_pid = 0; 5427 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5428 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5429 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5430 goto efault; 5431 host_to_target_siginfo(p, &info); 5432 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5433 } 5434 } 5435 break; 5436 #endif 5437 #ifdef TARGET_NR_creat /* not on alpha */ 5438 case TARGET_NR_creat: 5439 if (!(p = lock_user_string(arg1))) 5440 goto efault; 5441 ret = get_errno(creat(p, arg2)); 5442 unlock_user(p, arg1, 0); 5443 break; 5444 #endif 5445 case TARGET_NR_link: 5446 { 5447 void * p2; 5448 p = lock_user_string(arg1); 5449 p2 = lock_user_string(arg2); 5450 if (!p || !p2) 5451 ret = -TARGET_EFAULT; 5452 else 5453 ret = get_errno(link(p, p2)); 5454 unlock_user(p2, arg2, 0); 5455 unlock_user(p, arg1, 0); 5456 } 5457 break; 5458 #if defined(TARGET_NR_linkat) 5459 case TARGET_NR_linkat: 5460 { 5461 void * p2 = NULL; 5462 if (!arg2 || !arg4) 5463 goto efault; 5464 p = lock_user_string(arg2); 5465 p2 = lock_user_string(arg4); 5466 if (!p || !p2) 5467 ret = -TARGET_EFAULT; 5468 else 5469 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5470 unlock_user(p, arg2, 0); 5471 unlock_user(p2, arg4, 0); 5472 } 5473 break; 5474 #endif 5475 case TARGET_NR_unlink: 5476 if (!(p = lock_user_string(arg1))) 5477 goto efault; 5478 ret = get_errno(unlink(p)); 5479 unlock_user(p, arg1, 0); 5480 break; 5481 #if defined(TARGET_NR_unlinkat) 5482 case TARGET_NR_unlinkat: 5483 if (!(p = lock_user_string(arg2))) 5484 goto efault; 5485 ret = get_errno(unlinkat(arg1, p, arg3)); 5486 unlock_user(p, arg2, 0); 5487 break; 5488 #endif 5489 case TARGET_NR_execve: 5490 { 5491 char **argp, **envp; 5492 int argc, envc; 5493 abi_ulong gp; 5494 abi_ulong guest_argp; 5495 
abi_ulong guest_envp; 5496 abi_ulong addr; 5497 char **q; 5498 int total_size = 0; 5499 5500 argc = 0; 5501 guest_argp = arg2; 5502 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5503 if (get_user_ual(addr, gp)) 5504 goto efault; 5505 if (!addr) 5506 break; 5507 argc++; 5508 } 5509 envc = 0; 5510 guest_envp = arg3; 5511 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5512 if (get_user_ual(addr, gp)) 5513 goto efault; 5514 if (!addr) 5515 break; 5516 envc++; 5517 } 5518 5519 argp = alloca((argc + 1) * sizeof(void *)); 5520 envp = alloca((envc + 1) * sizeof(void *)); 5521 5522 for (gp = guest_argp, q = argp; gp; 5523 gp += sizeof(abi_ulong), q++) { 5524 if (get_user_ual(addr, gp)) 5525 goto execve_efault; 5526 if (!addr) 5527 break; 5528 if (!(*q = lock_user_string(addr))) 5529 goto execve_efault; 5530 total_size += strlen(*q) + 1; 5531 } 5532 *q = NULL; 5533 5534 for (gp = guest_envp, q = envp; gp; 5535 gp += sizeof(abi_ulong), q++) { 5536 if (get_user_ual(addr, gp)) 5537 goto execve_efault; 5538 if (!addr) 5539 break; 5540 if (!(*q = lock_user_string(addr))) 5541 goto execve_efault; 5542 total_size += strlen(*q) + 1; 5543 } 5544 *q = NULL; 5545 5546 /* This case will not be caught by the host's execve() if its 5547 page size is bigger than the target's. 
*/ 5548 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5549 ret = -TARGET_E2BIG; 5550 goto execve_end; 5551 } 5552 if (!(p = lock_user_string(arg1))) 5553 goto execve_efault; 5554 ret = get_errno(execve(p, argp, envp)); 5555 unlock_user(p, arg1, 0); 5556 5557 goto execve_end; 5558 5559 execve_efault: 5560 ret = -TARGET_EFAULT; 5561 5562 execve_end: 5563 for (gp = guest_argp, q = argp; *q; 5564 gp += sizeof(abi_ulong), q++) { 5565 if (get_user_ual(addr, gp) 5566 || !addr) 5567 break; 5568 unlock_user(*q, addr, 0); 5569 } 5570 for (gp = guest_envp, q = envp; *q; 5571 gp += sizeof(abi_ulong), q++) { 5572 if (get_user_ual(addr, gp) 5573 || !addr) 5574 break; 5575 unlock_user(*q, addr, 0); 5576 } 5577 } 5578 break; 5579 case TARGET_NR_chdir: 5580 if (!(p = lock_user_string(arg1))) 5581 goto efault; 5582 ret = get_errno(chdir(p)); 5583 unlock_user(p, arg1, 0); 5584 break; 5585 #ifdef TARGET_NR_time 5586 case TARGET_NR_time: 5587 { 5588 time_t host_time; 5589 ret = get_errno(time(&host_time)); 5590 if (!is_error(ret) 5591 && arg1 5592 && put_user_sal(host_time, arg1)) 5593 goto efault; 5594 } 5595 break; 5596 #endif 5597 case TARGET_NR_mknod: 5598 if (!(p = lock_user_string(arg1))) 5599 goto efault; 5600 ret = get_errno(mknod(p, arg2, arg3)); 5601 unlock_user(p, arg1, 0); 5602 break; 5603 #if defined(TARGET_NR_mknodat) 5604 case TARGET_NR_mknodat: 5605 if (!(p = lock_user_string(arg2))) 5606 goto efault; 5607 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5608 unlock_user(p, arg2, 0); 5609 break; 5610 #endif 5611 case TARGET_NR_chmod: 5612 if (!(p = lock_user_string(arg1))) 5613 goto efault; 5614 ret = get_errno(chmod(p, arg2)); 5615 unlock_user(p, arg1, 0); 5616 break; 5617 #ifdef TARGET_NR_break 5618 case TARGET_NR_break: 5619 goto unimplemented; 5620 #endif 5621 #ifdef TARGET_NR_oldstat 5622 case TARGET_NR_oldstat: 5623 goto unimplemented; 5624 #endif 5625 case TARGET_NR_lseek: 5626 ret = get_errno(lseek(arg1, arg2, arg3)); 5627 break; 5628 #if 
defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5629 /* Alpha specific */ 5630 case TARGET_NR_getxpid: 5631 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5632 ret = get_errno(getpid()); 5633 break; 5634 #endif 5635 #ifdef TARGET_NR_getpid 5636 case TARGET_NR_getpid: 5637 ret = get_errno(getpid()); 5638 break; 5639 #endif 5640 case TARGET_NR_mount: 5641 { 5642 /* need to look at the data field */ 5643 void *p2, *p3; 5644 5645 if (arg1) { 5646 p = lock_user_string(arg1); 5647 if (!p) { 5648 goto efault; 5649 } 5650 } else { 5651 p = NULL; 5652 } 5653 5654 p2 = lock_user_string(arg2); 5655 if (!p2) { 5656 if (arg1) { 5657 unlock_user(p, arg1, 0); 5658 } 5659 goto efault; 5660 } 5661 5662 if (arg3) { 5663 p3 = lock_user_string(arg3); 5664 if (!p3) { 5665 if (arg1) { 5666 unlock_user(p, arg1, 0); 5667 } 5668 unlock_user(p2, arg2, 0); 5669 goto efault; 5670 } 5671 } else { 5672 p3 = NULL; 5673 } 5674 5675 /* FIXME - arg5 should be locked, but it isn't clear how to 5676 * do that since it's not guaranteed to be a NULL-terminated 5677 * string. 
5678 */ 5679 if (!arg5) { 5680 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 5681 } else { 5682 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 5683 } 5684 ret = get_errno(ret); 5685 5686 if (arg1) { 5687 unlock_user(p, arg1, 0); 5688 } 5689 unlock_user(p2, arg2, 0); 5690 if (arg3) { 5691 unlock_user(p3, arg3, 0); 5692 } 5693 } 5694 break; 5695 #ifdef TARGET_NR_umount 5696 case TARGET_NR_umount: 5697 if (!(p = lock_user_string(arg1))) 5698 goto efault; 5699 ret = get_errno(umount(p)); 5700 unlock_user(p, arg1, 0); 5701 break; 5702 #endif 5703 #ifdef TARGET_NR_stime /* not on alpha */ 5704 case TARGET_NR_stime: 5705 { 5706 time_t host_time; 5707 if (get_user_sal(host_time, arg1)) 5708 goto efault; 5709 ret = get_errno(stime(&host_time)); 5710 } 5711 break; 5712 #endif 5713 case TARGET_NR_ptrace: 5714 goto unimplemented; 5715 #ifdef TARGET_NR_alarm /* not on alpha */ 5716 case TARGET_NR_alarm: 5717 ret = alarm(arg1); 5718 break; 5719 #endif 5720 #ifdef TARGET_NR_oldfstat 5721 case TARGET_NR_oldfstat: 5722 goto unimplemented; 5723 #endif 5724 #ifdef TARGET_NR_pause /* not on alpha */ 5725 case TARGET_NR_pause: 5726 ret = get_errno(pause()); 5727 break; 5728 #endif 5729 #ifdef TARGET_NR_utime 5730 case TARGET_NR_utime: 5731 { 5732 struct utimbuf tbuf, *host_tbuf; 5733 struct target_utimbuf *target_tbuf; 5734 if (arg2) { 5735 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5736 goto efault; 5737 tbuf.actime = tswapal(target_tbuf->actime); 5738 tbuf.modtime = tswapal(target_tbuf->modtime); 5739 unlock_user_struct(target_tbuf, arg2, 0); 5740 host_tbuf = &tbuf; 5741 } else { 5742 host_tbuf = NULL; 5743 } 5744 if (!(p = lock_user_string(arg1))) 5745 goto efault; 5746 ret = get_errno(utime(p, host_tbuf)); 5747 unlock_user(p, arg1, 0); 5748 } 5749 break; 5750 #endif 5751 case TARGET_NR_utimes: 5752 { 5753 struct timeval *tvp, tv[2]; 5754 if (arg2) { 5755 if (copy_from_user_timeval(&tv[0], arg2) 5756 || copy_from_user_timeval(&tv[1], 5757 arg2 + 
sizeof(struct target_timeval))) 5758 goto efault; 5759 tvp = tv; 5760 } else { 5761 tvp = NULL; 5762 } 5763 if (!(p = lock_user_string(arg1))) 5764 goto efault; 5765 ret = get_errno(utimes(p, tvp)); 5766 unlock_user(p, arg1, 0); 5767 } 5768 break; 5769 #if defined(TARGET_NR_futimesat) 5770 case TARGET_NR_futimesat: 5771 { 5772 struct timeval *tvp, tv[2]; 5773 if (arg3) { 5774 if (copy_from_user_timeval(&tv[0], arg3) 5775 || copy_from_user_timeval(&tv[1], 5776 arg3 + sizeof(struct target_timeval))) 5777 goto efault; 5778 tvp = tv; 5779 } else { 5780 tvp = NULL; 5781 } 5782 if (!(p = lock_user_string(arg2))) 5783 goto efault; 5784 ret = get_errno(futimesat(arg1, path(p), tvp)); 5785 unlock_user(p, arg2, 0); 5786 } 5787 break; 5788 #endif 5789 #ifdef TARGET_NR_stty 5790 case TARGET_NR_stty: 5791 goto unimplemented; 5792 #endif 5793 #ifdef TARGET_NR_gtty 5794 case TARGET_NR_gtty: 5795 goto unimplemented; 5796 #endif 5797 case TARGET_NR_access: 5798 if (!(p = lock_user_string(arg1))) 5799 goto efault; 5800 ret = get_errno(access(path(p), arg2)); 5801 unlock_user(p, arg1, 0); 5802 break; 5803 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5804 case TARGET_NR_faccessat: 5805 if (!(p = lock_user_string(arg2))) 5806 goto efault; 5807 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5808 unlock_user(p, arg2, 0); 5809 break; 5810 #endif 5811 #ifdef TARGET_NR_nice /* not on alpha */ 5812 case TARGET_NR_nice: 5813 ret = get_errno(nice(arg1)); 5814 break; 5815 #endif 5816 #ifdef TARGET_NR_ftime 5817 case TARGET_NR_ftime: 5818 goto unimplemented; 5819 #endif 5820 case TARGET_NR_sync: 5821 sync(); 5822 ret = 0; 5823 break; 5824 case TARGET_NR_kill: 5825 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5826 break; 5827 case TARGET_NR_rename: 5828 { 5829 void *p2; 5830 p = lock_user_string(arg1); 5831 p2 = lock_user_string(arg2); 5832 if (!p || !p2) 5833 ret = -TARGET_EFAULT; 5834 else 5835 ret = get_errno(rename(p, p2)); 5836 unlock_user(p2, arg2, 0); 5837 
unlock_user(p, arg1, 0); 5838 } 5839 break; 5840 #if defined(TARGET_NR_renameat) 5841 case TARGET_NR_renameat: 5842 { 5843 void *p2; 5844 p = lock_user_string(arg2); 5845 p2 = lock_user_string(arg4); 5846 if (!p || !p2) 5847 ret = -TARGET_EFAULT; 5848 else 5849 ret = get_errno(renameat(arg1, p, arg3, p2)); 5850 unlock_user(p2, arg4, 0); 5851 unlock_user(p, arg2, 0); 5852 } 5853 break; 5854 #endif 5855 case TARGET_NR_mkdir: 5856 if (!(p = lock_user_string(arg1))) 5857 goto efault; 5858 ret = get_errno(mkdir(p, arg2)); 5859 unlock_user(p, arg1, 0); 5860 break; 5861 #if defined(TARGET_NR_mkdirat) 5862 case TARGET_NR_mkdirat: 5863 if (!(p = lock_user_string(arg2))) 5864 goto efault; 5865 ret = get_errno(mkdirat(arg1, p, arg3)); 5866 unlock_user(p, arg2, 0); 5867 break; 5868 #endif 5869 case TARGET_NR_rmdir: 5870 if (!(p = lock_user_string(arg1))) 5871 goto efault; 5872 ret = get_errno(rmdir(p)); 5873 unlock_user(p, arg1, 0); 5874 break; 5875 case TARGET_NR_dup: 5876 ret = get_errno(dup(arg1)); 5877 break; 5878 case TARGET_NR_pipe: 5879 ret = do_pipe(cpu_env, arg1, 0, 0); 5880 break; 5881 #ifdef TARGET_NR_pipe2 5882 case TARGET_NR_pipe2: 5883 ret = do_pipe(cpu_env, arg1, 5884 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5885 break; 5886 #endif 5887 case TARGET_NR_times: 5888 { 5889 struct target_tms *tmsp; 5890 struct tms tms; 5891 ret = get_errno(times(&tms)); 5892 if (arg1) { 5893 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5894 if (!tmsp) 5895 goto efault; 5896 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5897 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5898 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5899 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5900 } 5901 if (!is_error(ret)) 5902 ret = host_to_target_clock_t(ret); 5903 } 5904 break; 5905 #ifdef TARGET_NR_prof 5906 case TARGET_NR_prof: 5907 goto unimplemented; 5908 #endif 5909 #ifdef 
TARGET_NR_signal 5910 case TARGET_NR_signal: 5911 goto unimplemented; 5912 #endif 5913 case TARGET_NR_acct: 5914 if (arg1 == 0) { 5915 ret = get_errno(acct(NULL)); 5916 } else { 5917 if (!(p = lock_user_string(arg1))) 5918 goto efault; 5919 ret = get_errno(acct(path(p))); 5920 unlock_user(p, arg1, 0); 5921 } 5922 break; 5923 #ifdef TARGET_NR_umount2 5924 case TARGET_NR_umount2: 5925 if (!(p = lock_user_string(arg1))) 5926 goto efault; 5927 ret = get_errno(umount2(p, arg2)); 5928 unlock_user(p, arg1, 0); 5929 break; 5930 #endif 5931 #ifdef TARGET_NR_lock 5932 case TARGET_NR_lock: 5933 goto unimplemented; 5934 #endif 5935 case TARGET_NR_ioctl: 5936 ret = do_ioctl(arg1, arg2, arg3); 5937 break; 5938 case TARGET_NR_fcntl: 5939 ret = do_fcntl(arg1, arg2, arg3); 5940 break; 5941 #ifdef TARGET_NR_mpx 5942 case TARGET_NR_mpx: 5943 goto unimplemented; 5944 #endif 5945 case TARGET_NR_setpgid: 5946 ret = get_errno(setpgid(arg1, arg2)); 5947 break; 5948 #ifdef TARGET_NR_ulimit 5949 case TARGET_NR_ulimit: 5950 goto unimplemented; 5951 #endif 5952 #ifdef TARGET_NR_oldolduname 5953 case TARGET_NR_oldolduname: 5954 goto unimplemented; 5955 #endif 5956 case TARGET_NR_umask: 5957 ret = get_errno(umask(arg1)); 5958 break; 5959 case TARGET_NR_chroot: 5960 if (!(p = lock_user_string(arg1))) 5961 goto efault; 5962 ret = get_errno(chroot(p)); 5963 unlock_user(p, arg1, 0); 5964 break; 5965 case TARGET_NR_ustat: 5966 goto unimplemented; 5967 case TARGET_NR_dup2: 5968 ret = get_errno(dup2(arg1, arg2)); 5969 break; 5970 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5971 case TARGET_NR_dup3: 5972 ret = get_errno(dup3(arg1, arg2, arg3)); 5973 break; 5974 #endif 5975 #ifdef TARGET_NR_getppid /* not on alpha */ 5976 case TARGET_NR_getppid: 5977 ret = get_errno(getppid()); 5978 break; 5979 #endif 5980 case TARGET_NR_getpgrp: 5981 ret = get_errno(getpgrp()); 5982 break; 5983 case TARGET_NR_setsid: 5984 ret = get_errno(setsid()); 5985 break; 5986 #ifdef TARGET_NR_sigaction 5987 case 
TARGET_NR_sigaction: 5988 { 5989 #if defined(TARGET_ALPHA) 5990 struct target_sigaction act, oact, *pact = 0; 5991 struct target_old_sigaction *old_act; 5992 if (arg2) { 5993 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5994 goto efault; 5995 act._sa_handler = old_act->_sa_handler; 5996 target_siginitset(&act.sa_mask, old_act->sa_mask); 5997 act.sa_flags = old_act->sa_flags; 5998 act.sa_restorer = 0; 5999 unlock_user_struct(old_act, arg2, 0); 6000 pact = &act; 6001 } 6002 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6003 if (!is_error(ret) && arg3) { 6004 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6005 goto efault; 6006 old_act->_sa_handler = oact._sa_handler; 6007 old_act->sa_mask = oact.sa_mask.sig[0]; 6008 old_act->sa_flags = oact.sa_flags; 6009 unlock_user_struct(old_act, arg3, 1); 6010 } 6011 #elif defined(TARGET_MIPS) 6012 struct target_sigaction act, oact, *pact, *old_act; 6013 6014 if (arg2) { 6015 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6016 goto efault; 6017 act._sa_handler = old_act->_sa_handler; 6018 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 6019 act.sa_flags = old_act->sa_flags; 6020 unlock_user_struct(old_act, arg2, 0); 6021 pact = &act; 6022 } else { 6023 pact = NULL; 6024 } 6025 6026 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6027 6028 if (!is_error(ret) && arg3) { 6029 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6030 goto efault; 6031 old_act->_sa_handler = oact._sa_handler; 6032 old_act->sa_flags = oact.sa_flags; 6033 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 6034 old_act->sa_mask.sig[1] = 0; 6035 old_act->sa_mask.sig[2] = 0; 6036 old_act->sa_mask.sig[3] = 0; 6037 unlock_user_struct(old_act, arg3, 1); 6038 } 6039 #else 6040 struct target_old_sigaction *old_act; 6041 struct target_sigaction act, oact, *pact; 6042 if (arg2) { 6043 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6044 goto efault; 6045 act._sa_handler = old_act->_sa_handler; 6046 
target_siginitset(&act.sa_mask, old_act->sa_mask); 6047 act.sa_flags = old_act->sa_flags; 6048 act.sa_restorer = old_act->sa_restorer; 6049 unlock_user_struct(old_act, arg2, 0); 6050 pact = &act; 6051 } else { 6052 pact = NULL; 6053 } 6054 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6055 if (!is_error(ret) && arg3) { 6056 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6057 goto efault; 6058 old_act->_sa_handler = oact._sa_handler; 6059 old_act->sa_mask = oact.sa_mask.sig[0]; 6060 old_act->sa_flags = oact.sa_flags; 6061 old_act->sa_restorer = oact.sa_restorer; 6062 unlock_user_struct(old_act, arg3, 1); 6063 } 6064 #endif 6065 } 6066 break; 6067 #endif 6068 case TARGET_NR_rt_sigaction: 6069 { 6070 #if defined(TARGET_ALPHA) 6071 struct target_sigaction act, oact, *pact = 0; 6072 struct target_rt_sigaction *rt_act; 6073 /* ??? arg4 == sizeof(sigset_t). */ 6074 if (arg2) { 6075 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 6076 goto efault; 6077 act._sa_handler = rt_act->_sa_handler; 6078 act.sa_mask = rt_act->sa_mask; 6079 act.sa_flags = rt_act->sa_flags; 6080 act.sa_restorer = arg5; 6081 unlock_user_struct(rt_act, arg2, 0); 6082 pact = &act; 6083 } 6084 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6085 if (!is_error(ret) && arg3) { 6086 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 6087 goto efault; 6088 rt_act->_sa_handler = oact._sa_handler; 6089 rt_act->sa_mask = oact.sa_mask; 6090 rt_act->sa_flags = oact.sa_flags; 6091 unlock_user_struct(rt_act, arg3, 1); 6092 } 6093 #else 6094 struct target_sigaction *act; 6095 struct target_sigaction *oact; 6096 6097 if (arg2) { 6098 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 6099 goto efault; 6100 } else 6101 act = NULL; 6102 if (arg3) { 6103 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 6104 ret = -TARGET_EFAULT; 6105 goto rt_sigaction_fail; 6106 } 6107 } else 6108 oact = NULL; 6109 ret = get_errno(do_sigaction(arg1, act, oact)); 6110 rt_sigaction_fail: 6111 if (act) 6112 
unlock_user_struct(act, arg2, 0); 6113 if (oact) 6114 unlock_user_struct(oact, arg3, 1); 6115 #endif 6116 } 6117 break; 6118 #ifdef TARGET_NR_sgetmask /* not on alpha */ 6119 case TARGET_NR_sgetmask: 6120 { 6121 sigset_t cur_set; 6122 abi_ulong target_set; 6123 do_sigprocmask(0, NULL, &cur_set); 6124 host_to_target_old_sigset(&target_set, &cur_set); 6125 ret = target_set; 6126 } 6127 break; 6128 #endif 6129 #ifdef TARGET_NR_ssetmask /* not on alpha */ 6130 case TARGET_NR_ssetmask: 6131 { 6132 sigset_t set, oset, cur_set; 6133 abi_ulong target_set = arg1; 6134 do_sigprocmask(0, NULL, &cur_set); 6135 target_to_host_old_sigset(&set, &target_set); 6136 sigorset(&set, &set, &cur_set); 6137 do_sigprocmask(SIG_SETMASK, &set, &oset); 6138 host_to_target_old_sigset(&target_set, &oset); 6139 ret = target_set; 6140 } 6141 break; 6142 #endif 6143 #ifdef TARGET_NR_sigprocmask 6144 case TARGET_NR_sigprocmask: 6145 { 6146 #if defined(TARGET_ALPHA) 6147 sigset_t set, oldset; 6148 abi_ulong mask; 6149 int how; 6150 6151 switch (arg1) { 6152 case TARGET_SIG_BLOCK: 6153 how = SIG_BLOCK; 6154 break; 6155 case TARGET_SIG_UNBLOCK: 6156 how = SIG_UNBLOCK; 6157 break; 6158 case TARGET_SIG_SETMASK: 6159 how = SIG_SETMASK; 6160 break; 6161 default: 6162 ret = -TARGET_EINVAL; 6163 goto fail; 6164 } 6165 mask = arg2; 6166 target_to_host_old_sigset(&set, &mask); 6167 6168 ret = get_errno(do_sigprocmask(how, &set, &oldset)); 6169 if (!is_error(ret)) { 6170 host_to_target_old_sigset(&mask, &oldset); 6171 ret = mask; 6172 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6173 } 6174 #else 6175 sigset_t set, oldset, *set_ptr; 6176 int how; 6177 6178 if (arg2) { 6179 switch (arg1) { 6180 case TARGET_SIG_BLOCK: 6181 how = SIG_BLOCK; 6182 break; 6183 case TARGET_SIG_UNBLOCK: 6184 how = SIG_UNBLOCK; 6185 break; 6186 case TARGET_SIG_SETMASK: 6187 how = SIG_SETMASK; 6188 break; 6189 default: 6190 ret = -TARGET_EINVAL; 6191 goto fail; 6192 } 6193 if (!(p = lock_user(VERIFY_READ, arg2, 
sizeof(target_sigset_t), 1))) 6194 goto efault; 6195 target_to_host_old_sigset(&set, p); 6196 unlock_user(p, arg2, 0); 6197 set_ptr = &set; 6198 } else { 6199 how = 0; 6200 set_ptr = NULL; 6201 } 6202 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6203 if (!is_error(ret) && arg3) { 6204 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6205 goto efault; 6206 host_to_target_old_sigset(p, &oldset); 6207 unlock_user(p, arg3, sizeof(target_sigset_t)); 6208 } 6209 #endif 6210 } 6211 break; 6212 #endif 6213 case TARGET_NR_rt_sigprocmask: 6214 { 6215 int how = arg1; 6216 sigset_t set, oldset, *set_ptr; 6217 6218 if (arg2) { 6219 switch(how) { 6220 case TARGET_SIG_BLOCK: 6221 how = SIG_BLOCK; 6222 break; 6223 case TARGET_SIG_UNBLOCK: 6224 how = SIG_UNBLOCK; 6225 break; 6226 case TARGET_SIG_SETMASK: 6227 how = SIG_SETMASK; 6228 break; 6229 default: 6230 ret = -TARGET_EINVAL; 6231 goto fail; 6232 } 6233 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6234 goto efault; 6235 target_to_host_sigset(&set, p); 6236 unlock_user(p, arg2, 0); 6237 set_ptr = &set; 6238 } else { 6239 how = 0; 6240 set_ptr = NULL; 6241 } 6242 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6243 if (!is_error(ret) && arg3) { 6244 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6245 goto efault; 6246 host_to_target_sigset(p, &oldset); 6247 unlock_user(p, arg3, sizeof(target_sigset_t)); 6248 } 6249 } 6250 break; 6251 #ifdef TARGET_NR_sigpending 6252 case TARGET_NR_sigpending: 6253 { 6254 sigset_t set; 6255 ret = get_errno(sigpending(&set)); 6256 if (!is_error(ret)) { 6257 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6258 goto efault; 6259 host_to_target_old_sigset(p, &set); 6260 unlock_user(p, arg1, sizeof(target_sigset_t)); 6261 } 6262 } 6263 break; 6264 #endif 6265 case TARGET_NR_rt_sigpending: 6266 { 6267 sigset_t set; 6268 ret = get_errno(sigpending(&set)); 6269 if (!is_error(ret)) { 6270 if (!(p = 
lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6271 goto efault; 6272 host_to_target_sigset(p, &set); 6273 unlock_user(p, arg1, sizeof(target_sigset_t)); 6274 } 6275 } 6276 break; 6277 #ifdef TARGET_NR_sigsuspend 6278 case TARGET_NR_sigsuspend: 6279 { 6280 sigset_t set; 6281 #if defined(TARGET_ALPHA) 6282 abi_ulong mask = arg1; 6283 target_to_host_old_sigset(&set, &mask); 6284 #else 6285 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6286 goto efault; 6287 target_to_host_old_sigset(&set, p); 6288 unlock_user(p, arg1, 0); 6289 #endif 6290 ret = get_errno(sigsuspend(&set)); 6291 } 6292 break; 6293 #endif 6294 case TARGET_NR_rt_sigsuspend: 6295 { 6296 sigset_t set; 6297 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6298 goto efault; 6299 target_to_host_sigset(&set, p); 6300 unlock_user(p, arg1, 0); 6301 ret = get_errno(sigsuspend(&set)); 6302 } 6303 break; 6304 case TARGET_NR_rt_sigtimedwait: 6305 { 6306 sigset_t set; 6307 struct timespec uts, *puts; 6308 siginfo_t uinfo; 6309 6310 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6311 goto efault; 6312 target_to_host_sigset(&set, p); 6313 unlock_user(p, arg1, 0); 6314 if (arg3) { 6315 puts = &uts; 6316 target_to_host_timespec(puts, arg3); 6317 } else { 6318 puts = NULL; 6319 } 6320 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6321 if (!is_error(ret)) { 6322 if (arg2) { 6323 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 6324 0); 6325 if (!p) { 6326 goto efault; 6327 } 6328 host_to_target_siginfo(p, &uinfo); 6329 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6330 } 6331 ret = host_to_target_signal(ret); 6332 } 6333 } 6334 break; 6335 case TARGET_NR_rt_sigqueueinfo: 6336 { 6337 siginfo_t uinfo; 6338 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6339 goto efault; 6340 target_to_host_siginfo(&uinfo, p); 6341 unlock_user(p, arg1, 0); 6342 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6343 } 
6344 break; 6345 #ifdef TARGET_NR_sigreturn 6346 case TARGET_NR_sigreturn: 6347 /* NOTE: ret is eax, so not transcoding must be done */ 6348 ret = do_sigreturn(cpu_env); 6349 break; 6350 #endif 6351 case TARGET_NR_rt_sigreturn: 6352 /* NOTE: ret is eax, so not transcoding must be done */ 6353 ret = do_rt_sigreturn(cpu_env); 6354 break; 6355 case TARGET_NR_sethostname: 6356 if (!(p = lock_user_string(arg1))) 6357 goto efault; 6358 ret = get_errno(sethostname(p, arg2)); 6359 unlock_user(p, arg1, 0); 6360 break; 6361 case TARGET_NR_setrlimit: 6362 { 6363 int resource = target_to_host_resource(arg1); 6364 struct target_rlimit *target_rlim; 6365 struct rlimit rlim; 6366 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6367 goto efault; 6368 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6369 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6370 unlock_user_struct(target_rlim, arg2, 0); 6371 ret = get_errno(setrlimit(resource, &rlim)); 6372 } 6373 break; 6374 case TARGET_NR_getrlimit: 6375 { 6376 int resource = target_to_host_resource(arg1); 6377 struct target_rlimit *target_rlim; 6378 struct rlimit rlim; 6379 6380 ret = get_errno(getrlimit(resource, &rlim)); 6381 if (!is_error(ret)) { 6382 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6383 goto efault; 6384 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6385 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6386 unlock_user_struct(target_rlim, arg2, 1); 6387 } 6388 } 6389 break; 6390 case TARGET_NR_getrusage: 6391 { 6392 struct rusage rusage; 6393 ret = get_errno(getrusage(arg1, &rusage)); 6394 if (!is_error(ret)) { 6395 ret = host_to_target_rusage(arg2, &rusage); 6396 } 6397 } 6398 break; 6399 case TARGET_NR_gettimeofday: 6400 { 6401 struct timeval tv; 6402 ret = get_errno(gettimeofday(&tv, NULL)); 6403 if (!is_error(ret)) { 6404 if (copy_to_user_timeval(arg1, &tv)) 6405 goto efault; 6406 } 6407 } 6408 break; 6409 case TARGET_NR_settimeofday: 6410 { 
6411 struct timeval tv, *ptv = NULL; 6412 struct timezone tz, *ptz = NULL; 6413 6414 if (arg1) { 6415 if (copy_from_user_timeval(&tv, arg1)) { 6416 goto efault; 6417 } 6418 ptv = &tv; 6419 } 6420 6421 if (arg2) { 6422 if (copy_from_user_timezone(&tz, arg2)) { 6423 goto efault; 6424 } 6425 ptz = &tz; 6426 } 6427 6428 ret = get_errno(settimeofday(ptv, ptz)); 6429 } 6430 break; 6431 #if defined(TARGET_NR_select) 6432 case TARGET_NR_select: 6433 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 6434 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6435 #else 6436 { 6437 struct target_sel_arg_struct *sel; 6438 abi_ulong inp, outp, exp, tvp; 6439 long nsel; 6440 6441 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6442 goto efault; 6443 nsel = tswapal(sel->n); 6444 inp = tswapal(sel->inp); 6445 outp = tswapal(sel->outp); 6446 exp = tswapal(sel->exp); 6447 tvp = tswapal(sel->tvp); 6448 unlock_user_struct(sel, arg1, 0); 6449 ret = do_select(nsel, inp, outp, exp, tvp); 6450 } 6451 #endif 6452 break; 6453 #endif 6454 #ifdef TARGET_NR_pselect6 6455 case TARGET_NR_pselect6: 6456 { 6457 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6458 fd_set rfds, wfds, efds; 6459 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6460 struct timespec ts, *ts_ptr; 6461 6462 /* 6463 * The 6th arg is actually two args smashed together, 6464 * so we cannot use the C library. 
6465 */ 6466 sigset_t set; 6467 struct { 6468 sigset_t *set; 6469 size_t size; 6470 } sig, *sig_ptr; 6471 6472 abi_ulong arg_sigset, arg_sigsize, *arg7; 6473 target_sigset_t *target_sigset; 6474 6475 n = arg1; 6476 rfd_addr = arg2; 6477 wfd_addr = arg3; 6478 efd_addr = arg4; 6479 ts_addr = arg5; 6480 6481 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6482 if (ret) { 6483 goto fail; 6484 } 6485 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6486 if (ret) { 6487 goto fail; 6488 } 6489 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6490 if (ret) { 6491 goto fail; 6492 } 6493 6494 /* 6495 * This takes a timespec, and not a timeval, so we cannot 6496 * use the do_select() helper ... 6497 */ 6498 if (ts_addr) { 6499 if (target_to_host_timespec(&ts, ts_addr)) { 6500 goto efault; 6501 } 6502 ts_ptr = &ts; 6503 } else { 6504 ts_ptr = NULL; 6505 } 6506 6507 /* Extract the two packed args for the sigset */ 6508 if (arg6) { 6509 sig_ptr = &sig; 6510 sig.size = _NSIG / 8; 6511 6512 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6513 if (!arg7) { 6514 goto efault; 6515 } 6516 arg_sigset = tswapal(arg7[0]); 6517 arg_sigsize = tswapal(arg7[1]); 6518 unlock_user(arg7, arg6, 0); 6519 6520 if (arg_sigset) { 6521 sig.set = &set; 6522 if (arg_sigsize != sizeof(*target_sigset)) { 6523 /* Like the kernel, we enforce correct size sigsets */ 6524 ret = -TARGET_EINVAL; 6525 goto fail; 6526 } 6527 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6528 sizeof(*target_sigset), 1); 6529 if (!target_sigset) { 6530 goto efault; 6531 } 6532 target_to_host_sigset(&set, target_sigset); 6533 unlock_user(target_sigset, arg_sigset, 0); 6534 } else { 6535 sig.set = NULL; 6536 } 6537 } else { 6538 sig_ptr = NULL; 6539 } 6540 6541 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6542 ts_ptr, sig_ptr)); 6543 6544 if (!is_error(ret)) { 6545 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6546 goto efault; 6547 if (wfd_addr && 
copy_to_user_fdset(wfd_addr, &wfds, n)) 6548 goto efault; 6549 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6550 goto efault; 6551 6552 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6553 goto efault; 6554 } 6555 } 6556 break; 6557 #endif 6558 case TARGET_NR_symlink: 6559 { 6560 void *p2; 6561 p = lock_user_string(arg1); 6562 p2 = lock_user_string(arg2); 6563 if (!p || !p2) 6564 ret = -TARGET_EFAULT; 6565 else 6566 ret = get_errno(symlink(p, p2)); 6567 unlock_user(p2, arg2, 0); 6568 unlock_user(p, arg1, 0); 6569 } 6570 break; 6571 #if defined(TARGET_NR_symlinkat) 6572 case TARGET_NR_symlinkat: 6573 { 6574 void *p2; 6575 p = lock_user_string(arg1); 6576 p2 = lock_user_string(arg3); 6577 if (!p || !p2) 6578 ret = -TARGET_EFAULT; 6579 else 6580 ret = get_errno(symlinkat(p, arg2, p2)); 6581 unlock_user(p2, arg3, 0); 6582 unlock_user(p, arg1, 0); 6583 } 6584 break; 6585 #endif 6586 #ifdef TARGET_NR_oldlstat 6587 case TARGET_NR_oldlstat: 6588 goto unimplemented; 6589 #endif 6590 case TARGET_NR_readlink: 6591 { 6592 void *p2; 6593 p = lock_user_string(arg1); 6594 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6595 if (!p || !p2) { 6596 ret = -TARGET_EFAULT; 6597 } else if (is_proc_myself((const char *)p, "exe")) { 6598 char real[PATH_MAX], *temp; 6599 temp = realpath(exec_path, real); 6600 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6601 snprintf((char *)p2, arg3, "%s", real); 6602 } else { 6603 ret = get_errno(readlink(path(p), p2, arg3)); 6604 } 6605 unlock_user(p2, arg2, ret); 6606 unlock_user(p, arg1, 0); 6607 } 6608 break; 6609 #if defined(TARGET_NR_readlinkat) 6610 case TARGET_NR_readlinkat: 6611 { 6612 void *p2; 6613 p = lock_user_string(arg2); 6614 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6615 if (!p || !p2) { 6616 ret = -TARGET_EFAULT; 6617 } else if (is_proc_myself((const char *)p, "exe")) { 6618 char real[PATH_MAX], *temp; 6619 temp = realpath(exec_path, real); 6620 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 6621 snprintf((char *)p2, arg4, "%s", real); 6622 } else { 6623 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 6624 } 6625 unlock_user(p2, arg3, ret); 6626 unlock_user(p, arg2, 0); 6627 } 6628 break; 6629 #endif 6630 #ifdef TARGET_NR_uselib 6631 case TARGET_NR_uselib: 6632 goto unimplemented; 6633 #endif 6634 #ifdef TARGET_NR_swapon 6635 case TARGET_NR_swapon: 6636 if (!(p = lock_user_string(arg1))) 6637 goto efault; 6638 ret = get_errno(swapon(p, arg2)); 6639 unlock_user(p, arg1, 0); 6640 break; 6641 #endif 6642 case TARGET_NR_reboot: 6643 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6644 /* arg4 must be ignored in all other cases */ 6645 p = lock_user_string(arg4); 6646 if (!p) { 6647 goto efault; 6648 } 6649 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6650 unlock_user(p, arg4, 0); 6651 } else { 6652 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6653 } 6654 break; 6655 #ifdef TARGET_NR_readdir 6656 case TARGET_NR_readdir: 6657 goto unimplemented; 6658 #endif 6659 #ifdef TARGET_NR_mmap 6660 case TARGET_NR_mmap: 6661 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6662 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 6663 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6664 || defined(TARGET_S390X) 6665 { 6666 abi_ulong *v; 6667 abi_ulong v1, v2, v3, v4, v5, v6; 6668 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6669 goto efault; 6670 v1 = tswapal(v[0]); 6671 v2 = tswapal(v[1]); 6672 v3 = tswapal(v[2]); 6673 v4 = tswapal(v[3]); 6674 v5 = tswapal(v[4]); 6675 v6 = tswapal(v[5]); 6676 unlock_user(v, arg1, 0); 6677 ret = get_errno(target_mmap(v1, v2, v3, 6678 target_to_host_bitmask(v4, mmap_flags_tbl), 6679 v5, v6)); 6680 } 6681 #else 6682 ret = get_errno(target_mmap(arg1, arg2, arg3, 6683 target_to_host_bitmask(arg4, mmap_flags_tbl), 6684 arg5, 6685 arg6)); 6686 #endif 6687 break; 6688 #endif 6689 #ifdef TARGET_NR_mmap2 6690 case TARGET_NR_mmap2: 6691 #ifndef 
MMAP_SHIFT 6692 #define MMAP_SHIFT 12 6693 #endif 6694 ret = get_errno(target_mmap(arg1, arg2, arg3, 6695 target_to_host_bitmask(arg4, mmap_flags_tbl), 6696 arg5, 6697 arg6 << MMAP_SHIFT)); 6698 break; 6699 #endif 6700 case TARGET_NR_munmap: 6701 ret = get_errno(target_munmap(arg1, arg2)); 6702 break; 6703 case TARGET_NR_mprotect: 6704 { 6705 TaskState *ts = cpu->opaque; 6706 /* Special hack to detect libc making the stack executable. */ 6707 if ((arg3 & PROT_GROWSDOWN) 6708 && arg1 >= ts->info->stack_limit 6709 && arg1 <= ts->info->start_stack) { 6710 arg3 &= ~PROT_GROWSDOWN; 6711 arg2 = arg2 + arg1 - ts->info->stack_limit; 6712 arg1 = ts->info->stack_limit; 6713 } 6714 } 6715 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6716 break; 6717 #ifdef TARGET_NR_mremap 6718 case TARGET_NR_mremap: 6719 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6720 break; 6721 #endif 6722 /* ??? msync/mlock/munlock are broken for softmmu. */ 6723 #ifdef TARGET_NR_msync 6724 case TARGET_NR_msync: 6725 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6726 break; 6727 #endif 6728 #ifdef TARGET_NR_mlock 6729 case TARGET_NR_mlock: 6730 ret = get_errno(mlock(g2h(arg1), arg2)); 6731 break; 6732 #endif 6733 #ifdef TARGET_NR_munlock 6734 case TARGET_NR_munlock: 6735 ret = get_errno(munlock(g2h(arg1), arg2)); 6736 break; 6737 #endif 6738 #ifdef TARGET_NR_mlockall 6739 case TARGET_NR_mlockall: 6740 ret = get_errno(mlockall(arg1)); 6741 break; 6742 #endif 6743 #ifdef TARGET_NR_munlockall 6744 case TARGET_NR_munlockall: 6745 ret = get_errno(munlockall()); 6746 break; 6747 #endif 6748 case TARGET_NR_truncate: 6749 if (!(p = lock_user_string(arg1))) 6750 goto efault; 6751 ret = get_errno(truncate(p, arg2)); 6752 unlock_user(p, arg1, 0); 6753 break; 6754 case TARGET_NR_ftruncate: 6755 ret = get_errno(ftruncate(arg1, arg2)); 6756 break; 6757 case TARGET_NR_fchmod: 6758 ret = get_errno(fchmod(arg1, arg2)); 6759 break; 6760 #if defined(TARGET_NR_fchmodat) 6761 case 
TARGET_NR_fchmodat: 6762 if (!(p = lock_user_string(arg2))) 6763 goto efault; 6764 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 6765 unlock_user(p, arg2, 0); 6766 break; 6767 #endif 6768 case TARGET_NR_getpriority: 6769 /* Note that negative values are valid for getpriority, so we must 6770 differentiate based on errno settings. */ 6771 errno = 0; 6772 ret = getpriority(arg1, arg2); 6773 if (ret == -1 && errno != 0) { 6774 ret = -host_to_target_errno(errno); 6775 break; 6776 } 6777 #ifdef TARGET_ALPHA 6778 /* Return value is the unbiased priority. Signal no error. */ 6779 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6780 #else 6781 /* Return value is a biased priority to avoid negative numbers. */ 6782 ret = 20 - ret; 6783 #endif 6784 break; 6785 case TARGET_NR_setpriority: 6786 ret = get_errno(setpriority(arg1, arg2, arg3)); 6787 break; 6788 #ifdef TARGET_NR_profil 6789 case TARGET_NR_profil: 6790 goto unimplemented; 6791 #endif 6792 case TARGET_NR_statfs: 6793 if (!(p = lock_user_string(arg1))) 6794 goto efault; 6795 ret = get_errno(statfs(path(p), &stfs)); 6796 unlock_user(p, arg1, 0); 6797 convert_statfs: 6798 if (!is_error(ret)) { 6799 struct target_statfs *target_stfs; 6800 6801 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6802 goto efault; 6803 __put_user(stfs.f_type, &target_stfs->f_type); 6804 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6805 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6806 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6807 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6808 __put_user(stfs.f_files, &target_stfs->f_files); 6809 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6810 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6811 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6812 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6813 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6814 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6815 
unlock_user_struct(target_stfs, arg2, 1); 6816 } 6817 break; 6818 case TARGET_NR_fstatfs: 6819 ret = get_errno(fstatfs(arg1, &stfs)); 6820 goto convert_statfs; 6821 #ifdef TARGET_NR_statfs64 6822 case TARGET_NR_statfs64: 6823 if (!(p = lock_user_string(arg1))) 6824 goto efault; 6825 ret = get_errno(statfs(path(p), &stfs)); 6826 unlock_user(p, arg1, 0); 6827 convert_statfs64: 6828 if (!is_error(ret)) { 6829 struct target_statfs64 *target_stfs; 6830 6831 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6832 goto efault; 6833 __put_user(stfs.f_type, &target_stfs->f_type); 6834 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6835 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6836 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6837 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6838 __put_user(stfs.f_files, &target_stfs->f_files); 6839 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6840 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6841 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6842 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6843 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6844 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6845 unlock_user_struct(target_stfs, arg3, 1); 6846 } 6847 break; 6848 case TARGET_NR_fstatfs64: 6849 ret = get_errno(fstatfs(arg1, &stfs)); 6850 goto convert_statfs64; 6851 #endif 6852 #ifdef TARGET_NR_ioperm 6853 case TARGET_NR_ioperm: 6854 goto unimplemented; 6855 #endif 6856 #ifdef TARGET_NR_socketcall 6857 case TARGET_NR_socketcall: 6858 ret = do_socketcall(arg1, arg2); 6859 break; 6860 #endif 6861 #ifdef TARGET_NR_accept 6862 case TARGET_NR_accept: 6863 ret = do_accept4(arg1, arg2, arg3, 0); 6864 break; 6865 #endif 6866 #ifdef TARGET_NR_accept4 6867 case TARGET_NR_accept4: 6868 #ifdef CONFIG_ACCEPT4 6869 ret = do_accept4(arg1, arg2, arg3, arg4); 6870 #else 6871 goto unimplemented; 6872 #endif 6873 break; 6874 #endif 6875 #ifdef TARGET_NR_bind 6876 
case TARGET_NR_bind: 6877 ret = do_bind(arg1, arg2, arg3); 6878 break; 6879 #endif 6880 #ifdef TARGET_NR_connect 6881 case TARGET_NR_connect: 6882 ret = do_connect(arg1, arg2, arg3); 6883 break; 6884 #endif 6885 #ifdef TARGET_NR_getpeername 6886 case TARGET_NR_getpeername: 6887 ret = do_getpeername(arg1, arg2, arg3); 6888 break; 6889 #endif 6890 #ifdef TARGET_NR_getsockname 6891 case TARGET_NR_getsockname: 6892 ret = do_getsockname(arg1, arg2, arg3); 6893 break; 6894 #endif 6895 #ifdef TARGET_NR_getsockopt 6896 case TARGET_NR_getsockopt: 6897 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6898 break; 6899 #endif 6900 #ifdef TARGET_NR_listen 6901 case TARGET_NR_listen: 6902 ret = get_errno(listen(arg1, arg2)); 6903 break; 6904 #endif 6905 #ifdef TARGET_NR_recv 6906 case TARGET_NR_recv: 6907 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6908 break; 6909 #endif 6910 #ifdef TARGET_NR_recvfrom 6911 case TARGET_NR_recvfrom: 6912 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6913 break; 6914 #endif 6915 #ifdef TARGET_NR_recvmsg 6916 case TARGET_NR_recvmsg: 6917 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6918 break; 6919 #endif 6920 #ifdef TARGET_NR_send 6921 case TARGET_NR_send: 6922 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6923 break; 6924 #endif 6925 #ifdef TARGET_NR_sendmsg 6926 case TARGET_NR_sendmsg: 6927 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6928 break; 6929 #endif 6930 #ifdef TARGET_NR_sendmmsg 6931 case TARGET_NR_sendmmsg: 6932 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 6933 break; 6934 case TARGET_NR_recvmmsg: 6935 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 6936 break; 6937 #endif 6938 #ifdef TARGET_NR_sendto 6939 case TARGET_NR_sendto: 6940 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6941 break; 6942 #endif 6943 #ifdef TARGET_NR_shutdown 6944 case TARGET_NR_shutdown: 6945 ret = get_errno(shutdown(arg1, arg2)); 6946 break; 6947 #endif 6948 #ifdef TARGET_NR_socket 6949 case TARGET_NR_socket: 6950 ret = 
do_socket(arg1, arg2, arg3); 6951 break; 6952 #endif 6953 #ifdef TARGET_NR_socketpair 6954 case TARGET_NR_socketpair: 6955 ret = do_socketpair(arg1, arg2, arg3, arg4); 6956 break; 6957 #endif 6958 #ifdef TARGET_NR_setsockopt 6959 case TARGET_NR_setsockopt: 6960 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6961 break; 6962 #endif 6963 6964 case TARGET_NR_syslog: 6965 if (!(p = lock_user_string(arg2))) 6966 goto efault; 6967 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6968 unlock_user(p, arg2, 0); 6969 break; 6970 6971 case TARGET_NR_setitimer: 6972 { 6973 struct itimerval value, ovalue, *pvalue; 6974 6975 if (arg2) { 6976 pvalue = &value; 6977 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6978 || copy_from_user_timeval(&pvalue->it_value, 6979 arg2 + sizeof(struct target_timeval))) 6980 goto efault; 6981 } else { 6982 pvalue = NULL; 6983 } 6984 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6985 if (!is_error(ret) && arg3) { 6986 if (copy_to_user_timeval(arg3, 6987 &ovalue.it_interval) 6988 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6989 &ovalue.it_value)) 6990 goto efault; 6991 } 6992 } 6993 break; 6994 case TARGET_NR_getitimer: 6995 { 6996 struct itimerval value; 6997 6998 ret = get_errno(getitimer(arg1, &value)); 6999 if (!is_error(ret) && arg2) { 7000 if (copy_to_user_timeval(arg2, 7001 &value.it_interval) 7002 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 7003 &value.it_value)) 7004 goto efault; 7005 } 7006 } 7007 break; 7008 case TARGET_NR_stat: 7009 if (!(p = lock_user_string(arg1))) 7010 goto efault; 7011 ret = get_errno(stat(path(p), &st)); 7012 unlock_user(p, arg1, 0); 7013 goto do_stat; 7014 case TARGET_NR_lstat: 7015 if (!(p = lock_user_string(arg1))) 7016 goto efault; 7017 ret = get_errno(lstat(path(p), &st)); 7018 unlock_user(p, arg1, 0); 7019 goto do_stat; 7020 case TARGET_NR_fstat: 7021 { 7022 ret = get_errno(fstat(arg1, &st)); 7023 do_stat: 7024 if (!is_error(ret)) { 7025 
struct target_stat *target_st; 7026 7027 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 7028 goto efault; 7029 memset(target_st, 0, sizeof(*target_st)); 7030 __put_user(st.st_dev, &target_st->st_dev); 7031 __put_user(st.st_ino, &target_st->st_ino); 7032 __put_user(st.st_mode, &target_st->st_mode); 7033 __put_user(st.st_uid, &target_st->st_uid); 7034 __put_user(st.st_gid, &target_st->st_gid); 7035 __put_user(st.st_nlink, &target_st->st_nlink); 7036 __put_user(st.st_rdev, &target_st->st_rdev); 7037 __put_user(st.st_size, &target_st->st_size); 7038 __put_user(st.st_blksize, &target_st->st_blksize); 7039 __put_user(st.st_blocks, &target_st->st_blocks); 7040 __put_user(st.st_atime, &target_st->target_st_atime); 7041 __put_user(st.st_mtime, &target_st->target_st_mtime); 7042 __put_user(st.st_ctime, &target_st->target_st_ctime); 7043 unlock_user_struct(target_st, arg2, 1); 7044 } 7045 } 7046 break; 7047 #ifdef TARGET_NR_olduname 7048 case TARGET_NR_olduname: 7049 goto unimplemented; 7050 #endif 7051 #ifdef TARGET_NR_iopl 7052 case TARGET_NR_iopl: 7053 goto unimplemented; 7054 #endif 7055 case TARGET_NR_vhangup: 7056 ret = get_errno(vhangup()); 7057 break; 7058 #ifdef TARGET_NR_idle 7059 case TARGET_NR_idle: 7060 goto unimplemented; 7061 #endif 7062 #ifdef TARGET_NR_syscall 7063 case TARGET_NR_syscall: 7064 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 7065 arg6, arg7, arg8, 0); 7066 break; 7067 #endif 7068 case TARGET_NR_wait4: 7069 { 7070 int status; 7071 abi_long status_ptr = arg2; 7072 struct rusage rusage, *rusage_ptr; 7073 abi_ulong target_rusage = arg4; 7074 abi_long rusage_err; 7075 if (target_rusage) 7076 rusage_ptr = &rusage; 7077 else 7078 rusage_ptr = NULL; 7079 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 7080 if (!is_error(ret)) { 7081 if (status_ptr && ret) { 7082 status = host_to_target_waitstatus(status); 7083 if (put_user_s32(status, status_ptr)) 7084 goto efault; 7085 } 7086 if (target_rusage) { 7087 rusage_err = 
host_to_target_rusage(target_rusage, &rusage); 7088 if (rusage_err) { 7089 ret = rusage_err; 7090 } 7091 } 7092 } 7093 } 7094 break; 7095 #ifdef TARGET_NR_swapoff 7096 case TARGET_NR_swapoff: 7097 if (!(p = lock_user_string(arg1))) 7098 goto efault; 7099 ret = get_errno(swapoff(p)); 7100 unlock_user(p, arg1, 0); 7101 break; 7102 #endif 7103 case TARGET_NR_sysinfo: 7104 { 7105 struct target_sysinfo *target_value; 7106 struct sysinfo value; 7107 ret = get_errno(sysinfo(&value)); 7108 if (!is_error(ret) && arg1) 7109 { 7110 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 7111 goto efault; 7112 __put_user(value.uptime, &target_value->uptime); 7113 __put_user(value.loads[0], &target_value->loads[0]); 7114 __put_user(value.loads[1], &target_value->loads[1]); 7115 __put_user(value.loads[2], &target_value->loads[2]); 7116 __put_user(value.totalram, &target_value->totalram); 7117 __put_user(value.freeram, &target_value->freeram); 7118 __put_user(value.sharedram, &target_value->sharedram); 7119 __put_user(value.bufferram, &target_value->bufferram); 7120 __put_user(value.totalswap, &target_value->totalswap); 7121 __put_user(value.freeswap, &target_value->freeswap); 7122 __put_user(value.procs, &target_value->procs); 7123 __put_user(value.totalhigh, &target_value->totalhigh); 7124 __put_user(value.freehigh, &target_value->freehigh); 7125 __put_user(value.mem_unit, &target_value->mem_unit); 7126 unlock_user_struct(target_value, arg1, 1); 7127 } 7128 } 7129 break; 7130 #ifdef TARGET_NR_ipc 7131 case TARGET_NR_ipc: 7132 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 7133 break; 7134 #endif 7135 #ifdef TARGET_NR_semget 7136 case TARGET_NR_semget: 7137 ret = get_errno(semget(arg1, arg2, arg3)); 7138 break; 7139 #endif 7140 #ifdef TARGET_NR_semop 7141 case TARGET_NR_semop: 7142 ret = do_semop(arg1, arg2, arg3); 7143 break; 7144 #endif 7145 #ifdef TARGET_NR_semctl 7146 case TARGET_NR_semctl: 7147 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 
7148 break; 7149 #endif 7150 #ifdef TARGET_NR_msgctl 7151 case TARGET_NR_msgctl: 7152 ret = do_msgctl(arg1, arg2, arg3); 7153 break; 7154 #endif 7155 #ifdef TARGET_NR_msgget 7156 case TARGET_NR_msgget: 7157 ret = get_errno(msgget(arg1, arg2)); 7158 break; 7159 #endif 7160 #ifdef TARGET_NR_msgrcv 7161 case TARGET_NR_msgrcv: 7162 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 7163 break; 7164 #endif 7165 #ifdef TARGET_NR_msgsnd 7166 case TARGET_NR_msgsnd: 7167 ret = do_msgsnd(arg1, arg2, arg3, arg4); 7168 break; 7169 #endif 7170 #ifdef TARGET_NR_shmget 7171 case TARGET_NR_shmget: 7172 ret = get_errno(shmget(arg1, arg2, arg3)); 7173 break; 7174 #endif 7175 #ifdef TARGET_NR_shmctl 7176 case TARGET_NR_shmctl: 7177 ret = do_shmctl(arg1, arg2, arg3); 7178 break; 7179 #endif 7180 #ifdef TARGET_NR_shmat 7181 case TARGET_NR_shmat: 7182 ret = do_shmat(arg1, arg2, arg3); 7183 break; 7184 #endif 7185 #ifdef TARGET_NR_shmdt 7186 case TARGET_NR_shmdt: 7187 ret = do_shmdt(arg1); 7188 break; 7189 #endif 7190 case TARGET_NR_fsync: 7191 ret = get_errno(fsync(arg1)); 7192 break; 7193 case TARGET_NR_clone: 7194 /* Linux manages to have three different orderings for its 7195 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 7196 * match the kernel's CONFIG_CLONE_* settings. 7197 * Microblaze is further special in that it uses a sixth 7198 * implicit argument to clone for the TLS pointer. 
7199 */ 7200 #if defined(TARGET_MICROBLAZE) 7201 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 7202 #elif defined(TARGET_CLONE_BACKWARDS) 7203 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 7204 #elif defined(TARGET_CLONE_BACKWARDS2) 7205 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 7206 #else 7207 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 7208 #endif 7209 break; 7210 #ifdef __NR_exit_group 7211 /* new thread calls */ 7212 case TARGET_NR_exit_group: 7213 #ifdef TARGET_GPROF 7214 _mcleanup(); 7215 #endif 7216 gdb_exit(cpu_env, arg1); 7217 ret = get_errno(exit_group(arg1)); 7218 break; 7219 #endif 7220 case TARGET_NR_setdomainname: 7221 if (!(p = lock_user_string(arg1))) 7222 goto efault; 7223 ret = get_errno(setdomainname(p, arg2)); 7224 unlock_user(p, arg1, 0); 7225 break; 7226 case TARGET_NR_uname: 7227 /* no need to transcode because we use the linux syscall */ 7228 { 7229 struct new_utsname * buf; 7230 7231 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 7232 goto efault; 7233 ret = get_errno(sys_uname(buf)); 7234 if (!is_error(ret)) { 7235 /* Overrite the native machine name with whatever is being 7236 emulated. */ 7237 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 7238 /* Allow the user to override the reported release. 
*/ 7239 if (qemu_uname_release && *qemu_uname_release) 7240 strcpy (buf->release, qemu_uname_release); 7241 } 7242 unlock_user_struct(buf, arg1, 1); 7243 } 7244 break; 7245 #ifdef TARGET_I386 7246 case TARGET_NR_modify_ldt: 7247 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7248 break; 7249 #if !defined(TARGET_X86_64) 7250 case TARGET_NR_vm86old: 7251 goto unimplemented; 7252 case TARGET_NR_vm86: 7253 ret = do_vm86(cpu_env, arg1, arg2); 7254 break; 7255 #endif 7256 #endif 7257 case TARGET_NR_adjtimex: 7258 goto unimplemented; 7259 #ifdef TARGET_NR_create_module 7260 case TARGET_NR_create_module: 7261 #endif 7262 case TARGET_NR_init_module: 7263 case TARGET_NR_delete_module: 7264 #ifdef TARGET_NR_get_kernel_syms 7265 case TARGET_NR_get_kernel_syms: 7266 #endif 7267 goto unimplemented; 7268 case TARGET_NR_quotactl: 7269 goto unimplemented; 7270 case TARGET_NR_getpgid: 7271 ret = get_errno(getpgid(arg1)); 7272 break; 7273 case TARGET_NR_fchdir: 7274 ret = get_errno(fchdir(arg1)); 7275 break; 7276 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7277 case TARGET_NR_bdflush: 7278 goto unimplemented; 7279 #endif 7280 #ifdef TARGET_NR_sysfs 7281 case TARGET_NR_sysfs: 7282 goto unimplemented; 7283 #endif 7284 case TARGET_NR_personality: 7285 ret = get_errno(personality(arg1)); 7286 break; 7287 #ifdef TARGET_NR_afs_syscall 7288 case TARGET_NR_afs_syscall: 7289 goto unimplemented; 7290 #endif 7291 #ifdef TARGET_NR__llseek /* Not on alpha */ 7292 case TARGET_NR__llseek: 7293 { 7294 int64_t res; 7295 #if !defined(__NR_llseek) 7296 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7297 if (res == -1) { 7298 ret = get_errno(res); 7299 } else { 7300 ret = 0; 7301 } 7302 #else 7303 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7304 #endif 7305 if ((ret == 0) && put_user_s64(res, arg4)) { 7306 goto efault; 7307 } 7308 } 7309 break; 7310 #endif 7311 case TARGET_NR_getdents: 7312 #ifdef __NR_getdents 7313 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7314 { 7315 
struct target_dirent *target_dirp; 7316 struct linux_dirent *dirp; 7317 abi_long count = arg3; 7318 7319 dirp = malloc(count); 7320 if (!dirp) { 7321 ret = -TARGET_ENOMEM; 7322 goto fail; 7323 } 7324 7325 ret = get_errno(sys_getdents(arg1, dirp, count)); 7326 if (!is_error(ret)) { 7327 struct linux_dirent *de; 7328 struct target_dirent *tde; 7329 int len = ret; 7330 int reclen, treclen; 7331 int count1, tnamelen; 7332 7333 count1 = 0; 7334 de = dirp; 7335 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7336 goto efault; 7337 tde = target_dirp; 7338 while (len > 0) { 7339 reclen = de->d_reclen; 7340 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7341 assert(tnamelen >= 0); 7342 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7343 assert(count1 + treclen <= count); 7344 tde->d_reclen = tswap16(treclen); 7345 tde->d_ino = tswapal(de->d_ino); 7346 tde->d_off = tswapal(de->d_off); 7347 memcpy(tde->d_name, de->d_name, tnamelen); 7348 de = (struct linux_dirent *)((char *)de + reclen); 7349 len -= reclen; 7350 tde = (struct target_dirent *)((char *)tde + treclen); 7351 count1 += treclen; 7352 } 7353 ret = count1; 7354 unlock_user(target_dirp, arg2, ret); 7355 } 7356 free(dirp); 7357 } 7358 #else 7359 { 7360 struct linux_dirent *dirp; 7361 abi_long count = arg3; 7362 7363 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7364 goto efault; 7365 ret = get_errno(sys_getdents(arg1, dirp, count)); 7366 if (!is_error(ret)) { 7367 struct linux_dirent *de; 7368 int len = ret; 7369 int reclen; 7370 de = dirp; 7371 while (len > 0) { 7372 reclen = de->d_reclen; 7373 if (reclen > len) 7374 break; 7375 de->d_reclen = tswap16(reclen); 7376 tswapls(&de->d_ino); 7377 tswapls(&de->d_off); 7378 de = (struct linux_dirent *)((char *)de + reclen); 7379 len -= reclen; 7380 } 7381 } 7382 unlock_user(dirp, arg2, ret); 7383 } 7384 #endif 7385 #else 7386 /* Implement getdents in terms of getdents64 */ 7387 { 7388 struct linux_dirent64 *dirp; 7389 abi_long 
count = arg3; 7390 7391 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 7392 if (!dirp) { 7393 goto efault; 7394 } 7395 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7396 if (!is_error(ret)) { 7397 /* Convert the dirent64 structs to target dirent. We do this 7398 * in-place, since we can guarantee that a target_dirent is no 7399 * larger than a dirent64; however this means we have to be 7400 * careful to read everything before writing in the new format. 7401 */ 7402 struct linux_dirent64 *de; 7403 struct target_dirent *tde; 7404 int len = ret; 7405 int tlen = 0; 7406 7407 de = dirp; 7408 tde = (struct target_dirent *)dirp; 7409 while (len > 0) { 7410 int namelen, treclen; 7411 int reclen = de->d_reclen; 7412 uint64_t ino = de->d_ino; 7413 int64_t off = de->d_off; 7414 uint8_t type = de->d_type; 7415 7416 namelen = strlen(de->d_name); 7417 treclen = offsetof(struct target_dirent, d_name) 7418 + namelen + 2; 7419 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 7420 7421 memmove(tde->d_name, de->d_name, namelen + 1); 7422 tde->d_ino = tswapal(ino); 7423 tde->d_off = tswapal(off); 7424 tde->d_reclen = tswap16(treclen); 7425 /* The target_dirent type is in what was formerly a padding 7426 * byte at the end of the structure: 7427 */ 7428 *(((char *)tde) + treclen - 1) = type; 7429 7430 de = (struct linux_dirent64 *)((char *)de + reclen); 7431 tde = (struct target_dirent *)((char *)tde + treclen); 7432 len -= reclen; 7433 tlen += treclen; 7434 } 7435 ret = tlen; 7436 } 7437 unlock_user(dirp, arg2, ret); 7438 } 7439 #endif 7440 break; 7441 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7442 case TARGET_NR_getdents64: 7443 { 7444 struct linux_dirent64 *dirp; 7445 abi_long count = arg3; 7446 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7447 goto efault; 7448 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7449 if (!is_error(ret)) { 7450 struct linux_dirent64 *de; 7451 int len = ret; 7452 int reclen; 7453 de = dirp; 7454 while (len > 0) 
{ 7455 reclen = de->d_reclen; 7456 if (reclen > len) 7457 break; 7458 de->d_reclen = tswap16(reclen); 7459 tswap64s((uint64_t *)&de->d_ino); 7460 tswap64s((uint64_t *)&de->d_off); 7461 de = (struct linux_dirent64 *)((char *)de + reclen); 7462 len -= reclen; 7463 } 7464 } 7465 unlock_user(dirp, arg2, ret); 7466 } 7467 break; 7468 #endif /* TARGET_NR_getdents64 */ 7469 #if defined(TARGET_NR__newselect) 7470 case TARGET_NR__newselect: 7471 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7472 break; 7473 #endif 7474 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7475 # ifdef TARGET_NR_poll 7476 case TARGET_NR_poll: 7477 # endif 7478 # ifdef TARGET_NR_ppoll 7479 case TARGET_NR_ppoll: 7480 # endif 7481 { 7482 struct target_pollfd *target_pfd; 7483 unsigned int nfds = arg2; 7484 int timeout = arg3; 7485 struct pollfd *pfd; 7486 unsigned int i; 7487 7488 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7489 if (!target_pfd) 7490 goto efault; 7491 7492 pfd = alloca(sizeof(struct pollfd) * nfds); 7493 for(i = 0; i < nfds; i++) { 7494 pfd[i].fd = tswap32(target_pfd[i].fd); 7495 pfd[i].events = tswap16(target_pfd[i].events); 7496 } 7497 7498 # ifdef TARGET_NR_ppoll 7499 if (num == TARGET_NR_ppoll) { 7500 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7501 target_sigset_t *target_set; 7502 sigset_t _set, *set = &_set; 7503 7504 if (arg3) { 7505 if (target_to_host_timespec(timeout_ts, arg3)) { 7506 unlock_user(target_pfd, arg1, 0); 7507 goto efault; 7508 } 7509 } else { 7510 timeout_ts = NULL; 7511 } 7512 7513 if (arg4) { 7514 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7515 if (!target_set) { 7516 unlock_user(target_pfd, arg1, 0); 7517 goto efault; 7518 } 7519 target_to_host_sigset(set, target_set); 7520 } else { 7521 set = NULL; 7522 } 7523 7524 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7525 7526 if (!is_error(ret) && arg3) { 7527 host_to_target_timespec(arg3, timeout_ts); 
7528 } 7529 if (arg4) { 7530 unlock_user(target_set, arg4, 0); 7531 } 7532 } else 7533 # endif 7534 ret = get_errno(poll(pfd, nfds, timeout)); 7535 7536 if (!is_error(ret)) { 7537 for(i = 0; i < nfds; i++) { 7538 target_pfd[i].revents = tswap16(pfd[i].revents); 7539 } 7540 } 7541 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7542 } 7543 break; 7544 #endif 7545 case TARGET_NR_flock: 7546 /* NOTE: the flock constant seems to be the same for every 7547 Linux platform */ 7548 ret = get_errno(flock(arg1, arg2)); 7549 break; 7550 case TARGET_NR_readv: 7551 { 7552 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7553 if (vec != NULL) { 7554 ret = get_errno(readv(arg1, vec, arg3)); 7555 unlock_iovec(vec, arg2, arg3, 1); 7556 } else { 7557 ret = -host_to_target_errno(errno); 7558 } 7559 } 7560 break; 7561 case TARGET_NR_writev: 7562 { 7563 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7564 if (vec != NULL) { 7565 ret = get_errno(writev(arg1, vec, arg3)); 7566 unlock_iovec(vec, arg2, arg3, 0); 7567 } else { 7568 ret = -host_to_target_errno(errno); 7569 } 7570 } 7571 break; 7572 case TARGET_NR_getsid: 7573 ret = get_errno(getsid(arg1)); 7574 break; 7575 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7576 case TARGET_NR_fdatasync: 7577 ret = get_errno(fdatasync(arg1)); 7578 break; 7579 #endif 7580 case TARGET_NR__sysctl: 7581 /* We don't implement this, but ENOTDIR is always a safe 7582 return value. */ 7583 ret = -TARGET_ENOTDIR; 7584 break; 7585 case TARGET_NR_sched_getaffinity: 7586 { 7587 unsigned int mask_size; 7588 unsigned long *mask; 7589 7590 /* 7591 * sched_getaffinity needs multiples of ulong, so need to take 7592 * care of mismatches between target ulong and host ulong sizes. 
7593 */ 7594 if (arg2 & (sizeof(abi_ulong) - 1)) { 7595 ret = -TARGET_EINVAL; 7596 break; 7597 } 7598 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7599 7600 mask = alloca(mask_size); 7601 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7602 7603 if (!is_error(ret)) { 7604 if (ret > arg2) { 7605 /* More data returned than the caller's buffer will fit. 7606 * This only happens if sizeof(abi_long) < sizeof(long) 7607 * and the caller passed us a buffer holding an odd number 7608 * of abi_longs. If the host kernel is actually using the 7609 * extra 4 bytes then fail EINVAL; otherwise we can just 7610 * ignore them and only copy the interesting part. 7611 */ 7612 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 7613 if (numcpus > arg2 * 8) { 7614 ret = -TARGET_EINVAL; 7615 break; 7616 } 7617 ret = arg2; 7618 } 7619 7620 if (copy_to_user(arg3, mask, ret)) { 7621 goto efault; 7622 } 7623 } 7624 } 7625 break; 7626 case TARGET_NR_sched_setaffinity: 7627 { 7628 unsigned int mask_size; 7629 unsigned long *mask; 7630 7631 /* 7632 * sched_setaffinity needs multiples of ulong, so need to take 7633 * care of mismatches between target ulong and host ulong sizes. 
7634 */ 7635 if (arg2 & (sizeof(abi_ulong) - 1)) { 7636 ret = -TARGET_EINVAL; 7637 break; 7638 } 7639 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7640 7641 mask = alloca(mask_size); 7642 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7643 goto efault; 7644 } 7645 memcpy(mask, p, arg2); 7646 unlock_user_struct(p, arg2, 0); 7647 7648 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7649 } 7650 break; 7651 case TARGET_NR_sched_setparam: 7652 { 7653 struct sched_param *target_schp; 7654 struct sched_param schp; 7655 7656 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7657 goto efault; 7658 schp.sched_priority = tswap32(target_schp->sched_priority); 7659 unlock_user_struct(target_schp, arg2, 0); 7660 ret = get_errno(sched_setparam(arg1, &schp)); 7661 } 7662 break; 7663 case TARGET_NR_sched_getparam: 7664 { 7665 struct sched_param *target_schp; 7666 struct sched_param schp; 7667 ret = get_errno(sched_getparam(arg1, &schp)); 7668 if (!is_error(ret)) { 7669 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7670 goto efault; 7671 target_schp->sched_priority = tswap32(schp.sched_priority); 7672 unlock_user_struct(target_schp, arg2, 1); 7673 } 7674 } 7675 break; 7676 case TARGET_NR_sched_setscheduler: 7677 { 7678 struct sched_param *target_schp; 7679 struct sched_param schp; 7680 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7681 goto efault; 7682 schp.sched_priority = tswap32(target_schp->sched_priority); 7683 unlock_user_struct(target_schp, arg3, 0); 7684 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7685 } 7686 break; 7687 case TARGET_NR_sched_getscheduler: 7688 ret = get_errno(sched_getscheduler(arg1)); 7689 break; 7690 case TARGET_NR_sched_yield: 7691 ret = get_errno(sched_yield()); 7692 break; 7693 case TARGET_NR_sched_get_priority_max: 7694 ret = get_errno(sched_get_priority_max(arg1)); 7695 break; 7696 case TARGET_NR_sched_get_priority_min: 7697 ret = 
get_errno(sched_get_priority_min(arg1)); 7698 break; 7699 case TARGET_NR_sched_rr_get_interval: 7700 { 7701 struct timespec ts; 7702 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7703 if (!is_error(ret)) { 7704 host_to_target_timespec(arg2, &ts); 7705 } 7706 } 7707 break; 7708 case TARGET_NR_nanosleep: 7709 { 7710 struct timespec req, rem; 7711 target_to_host_timespec(&req, arg1); 7712 ret = get_errno(nanosleep(&req, &rem)); 7713 if (is_error(ret) && arg2) { 7714 host_to_target_timespec(arg2, &rem); 7715 } 7716 } 7717 break; 7718 #ifdef TARGET_NR_query_module 7719 case TARGET_NR_query_module: 7720 goto unimplemented; 7721 #endif 7722 #ifdef TARGET_NR_nfsservctl 7723 case TARGET_NR_nfsservctl: 7724 goto unimplemented; 7725 #endif 7726 case TARGET_NR_prctl: 7727 switch (arg1) { 7728 case PR_GET_PDEATHSIG: 7729 { 7730 int deathsig; 7731 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7732 if (!is_error(ret) && arg2 7733 && put_user_ual(deathsig, arg2)) { 7734 goto efault; 7735 } 7736 break; 7737 } 7738 #ifdef PR_GET_NAME 7739 case PR_GET_NAME: 7740 { 7741 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7742 if (!name) { 7743 goto efault; 7744 } 7745 ret = get_errno(prctl(arg1, (unsigned long)name, 7746 arg3, arg4, arg5)); 7747 unlock_user(name, arg2, 16); 7748 break; 7749 } 7750 case PR_SET_NAME: 7751 { 7752 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7753 if (!name) { 7754 goto efault; 7755 } 7756 ret = get_errno(prctl(arg1, (unsigned long)name, 7757 arg3, arg4, arg5)); 7758 unlock_user(name, arg2, 0); 7759 break; 7760 } 7761 #endif 7762 default: 7763 /* Most prctl options have no pointer arguments */ 7764 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7765 break; 7766 } 7767 break; 7768 #ifdef TARGET_NR_arch_prctl 7769 case TARGET_NR_arch_prctl: 7770 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7771 ret = do_arch_prctl(cpu_env, arg1, arg2); 7772 break; 7773 #else 7774 goto unimplemented; 7775 #endif 7776 #endif 7777 #ifdef 
TARGET_NR_pread64 7778 case TARGET_NR_pread64: 7779 if (regpairs_aligned(cpu_env)) { 7780 arg4 = arg5; 7781 arg5 = arg6; 7782 } 7783 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7784 goto efault; 7785 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7786 unlock_user(p, arg2, ret); 7787 break; 7788 case TARGET_NR_pwrite64: 7789 if (regpairs_aligned(cpu_env)) { 7790 arg4 = arg5; 7791 arg5 = arg6; 7792 } 7793 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7794 goto efault; 7795 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7796 unlock_user(p, arg2, 0); 7797 break; 7798 #endif 7799 case TARGET_NR_getcwd: 7800 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7801 goto efault; 7802 ret = get_errno(sys_getcwd1(p, arg2)); 7803 unlock_user(p, arg1, ret); 7804 break; 7805 case TARGET_NR_capget: 7806 case TARGET_NR_capset: 7807 { 7808 struct target_user_cap_header *target_header; 7809 struct target_user_cap_data *target_data = NULL; 7810 struct __user_cap_header_struct header; 7811 struct __user_cap_data_struct data[2]; 7812 struct __user_cap_data_struct *dataptr = NULL; 7813 int i, target_datalen; 7814 int data_items = 1; 7815 7816 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 7817 goto efault; 7818 } 7819 header.version = tswap32(target_header->version); 7820 header.pid = tswap32(target_header->pid); 7821 7822 if (header.version != _LINUX_CAPABILITY_VERSION) { 7823 /* Version 2 and up takes pointer to two user_data structs */ 7824 data_items = 2; 7825 } 7826 7827 target_datalen = sizeof(*target_data) * data_items; 7828 7829 if (arg2) { 7830 if (num == TARGET_NR_capget) { 7831 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 7832 } else { 7833 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 7834 } 7835 if (!target_data) { 7836 unlock_user_struct(target_header, arg1, 0); 7837 goto efault; 7838 } 7839 7840 if (num == TARGET_NR_capset) { 7841 for (i = 0; i < data_items; i++) 
{ 7842 data[i].effective = tswap32(target_data[i].effective); 7843 data[i].permitted = tswap32(target_data[i].permitted); 7844 data[i].inheritable = tswap32(target_data[i].inheritable); 7845 } 7846 } 7847 7848 dataptr = data; 7849 } 7850 7851 if (num == TARGET_NR_capget) { 7852 ret = get_errno(capget(&header, dataptr)); 7853 } else { 7854 ret = get_errno(capset(&header, dataptr)); 7855 } 7856 7857 /* The kernel always updates version for both capget and capset */ 7858 target_header->version = tswap32(header.version); 7859 unlock_user_struct(target_header, arg1, 1); 7860 7861 if (arg2) { 7862 if (num == TARGET_NR_capget) { 7863 for (i = 0; i < data_items; i++) { 7864 target_data[i].effective = tswap32(data[i].effective); 7865 target_data[i].permitted = tswap32(data[i].permitted); 7866 target_data[i].inheritable = tswap32(data[i].inheritable); 7867 } 7868 unlock_user(target_data, arg2, target_datalen); 7869 } else { 7870 unlock_user(target_data, arg2, 0); 7871 } 7872 } 7873 break; 7874 } 7875 case TARGET_NR_sigaltstack: 7876 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7877 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7878 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7879 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7880 break; 7881 #else 7882 goto unimplemented; 7883 #endif 7884 7885 #ifdef CONFIG_SENDFILE 7886 case TARGET_NR_sendfile: 7887 { 7888 off_t *offp = NULL; 7889 off_t off; 7890 if (arg3) { 7891 ret = get_user_sal(off, arg3); 7892 if (is_error(ret)) { 7893 break; 7894 } 7895 offp = &off; 7896 } 7897 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7898 if (!is_error(ret) && arg3) { 7899 abi_long ret2 = put_user_sal(off, arg3); 7900 if (is_error(ret2)) { 7901 ret = ret2; 7902 } 7903 } 7904 break; 7905 } 7906 #ifdef TARGET_NR_sendfile64 7907 case TARGET_NR_sendfile64: 7908 { 7909 off_t *offp = NULL; 7910 off_t off; 7911 if (arg3) { 
7912 ret = get_user_s64(off, arg3); 7913 if (is_error(ret)) { 7914 break; 7915 } 7916 offp = &off; 7917 } 7918 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7919 if (!is_error(ret) && arg3) { 7920 abi_long ret2 = put_user_s64(off, arg3); 7921 if (is_error(ret2)) { 7922 ret = ret2; 7923 } 7924 } 7925 break; 7926 } 7927 #endif 7928 #else 7929 case TARGET_NR_sendfile: 7930 #ifdef TARGET_NR_sendfile64 7931 case TARGET_NR_sendfile64: 7932 #endif 7933 goto unimplemented; 7934 #endif 7935 7936 #ifdef TARGET_NR_getpmsg 7937 case TARGET_NR_getpmsg: 7938 goto unimplemented; 7939 #endif 7940 #ifdef TARGET_NR_putpmsg 7941 case TARGET_NR_putpmsg: 7942 goto unimplemented; 7943 #endif 7944 #ifdef TARGET_NR_vfork 7945 case TARGET_NR_vfork: 7946 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7947 0, 0, 0, 0)); 7948 break; 7949 #endif 7950 #ifdef TARGET_NR_ugetrlimit 7951 case TARGET_NR_ugetrlimit: 7952 { 7953 struct rlimit rlim; 7954 int resource = target_to_host_resource(arg1); 7955 ret = get_errno(getrlimit(resource, &rlim)); 7956 if (!is_error(ret)) { 7957 struct target_rlimit *target_rlim; 7958 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7959 goto efault; 7960 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7961 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7962 unlock_user_struct(target_rlim, arg2, 1); 7963 } 7964 break; 7965 } 7966 #endif 7967 #ifdef TARGET_NR_truncate64 7968 case TARGET_NR_truncate64: 7969 if (!(p = lock_user_string(arg1))) 7970 goto efault; 7971 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7972 unlock_user(p, arg1, 0); 7973 break; 7974 #endif 7975 #ifdef TARGET_NR_ftruncate64 7976 case TARGET_NR_ftruncate64: 7977 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7978 break; 7979 #endif 7980 #ifdef TARGET_NR_stat64 7981 case TARGET_NR_stat64: 7982 if (!(p = lock_user_string(arg1))) 7983 goto efault; 7984 ret = get_errno(stat(path(p), &st)); 7985 unlock_user(p, arg1, 0); 7986 
if (!is_error(ret)) 7987 ret = host_to_target_stat64(cpu_env, arg2, &st); 7988 break; 7989 #endif 7990 #ifdef TARGET_NR_lstat64 7991 case TARGET_NR_lstat64: 7992 if (!(p = lock_user_string(arg1))) 7993 goto efault; 7994 ret = get_errno(lstat(path(p), &st)); 7995 unlock_user(p, arg1, 0); 7996 if (!is_error(ret)) 7997 ret = host_to_target_stat64(cpu_env, arg2, &st); 7998 break; 7999 #endif 8000 #ifdef TARGET_NR_fstat64 8001 case TARGET_NR_fstat64: 8002 ret = get_errno(fstat(arg1, &st)); 8003 if (!is_error(ret)) 8004 ret = host_to_target_stat64(cpu_env, arg2, &st); 8005 break; 8006 #endif 8007 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 8008 #ifdef TARGET_NR_fstatat64 8009 case TARGET_NR_fstatat64: 8010 #endif 8011 #ifdef TARGET_NR_newfstatat 8012 case TARGET_NR_newfstatat: 8013 #endif 8014 if (!(p = lock_user_string(arg2))) 8015 goto efault; 8016 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 8017 if (!is_error(ret)) 8018 ret = host_to_target_stat64(cpu_env, arg3, &st); 8019 break; 8020 #endif 8021 case TARGET_NR_lchown: 8022 if (!(p = lock_user_string(arg1))) 8023 goto efault; 8024 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 8025 unlock_user(p, arg1, 0); 8026 break; 8027 #ifdef TARGET_NR_getuid 8028 case TARGET_NR_getuid: 8029 ret = get_errno(high2lowuid(getuid())); 8030 break; 8031 #endif 8032 #ifdef TARGET_NR_getgid 8033 case TARGET_NR_getgid: 8034 ret = get_errno(high2lowgid(getgid())); 8035 break; 8036 #endif 8037 #ifdef TARGET_NR_geteuid 8038 case TARGET_NR_geteuid: 8039 ret = get_errno(high2lowuid(geteuid())); 8040 break; 8041 #endif 8042 #ifdef TARGET_NR_getegid 8043 case TARGET_NR_getegid: 8044 ret = get_errno(high2lowgid(getegid())); 8045 break; 8046 #endif 8047 case TARGET_NR_setreuid: 8048 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 8049 break; 8050 case TARGET_NR_setregid: 8051 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 8052 break; 8053 case TARGET_NR_getgroups: 
8054 { 8055 int gidsetsize = arg1; 8056 target_id *target_grouplist; 8057 gid_t *grouplist; 8058 int i; 8059 8060 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8061 ret = get_errno(getgroups(gidsetsize, grouplist)); 8062 if (gidsetsize == 0) 8063 break; 8064 if (!is_error(ret)) { 8065 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 8066 if (!target_grouplist) 8067 goto efault; 8068 for(i = 0;i < ret; i++) 8069 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 8070 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 8071 } 8072 } 8073 break; 8074 case TARGET_NR_setgroups: 8075 { 8076 int gidsetsize = arg1; 8077 target_id *target_grouplist; 8078 gid_t *grouplist = NULL; 8079 int i; 8080 if (gidsetsize) { 8081 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8082 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 8083 if (!target_grouplist) { 8084 ret = -TARGET_EFAULT; 8085 goto fail; 8086 } 8087 for (i = 0; i < gidsetsize; i++) { 8088 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 8089 } 8090 unlock_user(target_grouplist, arg2, 0); 8091 } 8092 ret = get_errno(setgroups(gidsetsize, grouplist)); 8093 } 8094 break; 8095 case TARGET_NR_fchown: 8096 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 8097 break; 8098 #if defined(TARGET_NR_fchownat) 8099 case TARGET_NR_fchownat: 8100 if (!(p = lock_user_string(arg2))) 8101 goto efault; 8102 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 8103 low2highgid(arg4), arg5)); 8104 unlock_user(p, arg2, 0); 8105 break; 8106 #endif 8107 #ifdef TARGET_NR_setresuid 8108 case TARGET_NR_setresuid: 8109 ret = get_errno(setresuid(low2highuid(arg1), 8110 low2highuid(arg2), 8111 low2highuid(arg3))); 8112 break; 8113 #endif 8114 #ifdef TARGET_NR_getresuid 8115 case TARGET_NR_getresuid: 8116 { 8117 uid_t ruid, euid, suid; 8118 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8119 if (!is_error(ret)) { 8120 if 
(put_user_id(high2lowuid(ruid), arg1) 8121 || put_user_id(high2lowuid(euid), arg2) 8122 || put_user_id(high2lowuid(suid), arg3)) 8123 goto efault; 8124 } 8125 } 8126 break; 8127 #endif 8128 #ifdef TARGET_NR_getresgid 8129 case TARGET_NR_setresgid: 8130 ret = get_errno(setresgid(low2highgid(arg1), 8131 low2highgid(arg2), 8132 low2highgid(arg3))); 8133 break; 8134 #endif 8135 #ifdef TARGET_NR_getresgid 8136 case TARGET_NR_getresgid: 8137 { 8138 gid_t rgid, egid, sgid; 8139 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8140 if (!is_error(ret)) { 8141 if (put_user_id(high2lowgid(rgid), arg1) 8142 || put_user_id(high2lowgid(egid), arg2) 8143 || put_user_id(high2lowgid(sgid), arg3)) 8144 goto efault; 8145 } 8146 } 8147 break; 8148 #endif 8149 case TARGET_NR_chown: 8150 if (!(p = lock_user_string(arg1))) 8151 goto efault; 8152 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 8153 unlock_user(p, arg1, 0); 8154 break; 8155 case TARGET_NR_setuid: 8156 ret = get_errno(setuid(low2highuid(arg1))); 8157 break; 8158 case TARGET_NR_setgid: 8159 ret = get_errno(setgid(low2highgid(arg1))); 8160 break; 8161 case TARGET_NR_setfsuid: 8162 ret = get_errno(setfsuid(arg1)); 8163 break; 8164 case TARGET_NR_setfsgid: 8165 ret = get_errno(setfsgid(arg1)); 8166 break; 8167 8168 #ifdef TARGET_NR_lchown32 8169 case TARGET_NR_lchown32: 8170 if (!(p = lock_user_string(arg1))) 8171 goto efault; 8172 ret = get_errno(lchown(p, arg2, arg3)); 8173 unlock_user(p, arg1, 0); 8174 break; 8175 #endif 8176 #ifdef TARGET_NR_getuid32 8177 case TARGET_NR_getuid32: 8178 ret = get_errno(getuid()); 8179 break; 8180 #endif 8181 8182 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 8183 /* Alpha specific */ 8184 case TARGET_NR_getxuid: 8185 { 8186 uid_t euid; 8187 euid=geteuid(); 8188 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 8189 } 8190 ret = get_errno(getuid()); 8191 break; 8192 #endif 8193 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 8194 /* Alpha specific */ 8195 case 
TARGET_NR_getxgid: 8196 { 8197 uid_t egid; 8198 egid=getegid(); 8199 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 8200 } 8201 ret = get_errno(getgid()); 8202 break; 8203 #endif 8204 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 8205 /* Alpha specific */ 8206 case TARGET_NR_osf_getsysinfo: 8207 ret = -TARGET_EOPNOTSUPP; 8208 switch (arg1) { 8209 case TARGET_GSI_IEEE_FP_CONTROL: 8210 { 8211 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 8212 8213 /* Copied from linux ieee_fpcr_to_swcr. */ 8214 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 8215 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 8216 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 8217 | SWCR_TRAP_ENABLE_DZE 8218 | SWCR_TRAP_ENABLE_OVF); 8219 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 8220 | SWCR_TRAP_ENABLE_INE); 8221 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 8222 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 8223 8224 if (put_user_u64 (swcr, arg2)) 8225 goto efault; 8226 ret = 0; 8227 } 8228 break; 8229 8230 /* case GSI_IEEE_STATE_AT_SIGNAL: 8231 -- Not implemented in linux kernel. 8232 case GSI_UACPROC: 8233 -- Retrieves current unaligned access state; not much used. 8234 case GSI_PROC_TYPE: 8235 -- Retrieves implver information; surely not used. 8236 case GSI_GET_HWRPB: 8237 -- Grabs a copy of the HWRPB; surely not used. 8238 */ 8239 } 8240 break; 8241 #endif 8242 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 8243 /* Alpha specific */ 8244 case TARGET_NR_osf_setsysinfo: 8245 ret = -TARGET_EOPNOTSUPP; 8246 switch (arg1) { 8247 case TARGET_SSI_IEEE_FP_CONTROL: 8248 { 8249 uint64_t swcr, fpcr, orig_fpcr; 8250 8251 if (get_user_u64 (swcr, arg2)) { 8252 goto efault; 8253 } 8254 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8255 fpcr = orig_fpcr & FPCR_DYN_MASK; 8256 8257 /* Copied from linux ieee_swcr_to_fpcr. 
*/ 8258 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 8259 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 8260 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 8261 | SWCR_TRAP_ENABLE_DZE 8262 | SWCR_TRAP_ENABLE_OVF)) << 48; 8263 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 8264 | SWCR_TRAP_ENABLE_INE)) << 57; 8265 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 8266 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 8267 8268 cpu_alpha_store_fpcr(cpu_env, fpcr); 8269 ret = 0; 8270 } 8271 break; 8272 8273 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 8274 { 8275 uint64_t exc, fpcr, orig_fpcr; 8276 int si_code; 8277 8278 if (get_user_u64(exc, arg2)) { 8279 goto efault; 8280 } 8281 8282 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8283 8284 /* We only add to the exception status here. */ 8285 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 8286 8287 cpu_alpha_store_fpcr(cpu_env, fpcr); 8288 ret = 0; 8289 8290 /* Old exceptions are not signaled. */ 8291 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 8292 8293 /* If any exceptions set by this call, 8294 and are unmasked, send a signal. */ 8295 si_code = 0; 8296 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 8297 si_code = TARGET_FPE_FLTRES; 8298 } 8299 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 8300 si_code = TARGET_FPE_FLTUND; 8301 } 8302 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 8303 si_code = TARGET_FPE_FLTOVF; 8304 } 8305 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 8306 si_code = TARGET_FPE_FLTDIV; 8307 } 8308 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 8309 si_code = TARGET_FPE_FLTINV; 8310 } 8311 if (si_code != 0) { 8312 target_siginfo_t info; 8313 info.si_signo = SIGFPE; 8314 info.si_errno = 0; 8315 info.si_code = si_code; 8316 info._sifields._sigfault._addr 8317 = ((CPUArchState *)cpu_env)->pc; 8318 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 8319 } 8320 } 8321 break; 8322 8323 /* case SSI_NVPAIRS: 8324 -- Used with SSIN_UACPROC to enable unaligned accesses. 
8325 case SSI_IEEE_STATE_AT_SIGNAL: 8326 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8327 -- Not implemented in linux kernel 8328 */ 8329 } 8330 break; 8331 #endif 8332 #ifdef TARGET_NR_osf_sigprocmask 8333 /* Alpha specific. */ 8334 case TARGET_NR_osf_sigprocmask: 8335 { 8336 abi_ulong mask; 8337 int how; 8338 sigset_t set, oldset; 8339 8340 switch(arg1) { 8341 case TARGET_SIG_BLOCK: 8342 how = SIG_BLOCK; 8343 break; 8344 case TARGET_SIG_UNBLOCK: 8345 how = SIG_UNBLOCK; 8346 break; 8347 case TARGET_SIG_SETMASK: 8348 how = SIG_SETMASK; 8349 break; 8350 default: 8351 ret = -TARGET_EINVAL; 8352 goto fail; 8353 } 8354 mask = arg2; 8355 target_to_host_old_sigset(&set, &mask); 8356 do_sigprocmask(how, &set, &oldset); 8357 host_to_target_old_sigset(&mask, &oldset); 8358 ret = mask; 8359 } 8360 break; 8361 #endif 8362 8363 #ifdef TARGET_NR_getgid32 8364 case TARGET_NR_getgid32: 8365 ret = get_errno(getgid()); 8366 break; 8367 #endif 8368 #ifdef TARGET_NR_geteuid32 8369 case TARGET_NR_geteuid32: 8370 ret = get_errno(geteuid()); 8371 break; 8372 #endif 8373 #ifdef TARGET_NR_getegid32 8374 case TARGET_NR_getegid32: 8375 ret = get_errno(getegid()); 8376 break; 8377 #endif 8378 #ifdef TARGET_NR_setreuid32 8379 case TARGET_NR_setreuid32: 8380 ret = get_errno(setreuid(arg1, arg2)); 8381 break; 8382 #endif 8383 #ifdef TARGET_NR_setregid32 8384 case TARGET_NR_setregid32: 8385 ret = get_errno(setregid(arg1, arg2)); 8386 break; 8387 #endif 8388 #ifdef TARGET_NR_getgroups32 8389 case TARGET_NR_getgroups32: 8390 { 8391 int gidsetsize = arg1; 8392 uint32_t *target_grouplist; 8393 gid_t *grouplist; 8394 int i; 8395 8396 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8397 ret = get_errno(getgroups(gidsetsize, grouplist)); 8398 if (gidsetsize == 0) 8399 break; 8400 if (!is_error(ret)) { 8401 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8402 if (!target_grouplist) { 8403 ret = -TARGET_EFAULT; 8404 goto fail; 8405 } 8406 for(i = 0;i < ret; i++) 8407 target_grouplist[i] = 
tswap32(grouplist[i]); 8408 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8409 } 8410 } 8411 break; 8412 #endif 8413 #ifdef TARGET_NR_setgroups32 8414 case TARGET_NR_setgroups32: 8415 { 8416 int gidsetsize = arg1; 8417 uint32_t *target_grouplist; 8418 gid_t *grouplist; 8419 int i; 8420 8421 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8422 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8423 if (!target_grouplist) { 8424 ret = -TARGET_EFAULT; 8425 goto fail; 8426 } 8427 for(i = 0;i < gidsetsize; i++) 8428 grouplist[i] = tswap32(target_grouplist[i]); 8429 unlock_user(target_grouplist, arg2, 0); 8430 ret = get_errno(setgroups(gidsetsize, grouplist)); 8431 } 8432 break; 8433 #endif 8434 #ifdef TARGET_NR_fchown32 8435 case TARGET_NR_fchown32: 8436 ret = get_errno(fchown(arg1, arg2, arg3)); 8437 break; 8438 #endif 8439 #ifdef TARGET_NR_setresuid32 8440 case TARGET_NR_setresuid32: 8441 ret = get_errno(setresuid(arg1, arg2, arg3)); 8442 break; 8443 #endif 8444 #ifdef TARGET_NR_getresuid32 8445 case TARGET_NR_getresuid32: 8446 { 8447 uid_t ruid, euid, suid; 8448 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8449 if (!is_error(ret)) { 8450 if (put_user_u32(ruid, arg1) 8451 || put_user_u32(euid, arg2) 8452 || put_user_u32(suid, arg3)) 8453 goto efault; 8454 } 8455 } 8456 break; 8457 #endif 8458 #ifdef TARGET_NR_setresgid32 8459 case TARGET_NR_setresgid32: 8460 ret = get_errno(setresgid(arg1, arg2, arg3)); 8461 break; 8462 #endif 8463 #ifdef TARGET_NR_getresgid32 8464 case TARGET_NR_getresgid32: 8465 { 8466 gid_t rgid, egid, sgid; 8467 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8468 if (!is_error(ret)) { 8469 if (put_user_u32(rgid, arg1) 8470 || put_user_u32(egid, arg2) 8471 || put_user_u32(sgid, arg3)) 8472 goto efault; 8473 } 8474 } 8475 break; 8476 #endif 8477 #ifdef TARGET_NR_chown32 8478 case TARGET_NR_chown32: 8479 if (!(p = lock_user_string(arg1))) 8480 goto efault; 8481 ret = get_errno(chown(p, arg2, arg3)); 8482 unlock_user(p, 
arg1, 0); 8483 break; 8484 #endif 8485 #ifdef TARGET_NR_setuid32 8486 case TARGET_NR_setuid32: 8487 ret = get_errno(setuid(arg1)); 8488 break; 8489 #endif 8490 #ifdef TARGET_NR_setgid32 8491 case TARGET_NR_setgid32: 8492 ret = get_errno(setgid(arg1)); 8493 break; 8494 #endif 8495 #ifdef TARGET_NR_setfsuid32 8496 case TARGET_NR_setfsuid32: 8497 ret = get_errno(setfsuid(arg1)); 8498 break; 8499 #endif 8500 #ifdef TARGET_NR_setfsgid32 8501 case TARGET_NR_setfsgid32: 8502 ret = get_errno(setfsgid(arg1)); 8503 break; 8504 #endif 8505 8506 case TARGET_NR_pivot_root: 8507 goto unimplemented; 8508 #ifdef TARGET_NR_mincore 8509 case TARGET_NR_mincore: 8510 { 8511 void *a; 8512 ret = -TARGET_EFAULT; 8513 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 8514 goto efault; 8515 if (!(p = lock_user_string(arg3))) 8516 goto mincore_fail; 8517 ret = get_errno(mincore(a, arg2, p)); 8518 unlock_user(p, arg3, ret); 8519 mincore_fail: 8520 unlock_user(a, arg1, 0); 8521 } 8522 break; 8523 #endif 8524 #ifdef TARGET_NR_arm_fadvise64_64 8525 case TARGET_NR_arm_fadvise64_64: 8526 { 8527 /* 8528 * arm_fadvise64_64 looks like fadvise64_64 but 8529 * with different argument order 8530 */ 8531 abi_long temp; 8532 temp = arg3; 8533 arg3 = arg4; 8534 arg4 = temp; 8535 } 8536 #endif 8537 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 8538 #ifdef TARGET_NR_fadvise64_64 8539 case TARGET_NR_fadvise64_64: 8540 #endif 8541 #ifdef TARGET_NR_fadvise64 8542 case TARGET_NR_fadvise64: 8543 #endif 8544 #ifdef TARGET_S390X 8545 switch (arg4) { 8546 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 8547 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 8548 case 6: arg4 = POSIX_FADV_DONTNEED; break; 8549 case 7: arg4 = POSIX_FADV_NOREUSE; break; 8550 default: break; 8551 } 8552 #endif 8553 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 8554 break; 8555 #endif 8556 #ifdef TARGET_NR_madvise 8557 case 
TARGET_NR_madvise: 8558 /* A straight passthrough may not be safe because qemu sometimes 8559 turns private file-backed mappings into anonymous mappings. 8560 This will break MADV_DONTNEED. 8561 This is a hint, so ignoring and returning success is ok. */ 8562 ret = get_errno(0); 8563 break; 8564 #endif 8565 #if TARGET_ABI_BITS == 32 8566 case TARGET_NR_fcntl64: 8567 { 8568 int cmd; 8569 struct flock64 fl; 8570 struct target_flock64 *target_fl; 8571 #ifdef TARGET_ARM 8572 struct target_eabi_flock64 *target_efl; 8573 #endif 8574 8575 cmd = target_to_host_fcntl_cmd(arg2); 8576 if (cmd == -TARGET_EINVAL) { 8577 ret = cmd; 8578 break; 8579 } 8580 8581 switch(arg2) { 8582 case TARGET_F_GETLK64: 8583 #ifdef TARGET_ARM 8584 if (((CPUARMState *)cpu_env)->eabi) { 8585 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8586 goto efault; 8587 fl.l_type = tswap16(target_efl->l_type); 8588 fl.l_whence = tswap16(target_efl->l_whence); 8589 fl.l_start = tswap64(target_efl->l_start); 8590 fl.l_len = tswap64(target_efl->l_len); 8591 fl.l_pid = tswap32(target_efl->l_pid); 8592 unlock_user_struct(target_efl, arg3, 0); 8593 } else 8594 #endif 8595 { 8596 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8597 goto efault; 8598 fl.l_type = tswap16(target_fl->l_type); 8599 fl.l_whence = tswap16(target_fl->l_whence); 8600 fl.l_start = tswap64(target_fl->l_start); 8601 fl.l_len = tswap64(target_fl->l_len); 8602 fl.l_pid = tswap32(target_fl->l_pid); 8603 unlock_user_struct(target_fl, arg3, 0); 8604 } 8605 ret = get_errno(fcntl(arg1, cmd, &fl)); 8606 if (ret == 0) { 8607 #ifdef TARGET_ARM 8608 if (((CPUARMState *)cpu_env)->eabi) { 8609 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8610 goto efault; 8611 target_efl->l_type = tswap16(fl.l_type); 8612 target_efl->l_whence = tswap16(fl.l_whence); 8613 target_efl->l_start = tswap64(fl.l_start); 8614 target_efl->l_len = tswap64(fl.l_len); 8615 target_efl->l_pid = tswap32(fl.l_pid); 8616 unlock_user_struct(target_efl, arg3, 
1); 8617 } else 8618 #endif 8619 { 8620 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8621 goto efault; 8622 target_fl->l_type = tswap16(fl.l_type); 8623 target_fl->l_whence = tswap16(fl.l_whence); 8624 target_fl->l_start = tswap64(fl.l_start); 8625 target_fl->l_len = tswap64(fl.l_len); 8626 target_fl->l_pid = tswap32(fl.l_pid); 8627 unlock_user_struct(target_fl, arg3, 1); 8628 } 8629 } 8630 break; 8631 8632 case TARGET_F_SETLK64: 8633 case TARGET_F_SETLKW64: 8634 #ifdef TARGET_ARM 8635 if (((CPUARMState *)cpu_env)->eabi) { 8636 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8637 goto efault; 8638 fl.l_type = tswap16(target_efl->l_type); 8639 fl.l_whence = tswap16(target_efl->l_whence); 8640 fl.l_start = tswap64(target_efl->l_start); 8641 fl.l_len = tswap64(target_efl->l_len); 8642 fl.l_pid = tswap32(target_efl->l_pid); 8643 unlock_user_struct(target_efl, arg3, 0); 8644 } else 8645 #endif 8646 { 8647 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8648 goto efault; 8649 fl.l_type = tswap16(target_fl->l_type); 8650 fl.l_whence = tswap16(target_fl->l_whence); 8651 fl.l_start = tswap64(target_fl->l_start); 8652 fl.l_len = tswap64(target_fl->l_len); 8653 fl.l_pid = tswap32(target_fl->l_pid); 8654 unlock_user_struct(target_fl, arg3, 0); 8655 } 8656 ret = get_errno(fcntl(arg1, cmd, &fl)); 8657 break; 8658 default: 8659 ret = do_fcntl(arg1, arg2, arg3); 8660 break; 8661 } 8662 break; 8663 } 8664 #endif 8665 #ifdef TARGET_NR_cacheflush 8666 case TARGET_NR_cacheflush: 8667 /* self-modifying code is handled automatically, so nothing needed */ 8668 ret = 0; 8669 break; 8670 #endif 8671 #ifdef TARGET_NR_security 8672 case TARGET_NR_security: 8673 goto unimplemented; 8674 #endif 8675 #ifdef TARGET_NR_getpagesize 8676 case TARGET_NR_getpagesize: 8677 ret = TARGET_PAGE_SIZE; 8678 break; 8679 #endif 8680 case TARGET_NR_gettid: 8681 ret = get_errno(gettid()); 8682 break; 8683 #ifdef TARGET_NR_readahead 8684 case TARGET_NR_readahead: 8685 #if 
TARGET_ABI_BITS == 32 8686 if (regpairs_aligned(cpu_env)) { 8687 arg2 = arg3; 8688 arg3 = arg4; 8689 arg4 = arg5; 8690 } 8691 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 8692 #else 8693 ret = get_errno(readahead(arg1, arg2, arg3)); 8694 #endif 8695 break; 8696 #endif 8697 #ifdef CONFIG_ATTR 8698 #ifdef TARGET_NR_setxattr 8699 case TARGET_NR_listxattr: 8700 case TARGET_NR_llistxattr: 8701 { 8702 void *p, *b = 0; 8703 if (arg2) { 8704 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8705 if (!b) { 8706 ret = -TARGET_EFAULT; 8707 break; 8708 } 8709 } 8710 p = lock_user_string(arg1); 8711 if (p) { 8712 if (num == TARGET_NR_listxattr) { 8713 ret = get_errno(listxattr(p, b, arg3)); 8714 } else { 8715 ret = get_errno(llistxattr(p, b, arg3)); 8716 } 8717 } else { 8718 ret = -TARGET_EFAULT; 8719 } 8720 unlock_user(p, arg1, 0); 8721 unlock_user(b, arg2, arg3); 8722 break; 8723 } 8724 case TARGET_NR_flistxattr: 8725 { 8726 void *b = 0; 8727 if (arg2) { 8728 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8729 if (!b) { 8730 ret = -TARGET_EFAULT; 8731 break; 8732 } 8733 } 8734 ret = get_errno(flistxattr(arg1, b, arg3)); 8735 unlock_user(b, arg2, arg3); 8736 break; 8737 } 8738 case TARGET_NR_setxattr: 8739 case TARGET_NR_lsetxattr: 8740 { 8741 void *p, *n, *v = 0; 8742 if (arg3) { 8743 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8744 if (!v) { 8745 ret = -TARGET_EFAULT; 8746 break; 8747 } 8748 } 8749 p = lock_user_string(arg1); 8750 n = lock_user_string(arg2); 8751 if (p && n) { 8752 if (num == TARGET_NR_setxattr) { 8753 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8754 } else { 8755 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8756 } 8757 } else { 8758 ret = -TARGET_EFAULT; 8759 } 8760 unlock_user(p, arg1, 0); 8761 unlock_user(n, arg2, 0); 8762 unlock_user(v, arg3, 0); 8763 } 8764 break; 8765 case TARGET_NR_fsetxattr: 8766 { 8767 void *n, *v = 0; 8768 if (arg3) { 8769 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8770 if (!v) { 8771 ret = -TARGET_EFAULT; 
8772 break; 8773 } 8774 } 8775 n = lock_user_string(arg2); 8776 if (n) { 8777 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8778 } else { 8779 ret = -TARGET_EFAULT; 8780 } 8781 unlock_user(n, arg2, 0); 8782 unlock_user(v, arg3, 0); 8783 } 8784 break; 8785 case TARGET_NR_getxattr: 8786 case TARGET_NR_lgetxattr: 8787 { 8788 void *p, *n, *v = 0; 8789 if (arg3) { 8790 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8791 if (!v) { 8792 ret = -TARGET_EFAULT; 8793 break; 8794 } 8795 } 8796 p = lock_user_string(arg1); 8797 n = lock_user_string(arg2); 8798 if (p && n) { 8799 if (num == TARGET_NR_getxattr) { 8800 ret = get_errno(getxattr(p, n, v, arg4)); 8801 } else { 8802 ret = get_errno(lgetxattr(p, n, v, arg4)); 8803 } 8804 } else { 8805 ret = -TARGET_EFAULT; 8806 } 8807 unlock_user(p, arg1, 0); 8808 unlock_user(n, arg2, 0); 8809 unlock_user(v, arg3, arg4); 8810 } 8811 break; 8812 case TARGET_NR_fgetxattr: 8813 { 8814 void *n, *v = 0; 8815 if (arg3) { 8816 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8817 if (!v) { 8818 ret = -TARGET_EFAULT; 8819 break; 8820 } 8821 } 8822 n = lock_user_string(arg2); 8823 if (n) { 8824 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8825 } else { 8826 ret = -TARGET_EFAULT; 8827 } 8828 unlock_user(n, arg2, 0); 8829 unlock_user(v, arg3, arg4); 8830 } 8831 break; 8832 case TARGET_NR_removexattr: 8833 case TARGET_NR_lremovexattr: 8834 { 8835 void *p, *n; 8836 p = lock_user_string(arg1); 8837 n = lock_user_string(arg2); 8838 if (p && n) { 8839 if (num == TARGET_NR_removexattr) { 8840 ret = get_errno(removexattr(p, n)); 8841 } else { 8842 ret = get_errno(lremovexattr(p, n)); 8843 } 8844 } else { 8845 ret = -TARGET_EFAULT; 8846 } 8847 unlock_user(p, arg1, 0); 8848 unlock_user(n, arg2, 0); 8849 } 8850 break; 8851 case TARGET_NR_fremovexattr: 8852 { 8853 void *n; 8854 n = lock_user_string(arg2); 8855 if (n) { 8856 ret = get_errno(fremovexattr(arg1, n)); 8857 } else { 8858 ret = -TARGET_EFAULT; 8859 } 8860 unlock_user(n, arg2, 0); 8861 } 8862 break; 
8863 #endif 8864 #endif /* CONFIG_ATTR */ 8865 #ifdef TARGET_NR_set_thread_area 8866 case TARGET_NR_set_thread_area: 8867 #if defined(TARGET_MIPS) 8868 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; 8869 ret = 0; 8870 break; 8871 #elif defined(TARGET_CRIS) 8872 if (arg1 & 0xff) 8873 ret = -TARGET_EINVAL; 8874 else { 8875 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 8876 ret = 0; 8877 } 8878 break; 8879 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 8880 ret = do_set_thread_area(cpu_env, arg1); 8881 break; 8882 #elif defined(TARGET_M68K) 8883 { 8884 TaskState *ts = cpu->opaque; 8885 ts->tp_value = arg1; 8886 ret = 0; 8887 break; 8888 } 8889 #else 8890 goto unimplemented_nowarn; 8891 #endif 8892 #endif 8893 #ifdef TARGET_NR_get_thread_area 8894 case TARGET_NR_get_thread_area: 8895 #if defined(TARGET_I386) && defined(TARGET_ABI32) 8896 ret = do_get_thread_area(cpu_env, arg1); 8897 break; 8898 #elif defined(TARGET_M68K) 8899 { 8900 TaskState *ts = cpu->opaque; 8901 ret = ts->tp_value; 8902 break; 8903 } 8904 #else 8905 goto unimplemented_nowarn; 8906 #endif 8907 #endif 8908 #ifdef TARGET_NR_getdomainname 8909 case TARGET_NR_getdomainname: 8910 goto unimplemented_nowarn; 8911 #endif 8912 8913 #ifdef TARGET_NR_clock_gettime 8914 case TARGET_NR_clock_gettime: 8915 { 8916 struct timespec ts; 8917 ret = get_errno(clock_gettime(arg1, &ts)); 8918 if (!is_error(ret)) { 8919 host_to_target_timespec(arg2, &ts); 8920 } 8921 break; 8922 } 8923 #endif 8924 #ifdef TARGET_NR_clock_getres 8925 case TARGET_NR_clock_getres: 8926 { 8927 struct timespec ts; 8928 ret = get_errno(clock_getres(arg1, &ts)); 8929 if (!is_error(ret)) { 8930 host_to_target_timespec(arg2, &ts); 8931 } 8932 break; 8933 } 8934 #endif 8935 #ifdef TARGET_NR_clock_nanosleep 8936 case TARGET_NR_clock_nanosleep: 8937 { 8938 struct timespec ts; 8939 target_to_host_timespec(&ts, arg3); 8940 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); 8941 if (arg4) 8942 host_to_target_timespec(arg4, &ts); 8943 break; 8944 } 8945 #endif 8946 8947 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 8948 case TARGET_NR_set_tid_address: 8949 ret = get_errno(set_tid_address((int *)g2h(arg1))); 8950 break; 8951 #endif 8952 8953 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 8954 case TARGET_NR_tkill: 8955 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 8956 break; 8957 #endif 8958 8959 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 8960 case TARGET_NR_tgkill: 8961 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 8962 target_to_host_signal(arg3))); 8963 break; 8964 #endif 8965 8966 #ifdef TARGET_NR_set_robust_list 8967 case TARGET_NR_set_robust_list: 8968 case TARGET_NR_get_robust_list: 8969 /* The ABI for supporting robust futexes has userspace pass 8970 * the kernel a pointer to a linked list which is updated by 8971 * userspace after the syscall; the list is walked by the kernel 8972 * when the thread exits. Since the linked list in QEMU guest 8973 * memory isn't a valid linked list for the host and we have 8974 * no way to reliably intercept the thread-death event, we can't 8975 * support these. Silently return ENOSYS so that guest userspace 8976 * falls back to a non-robust futex implementation (which should 8977 * be OK except in the corner case of the guest crashing while 8978 * holding a mutex that is shared with another process via 8979 * shared memory). 
8980 */ 8981 goto unimplemented_nowarn; 8982 #endif 8983 8984 #if defined(TARGET_NR_utimensat) 8985 case TARGET_NR_utimensat: 8986 { 8987 struct timespec *tsp, ts[2]; 8988 if (!arg3) { 8989 tsp = NULL; 8990 } else { 8991 target_to_host_timespec(ts, arg3); 8992 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 8993 tsp = ts; 8994 } 8995 if (!arg2) 8996 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 8997 else { 8998 if (!(p = lock_user_string(arg2))) { 8999 ret = -TARGET_EFAULT; 9000 goto fail; 9001 } 9002 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 9003 unlock_user(p, arg2, 0); 9004 } 9005 } 9006 break; 9007 #endif 9008 case TARGET_NR_futex: 9009 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 9010 break; 9011 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 9012 case TARGET_NR_inotify_init: 9013 ret = get_errno(sys_inotify_init()); 9014 break; 9015 #endif 9016 #ifdef CONFIG_INOTIFY1 9017 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 9018 case TARGET_NR_inotify_init1: 9019 ret = get_errno(sys_inotify_init1(arg1)); 9020 break; 9021 #endif 9022 #endif 9023 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 9024 case TARGET_NR_inotify_add_watch: 9025 p = lock_user_string(arg2); 9026 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 9027 unlock_user(p, arg2, 0); 9028 break; 9029 #endif 9030 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 9031 case TARGET_NR_inotify_rm_watch: 9032 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 9033 break; 9034 #endif 9035 9036 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 9037 case TARGET_NR_mq_open: 9038 { 9039 struct mq_attr posix_mq_attr; 9040 9041 p = lock_user_string(arg1 - 1); 9042 if (arg4 != 0) 9043 copy_from_user_mq_attr (&posix_mq_attr, arg4); 9044 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 9045 unlock_user (p, arg1, 0); 9046 } 9047 break; 9048 9049 case 
TARGET_NR_mq_unlink: 9050 p = lock_user_string(arg1 - 1); 9051 ret = get_errno(mq_unlink(p)); 9052 unlock_user (p, arg1, 0); 9053 break; 9054 9055 case TARGET_NR_mq_timedsend: 9056 { 9057 struct timespec ts; 9058 9059 p = lock_user (VERIFY_READ, arg2, arg3, 1); 9060 if (arg5 != 0) { 9061 target_to_host_timespec(&ts, arg5); 9062 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 9063 host_to_target_timespec(arg5, &ts); 9064 } 9065 else 9066 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 9067 unlock_user (p, arg2, arg3); 9068 } 9069 break; 9070 9071 case TARGET_NR_mq_timedreceive: 9072 { 9073 struct timespec ts; 9074 unsigned int prio; 9075 9076 p = lock_user (VERIFY_READ, arg2, arg3, 1); 9077 if (arg5 != 0) { 9078 target_to_host_timespec(&ts, arg5); 9079 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); 9080 host_to_target_timespec(arg5, &ts); 9081 } 9082 else 9083 ret = get_errno(mq_receive(arg1, p, arg3, &prio)); 9084 unlock_user (p, arg2, arg3); 9085 if (arg4 != 0) 9086 put_user_u32(prio, arg4); 9087 } 9088 break; 9089 9090 /* Not implemented for now... 
*/ 9091 /* case TARGET_NR_mq_notify: */ 9092 /* break; */ 9093 9094 case TARGET_NR_mq_getsetattr: 9095 { 9096 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 9097 ret = 0; 9098 if (arg3 != 0) { 9099 ret = mq_getattr(arg1, &posix_mq_attr_out); 9100 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 9101 } 9102 if (arg2 != 0) { 9103 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 9104 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 9105 } 9106 9107 } 9108 break; 9109 #endif 9110 9111 #ifdef CONFIG_SPLICE 9112 #ifdef TARGET_NR_tee 9113 case TARGET_NR_tee: 9114 { 9115 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 9116 } 9117 break; 9118 #endif 9119 #ifdef TARGET_NR_splice 9120 case TARGET_NR_splice: 9121 { 9122 loff_t loff_in, loff_out; 9123 loff_t *ploff_in = NULL, *ploff_out = NULL; 9124 if(arg2) { 9125 get_user_u64(loff_in, arg2); 9126 ploff_in = &loff_in; 9127 } 9128 if(arg4) { 9129 get_user_u64(loff_out, arg2); 9130 ploff_out = &loff_out; 9131 } 9132 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 9133 } 9134 break; 9135 #endif 9136 #ifdef TARGET_NR_vmsplice 9137 case TARGET_NR_vmsplice: 9138 { 9139 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 9140 if (vec != NULL) { 9141 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 9142 unlock_iovec(vec, arg2, arg3, 0); 9143 } else { 9144 ret = -host_to_target_errno(errno); 9145 } 9146 } 9147 break; 9148 #endif 9149 #endif /* CONFIG_SPLICE */ 9150 #ifdef CONFIG_EVENTFD 9151 #if defined(TARGET_NR_eventfd) 9152 case TARGET_NR_eventfd: 9153 ret = get_errno(eventfd(arg1, 0)); 9154 break; 9155 #endif 9156 #if defined(TARGET_NR_eventfd2) 9157 case TARGET_NR_eventfd2: 9158 { 9159 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 9160 if (arg2 & TARGET_O_NONBLOCK) { 9161 host_flags |= O_NONBLOCK; 9162 } 9163 if (arg2 & TARGET_O_CLOEXEC) { 9164 host_flags |= O_CLOEXEC; 9165 } 9166 ret = get_errno(eventfd(arg1, host_flags)); 9167 break; 9168 } 9169 #endif 9170 #endif 
/* CONFIG_EVENTFD */ 9171 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 9172 case TARGET_NR_fallocate: 9173 #if TARGET_ABI_BITS == 32 9174 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 9175 target_offset64(arg5, arg6))); 9176 #else 9177 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 9178 #endif 9179 break; 9180 #endif 9181 #if defined(CONFIG_SYNC_FILE_RANGE) 9182 #if defined(TARGET_NR_sync_file_range) 9183 case TARGET_NR_sync_file_range: 9184 #if TARGET_ABI_BITS == 32 9185 #if defined(TARGET_MIPS) 9186 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9187 target_offset64(arg5, arg6), arg7)); 9188 #else 9189 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 9190 target_offset64(arg4, arg5), arg6)); 9191 #endif /* !TARGET_MIPS */ 9192 #else 9193 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 9194 #endif 9195 break; 9196 #endif 9197 #if defined(TARGET_NR_sync_file_range2) 9198 case TARGET_NR_sync_file_range2: 9199 /* This is like sync_file_range but the arguments are reordered */ 9200 #if TARGET_ABI_BITS == 32 9201 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9202 target_offset64(arg5, arg6), arg2)); 9203 #else 9204 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 9205 #endif 9206 break; 9207 #endif 9208 #endif 9209 #if defined(CONFIG_EPOLL) 9210 #if defined(TARGET_NR_epoll_create) 9211 case TARGET_NR_epoll_create: 9212 ret = get_errno(epoll_create(arg1)); 9213 break; 9214 #endif 9215 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 9216 case TARGET_NR_epoll_create1: 9217 ret = get_errno(epoll_create1(arg1)); 9218 break; 9219 #endif 9220 #if defined(TARGET_NR_epoll_ctl) 9221 case TARGET_NR_epoll_ctl: 9222 { 9223 struct epoll_event ep; 9224 struct epoll_event *epp = 0; 9225 if (arg4) { 9226 struct target_epoll_event *target_ep; 9227 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 9228 goto efault; 9229 } 9230 ep.events = 
tswap32(target_ep->events); 9231 /* The epoll_data_t union is just opaque data to the kernel, 9232 * so we transfer all 64 bits across and need not worry what 9233 * actual data type it is. 9234 */ 9235 ep.data.u64 = tswap64(target_ep->data.u64); 9236 unlock_user_struct(target_ep, arg4, 0); 9237 epp = &ep; 9238 } 9239 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 9240 break; 9241 } 9242 #endif 9243 9244 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 9245 #define IMPLEMENT_EPOLL_PWAIT 9246 #endif 9247 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 9248 #if defined(TARGET_NR_epoll_wait) 9249 case TARGET_NR_epoll_wait: 9250 #endif 9251 #if defined(IMPLEMENT_EPOLL_PWAIT) 9252 case TARGET_NR_epoll_pwait: 9253 #endif 9254 { 9255 struct target_epoll_event *target_ep; 9256 struct epoll_event *ep; 9257 int epfd = arg1; 9258 int maxevents = arg3; 9259 int timeout = arg4; 9260 9261 target_ep = lock_user(VERIFY_WRITE, arg2, 9262 maxevents * sizeof(struct target_epoll_event), 1); 9263 if (!target_ep) { 9264 goto efault; 9265 } 9266 9267 ep = alloca(maxevents * sizeof(struct epoll_event)); 9268 9269 switch (num) { 9270 #if defined(IMPLEMENT_EPOLL_PWAIT) 9271 case TARGET_NR_epoll_pwait: 9272 { 9273 target_sigset_t *target_set; 9274 sigset_t _set, *set = &_set; 9275 9276 if (arg5) { 9277 target_set = lock_user(VERIFY_READ, arg5, 9278 sizeof(target_sigset_t), 1); 9279 if (!target_set) { 9280 unlock_user(target_ep, arg2, 0); 9281 goto efault; 9282 } 9283 target_to_host_sigset(set, target_set); 9284 unlock_user(target_set, arg5, 0); 9285 } else { 9286 set = NULL; 9287 } 9288 9289 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 9290 break; 9291 } 9292 #endif 9293 #if defined(TARGET_NR_epoll_wait) 9294 case TARGET_NR_epoll_wait: 9295 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 9296 break; 9297 #endif 9298 default: 9299 ret = -TARGET_ENOSYS; 9300 } 9301 if (!is_error(ret)) { 9302 int i; 9303 for (i = 0; i < 
ret; i++) { 9304 target_ep[i].events = tswap32(ep[i].events); 9305 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 9306 } 9307 } 9308 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 9309 break; 9310 } 9311 #endif 9312 #endif 9313 #ifdef TARGET_NR_prlimit64 9314 case TARGET_NR_prlimit64: 9315 { 9316 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 9317 struct target_rlimit64 *target_rnew, *target_rold; 9318 struct host_rlimit64 rnew, rold, *rnewp = 0; 9319 if (arg3) { 9320 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 9321 goto efault; 9322 } 9323 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 9324 rnew.rlim_max = tswap64(target_rnew->rlim_max); 9325 unlock_user_struct(target_rnew, arg3, 0); 9326 rnewp = &rnew; 9327 } 9328 9329 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0)); 9330 if (!is_error(ret) && arg4) { 9331 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 9332 goto efault; 9333 } 9334 target_rold->rlim_cur = tswap64(rold.rlim_cur); 9335 target_rold->rlim_max = tswap64(rold.rlim_max); 9336 unlock_user_struct(target_rold, arg4, 1); 9337 } 9338 break; 9339 } 9340 #endif 9341 #ifdef TARGET_NR_gethostname 9342 case TARGET_NR_gethostname: 9343 { 9344 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9345 if (name) { 9346 ret = get_errno(gethostname(name, arg2)); 9347 unlock_user(name, arg1, arg2); 9348 } else { 9349 ret = -TARGET_EFAULT; 9350 } 9351 break; 9352 } 9353 #endif 9354 #ifdef TARGET_NR_atomic_cmpxchg_32 9355 case TARGET_NR_atomic_cmpxchg_32: 9356 { 9357 /* should use start_exclusive from main.c */ 9358 abi_ulong mem_value; 9359 if (get_user_u32(mem_value, arg6)) { 9360 target_siginfo_t info; 9361 info.si_signo = SIGSEGV; 9362 info.si_errno = 0; 9363 info.si_code = TARGET_SEGV_MAPERR; 9364 info._sifields._sigfault._addr = arg6; 9365 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 9366 ret = 0xdeadbeef; 9367 9368 } 9369 if (mem_value == arg2) 9370 
put_user_u32(arg1, arg6); 9371 ret = mem_value; 9372 break; 9373 } 9374 #endif 9375 #ifdef TARGET_NR_atomic_barrier 9376 case TARGET_NR_atomic_barrier: 9377 { 9378 /* Like the kernel implementation and the qemu arm barrier, no-op this? */ 9379 ret = 0; 9380 break; 9381 } 9382 #endif 9383 9384 #ifdef TARGET_NR_timer_create 9385 case TARGET_NR_timer_create: 9386 { 9387 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 9388 9389 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 9390 struct target_sigevent *ptarget_sevp; 9391 struct target_timer_t *ptarget_timer; 9392 9393 int clkid = arg1; 9394 int timer_index = next_free_host_timer(); 9395 9396 if (timer_index < 0) { 9397 ret = -TARGET_EAGAIN; 9398 } else { 9399 timer_t *phtimer = g_posix_timers + timer_index; 9400 9401 if (arg2) { 9402 if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) { 9403 goto efault; 9404 } 9405 9406 host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo); 9407 host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify); 9408 9409 phost_sevp = &host_sevp; 9410 } 9411 9412 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 9413 if (ret) { 9414 phtimer = NULL; 9415 } else { 9416 if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) { 9417 goto efault; 9418 } 9419 ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index); 9420 unlock_user_struct(ptarget_timer, arg3, 1); 9421 } 9422 } 9423 break; 9424 } 9425 #endif 9426 9427 #ifdef TARGET_NR_timer_settime 9428 case TARGET_NR_timer_settime: 9429 { 9430 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 9431 * struct itimerspec * old_value */ 9432 arg1 &= 0xffff; 9433 if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9434 ret = -TARGET_EINVAL; 9435 } else { 9436 timer_t htimer = g_posix_timers[arg1]; 9437 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 9438 9439 target_to_host_itimerspec(&hspec_new, arg3); 9440 ret = get_errno( 9441 
timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 9442 host_to_target_itimerspec(arg2, &hspec_old); 9443 } 9444 break; 9445 } 9446 #endif 9447 9448 #ifdef TARGET_NR_timer_gettime 9449 case TARGET_NR_timer_gettime: 9450 { 9451 /* args: timer_t timerid, struct itimerspec *curr_value */ 9452 arg1 &= 0xffff; 9453 if (!arg2) { 9454 return -TARGET_EFAULT; 9455 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9456 ret = -TARGET_EINVAL; 9457 } else { 9458 timer_t htimer = g_posix_timers[arg1]; 9459 struct itimerspec hspec; 9460 ret = get_errno(timer_gettime(htimer, &hspec)); 9461 9462 if (host_to_target_itimerspec(arg2, &hspec)) { 9463 ret = -TARGET_EFAULT; 9464 } 9465 } 9466 break; 9467 } 9468 #endif 9469 9470 #ifdef TARGET_NR_timer_getoverrun 9471 case TARGET_NR_timer_getoverrun: 9472 { 9473 /* args: timer_t timerid */ 9474 arg1 &= 0xffff; 9475 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9476 ret = -TARGET_EINVAL; 9477 } else { 9478 timer_t htimer = g_posix_timers[arg1]; 9479 ret = get_errno(timer_getoverrun(htimer)); 9480 } 9481 break; 9482 } 9483 #endif 9484 9485 #ifdef TARGET_NR_timer_delete 9486 case TARGET_NR_timer_delete: 9487 { 9488 /* args: timer_t timerid */ 9489 arg1 &= 0xffff; 9490 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9491 ret = -TARGET_EINVAL; 9492 } else { 9493 timer_t htimer = g_posix_timers[arg1]; 9494 ret = get_errno(timer_delete(htimer)); 9495 g_posix_timers[arg1] = 0; 9496 } 9497 break; 9498 } 9499 #endif 9500 9501 default: 9502 unimplemented: 9503 gemu_log("qemu: Unsupported syscall: %d\n", num); 9504 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 9505 unimplemented_nowarn: 9506 #endif 9507 ret = -TARGET_ENOSYS; 9508 break; 9509 } 9510 fail: 9511 #ifdef DEBUG 9512 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 9513 #endif 9514 if(do_strace) 9515 print_syscall_ret(num, ret); 9516 return ret; 9517 efault: 9518 ret = 
-TARGET_EFAULT; 9519 goto fail; 9520 } 9521