/*
 * Linux syscalls
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"

#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}


#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
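/* Illustration only (not part of the original build): the _syscall* macros
 * above generate thin host-syscall wrappers by token-pasting the wrapper
 * name onto __NR_.  For example, the later declaration
 *
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 *
 * expands roughly to
 *
 *     static int sys_tkill(int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 *
 * where __NR_sys_tkill is remapped to the host's __NR_tkill by the
 * #define block above.
 */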
#ifdef __NR_getdents
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);

static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
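/* Illustrative note: fcntl_flags_tbl is consumed by the generic bitmask
 * translation helpers (e.g. target_to_host_bitmask(), as used by
 * do_accept4() further down) to convert guest open()/fcntl() flag bits
 * into their host equivalents row by row.  Assuming the helper walks the
 * table as its name suggests, a hypothetical guest value of
 * (TARGET_O_CREAT | TARGET_O_NONBLOCK) would come out as
 * (O_CREAT | O_NONBLOCK) on the host.
 */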

#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct the Linux kernel uses).
     */

    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}

#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif

#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1);
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
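/* Sketch only: syscall_init() is not part of this excerpt, but given the
 * comment above it presumably fills the reverse table with a loop along
 * the lines of:
 *
 *     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
 *         if (host_to_target_errno_table[i]) {
 *             target_to_host_errno_table[host_to_target_errno_table[i]] = i;
 *         }
 *     }
 */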
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
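/* Worked example of the error convention used throughout this file:
 * a failing host open() sets errno to ENOENT and returns -1, so
 * get_errno() yields -TARGET_ENOENT.  is_error() spots such values
 * because, viewed as an abi_ulong, they fall into the top 4096 values
 * of the address space (ret >= (abi_ulong)-4096), mirroring the way
 * the kernel/libc boundary distinguishes error returns from results.
 */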

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == SOL_SOCKET) &&
            (cmsg->cmsg_type == SCM_RIGHTS)) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == SOL_SOCKET) &&
                   (cmsg->cmsg_type == SO_TIMESTAMP) &&
                   (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
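/* Illustration of why the two cmsg walkers above track 'space' separately:
 * host and guest control-message headers may be padded differently, so a
 * single SCM_RIGHTS message carrying, say, 3 file descriptors occupies
 * CMSG_SPACE(3 * sizeof(int)) bytes in the host buffer but
 * TARGET_CMSG_SPACE(3 * sizeof(int)) bytes in the guest buffer; only the
 * fd payload itself is converted element by element with tswap32().
 */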

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = malloc(fprog.len * sizeof(*filter));
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
        default:
                goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                err = EFAULT;
                goto fail;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    unlock_user(target_vec, target_addr, 0);
 fail2:
    free(vec);
    errno = err;
    return NULL;
}

static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}

static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
    }
    return ret;
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

#ifdef TARGET_NR_sendmmsg
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
1999 */ 2000 #ifndef MSG_WAITFORONE 2001 #define MSG_WAITFORONE 0x10000 2002 #endif 2003 2004 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec, 2005 unsigned int vlen, unsigned int flags, 2006 int send) 2007 { 2008 struct target_mmsghdr *mmsgp; 2009 abi_long ret = 0; 2010 int i; 2011 2012 if (vlen > UIO_MAXIOV) { 2013 vlen = UIO_MAXIOV; 2014 } 2015 2016 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1); 2017 if (!mmsgp) { 2018 return -TARGET_EFAULT; 2019 } 2020 2021 for (i = 0; i < vlen; i++) { 2022 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send); 2023 if (is_error(ret)) { 2024 break; 2025 } 2026 mmsgp[i].msg_len = tswap32(ret); 2027 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 2028 if (flags & MSG_WAITFORONE) { 2029 flags |= MSG_DONTWAIT; 2030 } 2031 } 2032 2033 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i); 2034 2035 /* Return number of datagrams sent if we sent any at all; 2036 * otherwise return the error. 2037 */ 2038 if (i) { 2039 return i; 2040 } 2041 return ret; 2042 } 2043 #endif 2044 2045 /* If we don't have a system accept4() then just call accept. 2046 * The callsites to do_accept4() will ensure that they don't 2047 * pass a non-zero flags argument in this config. 2048 */ 2049 #ifndef CONFIG_ACCEPT4 2050 static inline int accept4(int sockfd, struct sockaddr *addr, 2051 socklen_t *addrlen, int flags) 2052 { 2053 assert(flags == 0); 2054 return accept(sockfd, addr, addrlen); 2055 } 2056 #endif 2057 2058 /* do_accept4() Must return target values and target errnos. */ 2059 static abi_long do_accept4(int fd, abi_ulong target_addr, 2060 abi_ulong target_addrlen_addr, int flags) 2061 { 2062 socklen_t addrlen; 2063 void *addr; 2064 abi_long ret; 2065 int host_flags; 2066 2067 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 2068 2069 if (target_addr == 0) { 2070 return get_errno(accept4(fd, NULL, NULL, host_flags)); 2071 } 2072 2073 /* linux returns EINVAL if addrlen pointer is invalid */ 2074 if (get_user_u32(addrlen, target_addrlen_addr)) 2075 return -TARGET_EINVAL; 2076 2077 if ((int)addrlen < 0) { 2078 return -TARGET_EINVAL; 2079 } 2080 2081 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2082 return -TARGET_EINVAL; 2083 2084 addr = alloca(addrlen); 2085 2086 ret = get_errno(accept4(fd, addr, &addrlen, host_flags)); 2087 if (!is_error(ret)) { 2088 host_to_target_sockaddr(target_addr, addr, addrlen); 2089 if (put_user_u32(addrlen, target_addrlen_addr)) 2090 ret = -TARGET_EFAULT; 2091 } 2092 return ret; 2093 } 2094 2095 /* do_getpeername() Must return target values and target errnos. */ 2096 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2097 abi_ulong target_addrlen_addr) 2098 { 2099 socklen_t addrlen; 2100 void *addr; 2101 abi_long ret; 2102 2103 if (get_user_u32(addrlen, target_addrlen_addr)) 2104 return -TARGET_EFAULT; 2105 2106 if ((int)addrlen < 0) { 2107 return -TARGET_EINVAL; 2108 } 2109 2110 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2111 return -TARGET_EFAULT; 2112 2113 addr = alloca(addrlen); 2114 2115 ret = get_errno(getpeername(fd, addr, &addrlen)); 2116 if (!is_error(ret)) { 2117 host_to_target_sockaddr(target_addr, addr, addrlen); 2118 if (put_user_u32(addrlen, target_addrlen_addr)) 2119 ret = -TARGET_EFAULT; 2120 } 2121 return ret; 2122 } 2123 2124 /* do_getsockname() Must return target values and target errnos. 
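 *
 * It follows the same shape as do_getpeername() and do_accept4() above:
 * read the guest's addrlen value, sanity-check it, verify the guest buffer
 * is writable, let the host syscall fill an alloca() buffer, then copy the
 * resulting sockaddr and the updated addrlen back out with
 * host_to_target_sockaddr() and put_user_u32().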
*/ 2125 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2126 abi_ulong target_addrlen_addr) 2127 { 2128 socklen_t addrlen; 2129 void *addr; 2130 abi_long ret; 2131 2132 if (get_user_u32(addrlen, target_addrlen_addr)) 2133 return -TARGET_EFAULT; 2134 2135 if ((int)addrlen < 0) { 2136 return -TARGET_EINVAL; 2137 } 2138 2139 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2140 return -TARGET_EFAULT; 2141 2142 addr = alloca(addrlen); 2143 2144 ret = get_errno(getsockname(fd, addr, &addrlen)); 2145 if (!is_error(ret)) { 2146 host_to_target_sockaddr(target_addr, addr, addrlen); 2147 if (put_user_u32(addrlen, target_addrlen_addr)) 2148 ret = -TARGET_EFAULT; 2149 } 2150 return ret; 2151 } 2152 2153 /* do_socketpair() Must return target values and target errnos. */ 2154 static abi_long do_socketpair(int domain, int type, int protocol, 2155 abi_ulong target_tab_addr) 2156 { 2157 int tab[2]; 2158 abi_long ret; 2159 2160 target_to_host_sock_type(&type); 2161 2162 ret = get_errno(socketpair(domain, type, protocol, tab)); 2163 if (!is_error(ret)) { 2164 if (put_user_s32(tab[0], target_tab_addr) 2165 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2166 ret = -TARGET_EFAULT; 2167 } 2168 return ret; 2169 } 2170 2171 /* do_sendto() Must return target values and target errnos. */ 2172 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2173 abi_ulong target_addr, socklen_t addrlen) 2174 { 2175 void *addr; 2176 void *host_msg; 2177 abi_long ret; 2178 2179 if ((int)addrlen < 0) { 2180 return -TARGET_EINVAL; 2181 } 2182 2183 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2184 if (!host_msg) 2185 return -TARGET_EFAULT; 2186 if (target_addr) { 2187 addr = alloca(addrlen); 2188 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2189 if (ret) { 2190 unlock_user(host_msg, msg, 0); 2191 return ret; 2192 } 2193 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2194 } else { 2195 ret = get_errno(send(fd, host_msg, len, flags)); 2196 } 2197 unlock_user(host_msg, msg, 0); 2198 return ret; 2199 } 2200 2201 /* do_recvfrom() Must return target values and target errnos. */ 2202 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2203 abi_ulong target_addr, 2204 abi_ulong target_addrlen) 2205 { 2206 socklen_t addrlen; 2207 void *addr; 2208 void *host_msg; 2209 abi_long ret; 2210 2211 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2212 if (!host_msg) 2213 return -TARGET_EFAULT; 2214 if (target_addr) { 2215 if (get_user_u32(addrlen, target_addrlen)) { 2216 ret = -TARGET_EFAULT; 2217 goto fail; 2218 } 2219 if ((int)addrlen < 0) { 2220 ret = -TARGET_EINVAL; 2221 goto fail; 2222 } 2223 addr = alloca(addrlen); 2224 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2225 } else { 2226 addr = NULL; /* To keep compiler quiet. */ 2227 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2228 } 2229 if (!is_error(ret)) { 2230 if (target_addr) { 2231 host_to_target_sockaddr(target_addr, addr, addrlen); 2232 if (put_user_u32(addrlen, target_addrlen)) { 2233 ret = -TARGET_EFAULT; 2234 goto fail; 2235 } 2236 } 2237 unlock_user(host_msg, msg, len); 2238 } else { 2239 fail: 2240 unlock_user(host_msg, msg, 0); 2241 } 2242 return ret; 2243 } 2244 2245 #ifdef TARGET_NR_socketcall 2246 /* do_socketcall() Must return target values and target errnos. 
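 *
 * Worked example (illustrative): for a guest connect(fd, addr, len) issued
 * through the multiplexed socketcall syscall, num is SOCKOP_connect and
 * vptr points at three consecutive abi_longs in guest memory:
 *
 *     vptr + 0 * sizeof(abi_long)  ->  fd
 *     vptr + 1 * sizeof(abi_long)  ->  addr   (guest pointer)
 *     vptr + 2 * sizeof(abi_long)  ->  len
 *
 * ac[SOCKOP_connect] is 3, so the code below fetches exactly those three
 * words with get_user_ual() and then dispatches to do_connect().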
*/ 2247 static abi_long do_socketcall(int num, abi_ulong vptr) 2248 { 2249 static const unsigned ac[] = { /* number of arguments per call */ 2250 [SOCKOP_socket] = 3, /* domain, type, protocol */ 2251 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */ 2252 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */ 2253 [SOCKOP_listen] = 2, /* sockfd, backlog */ 2254 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */ 2255 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */ 2256 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */ 2257 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */ 2258 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */ 2259 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */ 2260 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */ 2261 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2262 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2263 [SOCKOP_shutdown] = 2, /* sockfd, how */ 2264 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */ 2265 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */ 2266 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2267 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2268 }; 2269 abi_long a[6]; /* max 6 args */ 2270 2271 /* first, collect the arguments in a[] according to ac[] */ 2272 if (num >= 0 && num < ARRAY_SIZE(ac)) { 2273 unsigned i; 2274 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */ 2275 for (i = 0; i < ac[num]; ++i) { 2276 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 2277 return -TARGET_EFAULT; 2278 } 2279 } 2280 } 2281 2282 /* now when we have the args, actually handle the call */ 2283 switch (num) { 2284 case SOCKOP_socket: /* domain, type, protocol */ 2285 return do_socket(a[0], a[1], a[2]); 2286 case SOCKOP_bind: /* sockfd, addr, addrlen */ 2287 return do_bind(a[0], a[1], a[2]); 2288 case SOCKOP_connect: /* sockfd, addr, addrlen */ 2289 return do_connect(a[0], a[1], a[2]); 2290 case SOCKOP_listen: /* sockfd, backlog */ 2291 return get_errno(listen(a[0], a[1])); 2292 case SOCKOP_accept: /* sockfd, addr, addrlen */ 2293 return do_accept4(a[0], a[1], a[2], 0); 2294 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */ 2295 return do_accept4(a[0], a[1], a[2], a[3]); 2296 case SOCKOP_getsockname: /* sockfd, addr, addrlen */ 2297 return do_getsockname(a[0], a[1], a[2]); 2298 case SOCKOP_getpeername: /* sockfd, addr, addrlen */ 2299 return do_getpeername(a[0], a[1], a[2]); 2300 case SOCKOP_socketpair: /* domain, type, protocol, tab */ 2301 return do_socketpair(a[0], a[1], a[2], a[3]); 2302 case SOCKOP_send: /* sockfd, msg, len, flags */ 2303 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 2304 case SOCKOP_recv: /* sockfd, msg, len, flags */ 2305 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 2306 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */ 2307 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 2308 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */ 2309 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 2310 case SOCKOP_shutdown: /* sockfd, how */ 2311 return get_errno(shutdown(a[0], a[1])); 2312 case SOCKOP_sendmsg: /* sockfd, msg, flags */ 2313 return do_sendrecvmsg(a[0], a[1], a[2], 1); 2314 case SOCKOP_recvmsg: /* sockfd, msg, flags */ 2315 return do_sendrecvmsg(a[0], a[1], a[2], 0); 2316 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */ 2317 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 2318 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, 
optlen */ 2319 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 2320 default: 2321 gemu_log("Unsupported socketcall: %d\n", num); 2322 return -TARGET_ENOSYS; 2323 } 2324 } 2325 #endif 2326 2327 #define N_SHM_REGIONS 32 2328 2329 static struct shm_region { 2330 abi_ulong start; 2331 abi_ulong size; 2332 } shm_regions[N_SHM_REGIONS]; 2333 2334 struct target_semid_ds 2335 { 2336 struct target_ipc_perm sem_perm; 2337 abi_ulong sem_otime; 2338 abi_ulong __unused1; 2339 abi_ulong sem_ctime; 2340 abi_ulong __unused2; 2341 abi_ulong sem_nsems; 2342 abi_ulong __unused3; 2343 abi_ulong __unused4; 2344 }; 2345 2346 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2347 abi_ulong target_addr) 2348 { 2349 struct target_ipc_perm *target_ip; 2350 struct target_semid_ds *target_sd; 2351 2352 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2353 return -TARGET_EFAULT; 2354 target_ip = &(target_sd->sem_perm); 2355 host_ip->__key = tswap32(target_ip->__key); 2356 host_ip->uid = tswap32(target_ip->uid); 2357 host_ip->gid = tswap32(target_ip->gid); 2358 host_ip->cuid = tswap32(target_ip->cuid); 2359 host_ip->cgid = tswap32(target_ip->cgid); 2360 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2361 host_ip->mode = tswap32(target_ip->mode); 2362 #else 2363 host_ip->mode = tswap16(target_ip->mode); 2364 #endif 2365 #if defined(TARGET_PPC) 2366 host_ip->__seq = tswap32(target_ip->__seq); 2367 #else 2368 host_ip->__seq = tswap16(target_ip->__seq); 2369 #endif 2370 unlock_user_struct(target_sd, target_addr, 0); 2371 return 0; 2372 } 2373 2374 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2375 struct ipc_perm *host_ip) 2376 { 2377 struct target_ipc_perm *target_ip; 2378 struct target_semid_ds *target_sd; 2379 2380 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2381 return -TARGET_EFAULT; 2382 target_ip = &(target_sd->sem_perm); 2383 target_ip->__key = tswap32(host_ip->__key); 2384 target_ip->uid = tswap32(host_ip->uid); 2385 target_ip->gid = tswap32(host_ip->gid); 2386 target_ip->cuid = tswap32(host_ip->cuid); 2387 target_ip->cgid = tswap32(host_ip->cgid); 2388 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2389 target_ip->mode = tswap32(host_ip->mode); 2390 #else 2391 target_ip->mode = tswap16(host_ip->mode); 2392 #endif 2393 #if defined(TARGET_PPC) 2394 target_ip->__seq = tswap32(host_ip->__seq); 2395 #else 2396 target_ip->__seq = tswap16(host_ip->__seq); 2397 #endif 2398 unlock_user_struct(target_sd, target_addr, 1); 2399 return 0; 2400 } 2401 2402 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2403 abi_ulong target_addr) 2404 { 2405 struct target_semid_ds *target_sd; 2406 2407 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2408 return -TARGET_EFAULT; 2409 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2410 return -TARGET_EFAULT; 2411 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2412 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2413 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2414 unlock_user_struct(target_sd, target_addr, 0); 2415 return 0; 2416 } 2417 2418 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2419 struct semid_ds *host_sd) 2420 { 2421 struct target_semid_ds *target_sd; 2422 2423 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2424 return -TARGET_EFAULT; 2425 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2426 return -TARGET_EFAULT; 2427 
target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2428 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2429 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2430 unlock_user_struct(target_sd, target_addr, 1); 2431 return 0; 2432 } 2433 2434 struct target_seminfo { 2435 int semmap; 2436 int semmni; 2437 int semmns; 2438 int semmnu; 2439 int semmsl; 2440 int semopm; 2441 int semume; 2442 int semusz; 2443 int semvmx; 2444 int semaem; 2445 }; 2446 2447 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2448 struct seminfo *host_seminfo) 2449 { 2450 struct target_seminfo *target_seminfo; 2451 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2452 return -TARGET_EFAULT; 2453 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2454 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2455 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2456 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2457 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2458 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2459 __put_user(host_seminfo->semume, &target_seminfo->semume); 2460 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2461 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2462 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2463 unlock_user_struct(target_seminfo, target_addr, 1); 2464 return 0; 2465 } 2466 2467 union semun { 2468 int val; 2469 struct semid_ds *buf; 2470 unsigned short *array; 2471 struct seminfo *__buf; 2472 }; 2473 2474 union target_semun { 2475 int val; 2476 abi_ulong buf; 2477 abi_ulong array; 2478 abi_ulong __buf; 2479 }; 2480 2481 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2482 abi_ulong target_addr) 2483 { 2484 int nsems; 2485 unsigned short *array; 2486 union semun semun; 2487 struct semid_ds semid_ds; 2488 int i, ret; 2489 2490 semun.buf = &semid_ds; 2491 2492 ret = semctl(semid, 0, IPC_STAT, semun); 2493 if (ret == -1) 2494 return get_errno(ret); 2495 2496 nsems = semid_ds.sem_nsems; 2497 2498 *host_array = malloc(nsems*sizeof(unsigned short)); 2499 if (!*host_array) { 2500 return -TARGET_ENOMEM; 2501 } 2502 array = lock_user(VERIFY_READ, target_addr, 2503 nsems*sizeof(unsigned short), 1); 2504 if (!array) { 2505 free(*host_array); 2506 return -TARGET_EFAULT; 2507 } 2508 2509 for(i=0; i<nsems; i++) { 2510 __get_user((*host_array)[i], &array[i]); 2511 } 2512 unlock_user(array, target_addr, 0); 2513 2514 return 0; 2515 } 2516 2517 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2518 unsigned short **host_array) 2519 { 2520 int nsems; 2521 unsigned short *array; 2522 union semun semun; 2523 struct semid_ds semid_ds; 2524 int i, ret; 2525 2526 semun.buf = &semid_ds; 2527 2528 ret = semctl(semid, 0, IPC_STAT, semun); 2529 if (ret == -1) 2530 return get_errno(ret); 2531 2532 nsems = semid_ds.sem_nsems; 2533 2534 array = lock_user(VERIFY_WRITE, target_addr, 2535 nsems*sizeof(unsigned short), 0); 2536 if (!array) 2537 return -TARGET_EFAULT; 2538 2539 for(i=0; i<nsems; i++) { 2540 __put_user((*host_array)[i], &array[i]); 2541 } 2542 free(*host_array); 2543 unlock_user(array, target_addr, 1); 2544 2545 return 0; 2546 } 2547 2548 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2549 union target_semun target_su) 2550 { 2551 union semun arg; 2552 struct semid_ds dsarg; 2553 unsigned short *array = NULL; 2554 struct seminfo seminfo; 2555 abi_long ret = -TARGET_EINVAL; 2556 
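    /* The switch below first masks cmd down to its low byte so that
     * version/flag bits some guests OR into the command word (e.g. IPC_64)
     * are ignored; each command then converts its semun argument to the
     * host layout where needed, calls the host semctl(), and converts any
     * result back into guest format.
     */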
abi_long err; 2557 cmd &= 0xff; 2558 2559 switch( cmd ) { 2560 case GETVAL: 2561 case SETVAL: 2562 arg.val = tswap32(target_su.val); 2563 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2564 target_su.val = tswap32(arg.val); 2565 break; 2566 case GETALL: 2567 case SETALL: 2568 err = target_to_host_semarray(semid, &array, target_su.array); 2569 if (err) 2570 return err; 2571 arg.array = array; 2572 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2573 err = host_to_target_semarray(semid, target_su.array, &array); 2574 if (err) 2575 return err; 2576 break; 2577 case IPC_STAT: 2578 case IPC_SET: 2579 case SEM_STAT: 2580 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2581 if (err) 2582 return err; 2583 arg.buf = &dsarg; 2584 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2585 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2586 if (err) 2587 return err; 2588 break; 2589 case IPC_INFO: 2590 case SEM_INFO: 2591 arg.__buf = &seminfo; 2592 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2593 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2594 if (err) 2595 return err; 2596 break; 2597 case IPC_RMID: 2598 case GETPID: 2599 case GETNCNT: 2600 case GETZCNT: 2601 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2602 break; 2603 } 2604 2605 return ret; 2606 } 2607 2608 struct target_sembuf { 2609 unsigned short sem_num; 2610 short sem_op; 2611 short sem_flg; 2612 }; 2613 2614 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2615 abi_ulong target_addr, 2616 unsigned nsops) 2617 { 2618 struct target_sembuf *target_sembuf; 2619 int i; 2620 2621 target_sembuf = lock_user(VERIFY_READ, target_addr, 2622 nsops*sizeof(struct target_sembuf), 1); 2623 if (!target_sembuf) 2624 return -TARGET_EFAULT; 2625 2626 for(i=0; i<nsops; i++) { 2627 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2628 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2629 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2630 } 2631 2632 unlock_user(target_sembuf, target_addr, 0); 2633 2634 return 0; 2635 } 2636 2637 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2638 { 2639 struct sembuf sops[nsops]; 2640 2641 if (target_to_host_sembuf(sops, ptr, nsops)) 2642 return -TARGET_EFAULT; 2643 2644 return get_errno(semop(semid, sops, nsops)); 2645 } 2646 2647 struct target_msqid_ds 2648 { 2649 struct target_ipc_perm msg_perm; 2650 abi_ulong msg_stime; 2651 #if TARGET_ABI_BITS == 32 2652 abi_ulong __unused1; 2653 #endif 2654 abi_ulong msg_rtime; 2655 #if TARGET_ABI_BITS == 32 2656 abi_ulong __unused2; 2657 #endif 2658 abi_ulong msg_ctime; 2659 #if TARGET_ABI_BITS == 32 2660 abi_ulong __unused3; 2661 #endif 2662 abi_ulong __msg_cbytes; 2663 abi_ulong msg_qnum; 2664 abi_ulong msg_qbytes; 2665 abi_ulong msg_lspid; 2666 abi_ulong msg_lrpid; 2667 abi_ulong __unused4; 2668 abi_ulong __unused5; 2669 }; 2670 2671 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2672 abi_ulong target_addr) 2673 { 2674 struct target_msqid_ds *target_md; 2675 2676 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2677 return -TARGET_EFAULT; 2678 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2679 return -TARGET_EFAULT; 2680 host_md->msg_stime = tswapal(target_md->msg_stime); 2681 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2682 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2683 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2684 host_md->msg_qnum = tswapal(target_md->msg_qnum); 
2685 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2686 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2687 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2688 unlock_user_struct(target_md, target_addr, 0); 2689 return 0; 2690 } 2691 2692 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2693 struct msqid_ds *host_md) 2694 { 2695 struct target_msqid_ds *target_md; 2696 2697 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2698 return -TARGET_EFAULT; 2699 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2700 return -TARGET_EFAULT; 2701 target_md->msg_stime = tswapal(host_md->msg_stime); 2702 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2703 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2704 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2705 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2706 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2707 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2708 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2709 unlock_user_struct(target_md, target_addr, 1); 2710 return 0; 2711 } 2712 2713 struct target_msginfo { 2714 int msgpool; 2715 int msgmap; 2716 int msgmax; 2717 int msgmnb; 2718 int msgmni; 2719 int msgssz; 2720 int msgtql; 2721 unsigned short int msgseg; 2722 }; 2723 2724 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2725 struct msginfo *host_msginfo) 2726 { 2727 struct target_msginfo *target_msginfo; 2728 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2729 return -TARGET_EFAULT; 2730 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2731 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2732 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2733 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2734 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2735 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2736 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2737 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2738 unlock_user_struct(target_msginfo, target_addr, 1); 2739 return 0; 2740 } 2741 2742 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2743 { 2744 struct msqid_ds dsarg; 2745 struct msginfo msginfo; 2746 abi_long ret = -TARGET_EINVAL; 2747 2748 cmd &= 0xff; 2749 2750 switch (cmd) { 2751 case IPC_STAT: 2752 case IPC_SET: 2753 case MSG_STAT: 2754 if (target_to_host_msqid_ds(&dsarg,ptr)) 2755 return -TARGET_EFAULT; 2756 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2757 if (host_to_target_msqid_ds(ptr,&dsarg)) 2758 return -TARGET_EFAULT; 2759 break; 2760 case IPC_RMID: 2761 ret = get_errno(msgctl(msgid, cmd, NULL)); 2762 break; 2763 case IPC_INFO: 2764 case MSG_INFO: 2765 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2766 if (host_to_target_msginfo(ptr, &msginfo)) 2767 return -TARGET_EFAULT; 2768 break; 2769 } 2770 2771 return ret; 2772 } 2773 2774 struct target_msgbuf { 2775 abi_long mtype; 2776 char mtext[1]; 2777 }; 2778 2779 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2780 unsigned int msgsz, int msgflg) 2781 { 2782 struct target_msgbuf *target_mb; 2783 struct msgbuf *host_mb; 2784 abi_long ret = 0; 2785 2786 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2787 return -TARGET_EFAULT; 2788 host_mb = malloc(msgsz+sizeof(long)); 2789 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2790 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2791 ret = 
get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2792 free(host_mb); 2793 unlock_user_struct(target_mb, msgp, 0); 2794 2795 return ret; 2796 } 2797 2798 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2799 unsigned int msgsz, abi_long msgtyp, 2800 int msgflg) 2801 { 2802 struct target_msgbuf *target_mb; 2803 char *target_mtext; 2804 struct msgbuf *host_mb; 2805 abi_long ret = 0; 2806 2807 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2808 return -TARGET_EFAULT; 2809 2810 host_mb = g_malloc(msgsz+sizeof(long)); 2811 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2812 2813 if (ret > 0) { 2814 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2815 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2816 if (!target_mtext) { 2817 ret = -TARGET_EFAULT; 2818 goto end; 2819 } 2820 memcpy(target_mb->mtext, host_mb->mtext, ret); 2821 unlock_user(target_mtext, target_mtext_addr, ret); 2822 } 2823 2824 target_mb->mtype = tswapal(host_mb->mtype); 2825 2826 end: 2827 if (target_mb) 2828 unlock_user_struct(target_mb, msgp, 1); 2829 g_free(host_mb); 2830 return ret; 2831 } 2832 2833 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2834 abi_ulong target_addr) 2835 { 2836 struct target_shmid_ds *target_sd; 2837 2838 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2839 return -TARGET_EFAULT; 2840 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2841 return -TARGET_EFAULT; 2842 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2843 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2844 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2845 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2846 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2847 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2848 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2849 unlock_user_struct(target_sd, target_addr, 0); 2850 return 0; 2851 } 2852 2853 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2854 struct shmid_ds *host_sd) 2855 { 2856 struct target_shmid_ds *target_sd; 2857 2858 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2859 return -TARGET_EFAULT; 2860 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2861 return -TARGET_EFAULT; 2862 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2863 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2864 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2865 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2866 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2867 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2868 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2869 unlock_user_struct(target_sd, target_addr, 1); 2870 return 0; 2871 } 2872 2873 struct target_shminfo { 2874 abi_ulong shmmax; 2875 abi_ulong shmmin; 2876 abi_ulong shmmni; 2877 abi_ulong shmseg; 2878 abi_ulong shmall; 2879 }; 2880 2881 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2882 struct shminfo *host_shminfo) 2883 { 2884 struct target_shminfo *target_shminfo; 2885 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 2886 return -TARGET_EFAULT; 2887 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 2888 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 2889 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 2890 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 2891 __put_user(host_shminfo->shmall, 
&target_shminfo->shmall); 2892 unlock_user_struct(target_shminfo, target_addr, 1); 2893 return 0; 2894 } 2895 2896 struct target_shm_info { 2897 int used_ids; 2898 abi_ulong shm_tot; 2899 abi_ulong shm_rss; 2900 abi_ulong shm_swp; 2901 abi_ulong swap_attempts; 2902 abi_ulong swap_successes; 2903 }; 2904 2905 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 2906 struct shm_info *host_shm_info) 2907 { 2908 struct target_shm_info *target_shm_info; 2909 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 2910 return -TARGET_EFAULT; 2911 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 2912 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 2913 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 2914 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 2915 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 2916 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 2917 unlock_user_struct(target_shm_info, target_addr, 1); 2918 return 0; 2919 } 2920 2921 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 2922 { 2923 struct shmid_ds dsarg; 2924 struct shminfo shminfo; 2925 struct shm_info shm_info; 2926 abi_long ret = -TARGET_EINVAL; 2927 2928 cmd &= 0xff; 2929 2930 switch(cmd) { 2931 case IPC_STAT: 2932 case IPC_SET: 2933 case SHM_STAT: 2934 if (target_to_host_shmid_ds(&dsarg, buf)) 2935 return -TARGET_EFAULT; 2936 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 2937 if (host_to_target_shmid_ds(buf, &dsarg)) 2938 return -TARGET_EFAULT; 2939 break; 2940 case IPC_INFO: 2941 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 2942 if (host_to_target_shminfo(buf, &shminfo)) 2943 return -TARGET_EFAULT; 2944 break; 2945 case SHM_INFO: 2946 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 2947 if (host_to_target_shm_info(buf, &shm_info)) 2948 return -TARGET_EFAULT; 2949 break; 2950 case IPC_RMID: 2951 case SHM_LOCK: 2952 case SHM_UNLOCK: 2953 ret = get_errno(shmctl(shmid, cmd, NULL)); 2954 break; 2955 } 2956 2957 return ret; 2958 } 2959 2960 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 2961 { 2962 abi_long raddr; 2963 void *host_raddr; 2964 struct shmid_ds shm_info; 2965 int i,ret; 2966 2967 /* find out the length of the shared memory segment */ 2968 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 2969 if (is_error(ret)) { 2970 /* can't get length, bail out */ 2971 return ret; 2972 } 2973 2974 mmap_lock(); 2975 2976 if (shmaddr) 2977 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 2978 else { 2979 abi_ulong mmap_start; 2980 2981 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 2982 2983 if (mmap_start == -1) { 2984 errno = ENOMEM; 2985 host_raddr = (void *)-1; 2986 } else 2987 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 2988 } 2989 2990 if (host_raddr == (void *)-1) { 2991 mmap_unlock(); 2992 return get_errno((long)host_raddr); 2993 } 2994 raddr=h2g((unsigned long)host_raddr); 2995 2996 page_set_flags(raddr, raddr + shm_info.shm_segsz, 2997 PAGE_VALID | PAGE_READ | 2998 ((shmflg & SHM_RDONLY)? 
0 : PAGE_WRITE)); 2999 3000 for (i = 0; i < N_SHM_REGIONS; i++) { 3001 if (shm_regions[i].start == 0) { 3002 shm_regions[i].start = raddr; 3003 shm_regions[i].size = shm_info.shm_segsz; 3004 break; 3005 } 3006 } 3007 3008 mmap_unlock(); 3009 return raddr; 3010 3011 } 3012 3013 static inline abi_long do_shmdt(abi_ulong shmaddr) 3014 { 3015 int i; 3016 3017 for (i = 0; i < N_SHM_REGIONS; ++i) { 3018 if (shm_regions[i].start == shmaddr) { 3019 shm_regions[i].start = 0; 3020 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3021 break; 3022 } 3023 } 3024 3025 return get_errno(shmdt(g2h(shmaddr))); 3026 } 3027 3028 #ifdef TARGET_NR_ipc 3029 /* ??? This only works with linear mappings. */ 3030 /* do_ipc() must return target values and target errnos. */ 3031 static abi_long do_ipc(unsigned int call, int first, 3032 int second, int third, 3033 abi_long ptr, abi_long fifth) 3034 { 3035 int version; 3036 abi_long ret = 0; 3037 3038 version = call >> 16; 3039 call &= 0xffff; 3040 3041 switch (call) { 3042 case IPCOP_semop: 3043 ret = do_semop(first, ptr, second); 3044 break; 3045 3046 case IPCOP_semget: 3047 ret = get_errno(semget(first, second, third)); 3048 break; 3049 3050 case IPCOP_semctl: 3051 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3052 break; 3053 3054 case IPCOP_msgget: 3055 ret = get_errno(msgget(first, second)); 3056 break; 3057 3058 case IPCOP_msgsnd: 3059 ret = do_msgsnd(first, ptr, second, third); 3060 break; 3061 3062 case IPCOP_msgctl: 3063 ret = do_msgctl(first, second, ptr); 3064 break; 3065 3066 case IPCOP_msgrcv: 3067 switch (version) { 3068 case 0: 3069 { 3070 struct target_ipc_kludge { 3071 abi_long msgp; 3072 abi_long msgtyp; 3073 } *tmp; 3074 3075 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3076 ret = -TARGET_EFAULT; 3077 break; 3078 } 3079 3080 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3081 3082 unlock_user_struct(tmp, ptr, 0); 3083 break; 3084 } 3085 default: 3086 ret = do_msgrcv(first, ptr, second, fifth, third); 3087 } 3088 break; 3089 3090 case IPCOP_shmat: 3091 switch (version) { 3092 default: 3093 { 3094 abi_ulong raddr; 3095 raddr = do_shmat(first, ptr, second); 3096 if (is_error(raddr)) 3097 return get_errno(raddr); 3098 if (put_user_ual(raddr, third)) 3099 return -TARGET_EFAULT; 3100 break; 3101 } 3102 case 1: 3103 ret = -TARGET_EINVAL; 3104 break; 3105 } 3106 break; 3107 case IPCOP_shmdt: 3108 ret = do_shmdt(ptr); 3109 break; 3110 3111 case IPCOP_shmget: 3112 /* IPC_* flag values are the same on all linux platforms */ 3113 ret = get_errno(shmget(first, second, third)); 3114 break; 3115 3116 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3117 case IPCOP_shmctl: 3118 ret = do_shmctl(first, second, ptr); 3119 break; 3120 default: 3121 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3122 ret = -TARGET_ENOSYS; 3123 break; 3124 } 3125 return ret; 3126 } 3127 #endif 3128 3129 /* kernel structure types definitions */ 3130 3131 #define STRUCT(name, ...) STRUCT_ ## name, 3132 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3133 enum { 3134 #include "syscall_types.h" 3135 }; 3136 #undef STRUCT 3137 #undef STRUCT_SPECIAL 3138 3139 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3140 #define STRUCT_SPECIAL(name) 3141 #include "syscall_types.h" 3142 #undef STRUCT 3143 #undef STRUCT_SPECIAL 3144 3145 typedef struct IOCTLEntry IOCTLEntry; 3146 3147 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3148 int fd, abi_long cmd, abi_long arg); 3149 3150 struct IOCTLEntry { 3151 unsigned int target_cmd; 3152 unsigned int host_cmd; 3153 const char *name; 3154 int access; 3155 do_ioctl_fn *do_ioctl; 3156 const argtype arg_type[5]; 3157 }; 3158 3159 #define IOC_R 0x0001 3160 #define IOC_W 0x0002 3161 #define IOC_RW (IOC_R | IOC_W) 3162 3163 #define MAX_STRUCT_SIZE 4096 3164 3165 #ifdef CONFIG_FIEMAP 3166 /* So fiemap access checks don't overflow on 32 bit systems. 3167 * This is very slightly smaller than the limit imposed by 3168 * the underlying kernel. 3169 */ 3170 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3171 / sizeof(struct fiemap_extent)) 3172 3173 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3174 int fd, abi_long cmd, abi_long arg) 3175 { 3176 /* The parameter for this ioctl is a struct fiemap followed 3177 * by an array of struct fiemap_extent whose size is set 3178 * in fiemap->fm_extent_count. The array is filled in by the 3179 * ioctl. 3180 */ 3181 int target_size_in, target_size_out; 3182 struct fiemap *fm; 3183 const argtype *arg_type = ie->arg_type; 3184 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3185 void *argptr, *p; 3186 abi_long ret; 3187 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3188 uint32_t outbufsz; 3189 int free_fm = 0; 3190 3191 assert(arg_type[0] == TYPE_PTR); 3192 assert(ie->access == IOC_RW); 3193 arg_type++; 3194 target_size_in = thunk_type_size(arg_type, 0); 3195 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3196 if (!argptr) { 3197 return -TARGET_EFAULT; 3198 } 3199 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3200 unlock_user(argptr, arg, 0); 3201 fm = (struct fiemap *)buf_temp; 3202 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3203 return -TARGET_EINVAL; 3204 } 3205 3206 outbufsz = sizeof (*fm) + 3207 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3208 3209 if (outbufsz > MAX_STRUCT_SIZE) { 3210 /* We can't fit all the extents into the fixed size buffer. 3211 * Allocate one that is large enough and use it instead. 
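 * Only the already-converted struct fiemap header needs to be copied into
 * the new buffer; the extent array behind it is filled in by the host
 * ioctl.  The FIEMAP_MAX_EXTENTS check above ensures the outbufsz
 * computation cannot overflow.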
3212 */ 3213 fm = malloc(outbufsz); 3214 if (!fm) { 3215 return -TARGET_ENOMEM; 3216 } 3217 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3218 free_fm = 1; 3219 } 3220 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3221 if (!is_error(ret)) { 3222 target_size_out = target_size_in; 3223 /* An extent_count of 0 means we were only counting the extents 3224 * so there are no structs to copy 3225 */ 3226 if (fm->fm_extent_count != 0) { 3227 target_size_out += fm->fm_mapped_extents * extent_size; 3228 } 3229 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3230 if (!argptr) { 3231 ret = -TARGET_EFAULT; 3232 } else { 3233 /* Convert the struct fiemap */ 3234 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3235 if (fm->fm_extent_count != 0) { 3236 p = argptr + target_size_in; 3237 /* ...and then all the struct fiemap_extents */ 3238 for (i = 0; i < fm->fm_mapped_extents; i++) { 3239 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3240 THUNK_TARGET); 3241 p += extent_size; 3242 } 3243 } 3244 unlock_user(argptr, arg, target_size_out); 3245 } 3246 } 3247 if (free_fm) { 3248 free(fm); 3249 } 3250 return ret; 3251 } 3252 #endif 3253 3254 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3255 int fd, abi_long cmd, abi_long arg) 3256 { 3257 const argtype *arg_type = ie->arg_type; 3258 int target_size; 3259 void *argptr; 3260 int ret; 3261 struct ifconf *host_ifconf; 3262 uint32_t outbufsz; 3263 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3264 int target_ifreq_size; 3265 int nb_ifreq; 3266 int free_buf = 0; 3267 int i; 3268 int target_ifc_len; 3269 abi_long target_ifc_buf; 3270 int host_ifc_len; 3271 char *host_ifc_buf; 3272 3273 assert(arg_type[0] == TYPE_PTR); 3274 assert(ie->access == IOC_RW); 3275 3276 arg_type++; 3277 target_size = thunk_type_size(arg_type, 0); 3278 3279 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3280 if (!argptr) 3281 return -TARGET_EFAULT; 3282 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3283 unlock_user(argptr, arg, 0); 3284 3285 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3286 target_ifc_len = host_ifconf->ifc_len; 3287 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3288 3289 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3290 nb_ifreq = target_ifc_len / target_ifreq_size; 3291 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3292 3293 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3294 if (outbufsz > MAX_STRUCT_SIZE) { 3295 /* We can't fit all the extents into the fixed size buffer. 3296 * Allocate one that is large enough and use it instead. 
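 * Here the variable-sized payload is the array of struct ifreq entries
 * rather than fiemap extents; only the struct ifconf header is copied,
 * and ifc_buf is then pointed at the space that follows it.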
3297 */ 3298 host_ifconf = malloc(outbufsz); 3299 if (!host_ifconf) { 3300 return -TARGET_ENOMEM; 3301 } 3302 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3303 free_buf = 1; 3304 } 3305 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3306 3307 host_ifconf->ifc_len = host_ifc_len; 3308 host_ifconf->ifc_buf = host_ifc_buf; 3309 3310 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3311 if (!is_error(ret)) { 3312 /* convert host ifc_len to target ifc_len */ 3313 3314 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3315 target_ifc_len = nb_ifreq * target_ifreq_size; 3316 host_ifconf->ifc_len = target_ifc_len; 3317 3318 /* restore target ifc_buf */ 3319 3320 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3321 3322 /* copy struct ifconf to target user */ 3323 3324 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3325 if (!argptr) 3326 return -TARGET_EFAULT; 3327 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3328 unlock_user(argptr, arg, target_size); 3329 3330 /* copy ifreq[] to target user */ 3331 3332 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3333 for (i = 0; i < nb_ifreq ; i++) { 3334 thunk_convert(argptr + i * target_ifreq_size, 3335 host_ifc_buf + i * sizeof(struct ifreq), 3336 ifreq_arg_type, THUNK_TARGET); 3337 } 3338 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3339 } 3340 3341 if (free_buf) { 3342 free(host_ifconf); 3343 } 3344 3345 return ret; 3346 } 3347 3348 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3349 abi_long cmd, abi_long arg) 3350 { 3351 void *argptr; 3352 struct dm_ioctl *host_dm; 3353 abi_long guest_data; 3354 uint32_t guest_data_size; 3355 int target_size; 3356 const argtype *arg_type = ie->arg_type; 3357 abi_long ret; 3358 void *big_buf = NULL; 3359 char *host_data; 3360 3361 arg_type++; 3362 target_size = thunk_type_size(arg_type, 0); 3363 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3364 if (!argptr) { 3365 ret = -TARGET_EFAULT; 3366 goto out; 3367 } 3368 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3369 unlock_user(argptr, arg, 0); 3370 3371 /* buf_temp is too small, so fetch things into a bigger buffer */ 3372 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3373 memcpy(big_buf, buf_temp, target_size); 3374 buf_temp = big_buf; 3375 host_dm = big_buf; 3376 3377 guest_data = arg + host_dm->data_start; 3378 if ((guest_data - arg) < 0) { 3379 ret = -EINVAL; 3380 goto out; 3381 } 3382 guest_data_size = host_dm->data_size - host_dm->data_start; 3383 host_data = (char*)host_dm + host_dm->data_start; 3384 3385 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3386 switch (ie->host_cmd) { 3387 case DM_REMOVE_ALL: 3388 case DM_LIST_DEVICES: 3389 case DM_DEV_CREATE: 3390 case DM_DEV_REMOVE: 3391 case DM_DEV_SUSPEND: 3392 case DM_DEV_STATUS: 3393 case DM_DEV_WAIT: 3394 case DM_TABLE_STATUS: 3395 case DM_TABLE_CLEAR: 3396 case DM_TABLE_DEPS: 3397 case DM_LIST_VERSIONS: 3398 /* no input data */ 3399 break; 3400 case DM_DEV_RENAME: 3401 case DM_DEV_SET_GEOMETRY: 3402 /* data contains only strings */ 3403 memcpy(host_data, argptr, guest_data_size); 3404 break; 3405 case DM_TARGET_MSG: 3406 memcpy(host_data, argptr, guest_data_size); 3407 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3408 break; 3409 case DM_TABLE_LOAD: 3410 { 3411 void *gspec = argptr; 3412 void *cur_data = host_data; 3413 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3414 int spec_size = 
thunk_type_size(arg_type, 0); 3415 int i; 3416 3417 for (i = 0; i < host_dm->target_count; i++) { 3418 struct dm_target_spec *spec = cur_data; 3419 uint32_t next; 3420 int slen; 3421 3422 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3423 slen = strlen((char*)gspec + spec_size) + 1; 3424 next = spec->next; 3425 spec->next = sizeof(*spec) + slen; 3426 strcpy((char*)&spec[1], gspec + spec_size); 3427 gspec += next; 3428 cur_data += spec->next; 3429 } 3430 break; 3431 } 3432 default: 3433 ret = -TARGET_EINVAL; 3434 goto out; 3435 } 3436 unlock_user(argptr, guest_data, 0); 3437 3438 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3439 if (!is_error(ret)) { 3440 guest_data = arg + host_dm->data_start; 3441 guest_data_size = host_dm->data_size - host_dm->data_start; 3442 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3443 switch (ie->host_cmd) { 3444 case DM_REMOVE_ALL: 3445 case DM_DEV_CREATE: 3446 case DM_DEV_REMOVE: 3447 case DM_DEV_RENAME: 3448 case DM_DEV_SUSPEND: 3449 case DM_DEV_STATUS: 3450 case DM_TABLE_LOAD: 3451 case DM_TABLE_CLEAR: 3452 case DM_TARGET_MSG: 3453 case DM_DEV_SET_GEOMETRY: 3454 /* no return data */ 3455 break; 3456 case DM_LIST_DEVICES: 3457 { 3458 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3459 uint32_t remaining_data = guest_data_size; 3460 void *cur_data = argptr; 3461 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3462 int nl_size = 12; /* can't use thunk_size due to alignment */ 3463 3464 while (1) { 3465 uint32_t next = nl->next; 3466 if (next) { 3467 nl->next = nl_size + (strlen(nl->name) + 1); 3468 } 3469 if (remaining_data < nl->next) { 3470 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3471 break; 3472 } 3473 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3474 strcpy(cur_data + nl_size, nl->name); 3475 cur_data += nl->next; 3476 remaining_data -= nl->next; 3477 if (!next) { 3478 break; 3479 } 3480 nl = (void*)nl + next; 3481 } 3482 break; 3483 } 3484 case DM_DEV_WAIT: 3485 case DM_TABLE_STATUS: 3486 { 3487 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3488 void *cur_data = argptr; 3489 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3490 int spec_size = thunk_type_size(arg_type, 0); 3491 int i; 3492 3493 for (i = 0; i < host_dm->target_count; i++) { 3494 uint32_t next = spec->next; 3495 int slen = strlen((char*)&spec[1]) + 1; 3496 spec->next = (cur_data - argptr) + spec_size + slen; 3497 if (guest_data_size < spec->next) { 3498 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3499 break; 3500 } 3501 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3502 strcpy(cur_data + spec_size, (char*)&spec[1]); 3503 cur_data = argptr + spec->next; 3504 spec = (void*)host_dm + host_dm->data_start + next; 3505 } 3506 break; 3507 } 3508 case DM_TABLE_DEPS: 3509 { 3510 void *hdata = (void*)host_dm + host_dm->data_start; 3511 int count = *(uint32_t*)hdata; 3512 uint64_t *hdev = hdata + 8; 3513 uint64_t *gdev = argptr + 8; 3514 int i; 3515 3516 *(uint32_t*)argptr = tswap32(count); 3517 for (i = 0; i < count; i++) { 3518 *gdev = tswap64(*hdev); 3519 gdev++; 3520 hdev++; 3521 } 3522 break; 3523 } 3524 case DM_LIST_VERSIONS: 3525 { 3526 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3527 uint32_t remaining_data = guest_data_size; 3528 void *cur_data = argptr; 3529 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3530 int vers_size = thunk_type_size(arg_type, 0); 3531 3532 while (1) { 3533 uint32_t next = vers->next; 3534 if (next) { 3535 
vers->next = vers_size + (strlen(vers->name) + 1); 3536 } 3537 if (remaining_data < vers->next) { 3538 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3539 break; 3540 } 3541 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3542 strcpy(cur_data + vers_size, vers->name); 3543 cur_data += vers->next; 3544 remaining_data -= vers->next; 3545 if (!next) { 3546 break; 3547 } 3548 vers = (void*)vers + next; 3549 } 3550 break; 3551 } 3552 default: 3553 ret = -TARGET_EINVAL; 3554 goto out; 3555 } 3556 unlock_user(argptr, guest_data, guest_data_size); 3557 3558 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3559 if (!argptr) { 3560 ret = -TARGET_EFAULT; 3561 goto out; 3562 } 3563 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3564 unlock_user(argptr, arg, target_size); 3565 } 3566 out: 3567 g_free(big_buf); 3568 return ret; 3569 } 3570 3571 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3572 int fd, abi_long cmd, abi_long arg) 3573 { 3574 const argtype *arg_type = ie->arg_type; 3575 const StructEntry *se; 3576 const argtype *field_types; 3577 const int *dst_offsets, *src_offsets; 3578 int target_size; 3579 void *argptr; 3580 abi_ulong *target_rt_dev_ptr; 3581 unsigned long *host_rt_dev_ptr; 3582 abi_long ret; 3583 int i; 3584 3585 assert(ie->access == IOC_W); 3586 assert(*arg_type == TYPE_PTR); 3587 arg_type++; 3588 assert(*arg_type == TYPE_STRUCT); 3589 target_size = thunk_type_size(arg_type, 0); 3590 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3591 if (!argptr) { 3592 return -TARGET_EFAULT; 3593 } 3594 arg_type++; 3595 assert(*arg_type == (int)STRUCT_rtentry); 3596 se = struct_entries + *arg_type++; 3597 assert(se->convert[0] == NULL); 3598 /* convert struct here to be able to catch rt_dev string */ 3599 field_types = se->field_types; 3600 dst_offsets = se->field_offsets[THUNK_HOST]; 3601 src_offsets = se->field_offsets[THUNK_TARGET]; 3602 for (i = 0; i < se->nb_fields; i++) { 3603 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3604 assert(*field_types == TYPE_PTRVOID); 3605 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3606 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3607 if (*target_rt_dev_ptr != 0) { 3608 *host_rt_dev_ptr = (unsigned long)lock_user_string( 3609 tswapal(*target_rt_dev_ptr)); 3610 if (!*host_rt_dev_ptr) { 3611 unlock_user(argptr, arg, 0); 3612 return -TARGET_EFAULT; 3613 } 3614 } else { 3615 *host_rt_dev_ptr = 0; 3616 } 3617 field_types++; 3618 continue; 3619 } 3620 field_types = thunk_convert(buf_temp + dst_offsets[i], 3621 argptr + src_offsets[i], 3622 field_types, THUNK_HOST); 3623 } 3624 unlock_user(argptr, arg, 0); 3625 3626 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3627 if (*host_rt_dev_ptr != 0) { 3628 unlock_user((void *)*host_rt_dev_ptr, 3629 *target_rt_dev_ptr, 0); 3630 } 3631 return ret; 3632 } 3633 3634 static IOCTLEntry ioctl_entries[] = { 3635 #define IOCTL(cmd, access, ...) \ 3636 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3637 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3638 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3639 #include "ioctls.h" 3640 { 0, 0, }, 3641 }; 3642 3643 /* ??? Implement proper locking for ioctls. */ 3644 /* do_ioctl() Must return target values and target errnos. 
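 *
 * How the table above is used (illustrative; the concrete entry and the
 * MK_PTR() helper are assumed examples here, not quoted from ioctls.h):
 * an entry along the lines of
 *
 *     IOCTL(FOOIOC, IOC_R, MK_PTR(TYPE_INT))
 *
 * expands, via the IOCTL() macro above, to
 *
 *     { TARGET_FOOIOC, FOOIOC, "FOOIOC", IOC_R, 0, { MK_PTR(TYPE_INT) } },
 *
 * do_ioctl() scans ioctl_entries[] for a matching target_cmd; since such
 * an entry has no special do_ioctl handler, it takes the generic
 * TYPE_PTR / IOC_R path below: run the host ioctl into buf_temp, then
 * thunk_convert() the result back into the guest's buffer.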
*/ 3645 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3646 { 3647 const IOCTLEntry *ie; 3648 const argtype *arg_type; 3649 abi_long ret; 3650 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3651 int target_size; 3652 void *argptr; 3653 3654 ie = ioctl_entries; 3655 for(;;) { 3656 if (ie->target_cmd == 0) { 3657 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3658 return -TARGET_ENOSYS; 3659 } 3660 if (ie->target_cmd == cmd) 3661 break; 3662 ie++; 3663 } 3664 arg_type = ie->arg_type; 3665 #if defined(DEBUG) 3666 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3667 #endif 3668 if (ie->do_ioctl) { 3669 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3670 } 3671 3672 switch(arg_type[0]) { 3673 case TYPE_NULL: 3674 /* no argument */ 3675 ret = get_errno(ioctl(fd, ie->host_cmd)); 3676 break; 3677 case TYPE_PTRVOID: 3678 case TYPE_INT: 3679 /* int argment */ 3680 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3681 break; 3682 case TYPE_PTR: 3683 arg_type++; 3684 target_size = thunk_type_size(arg_type, 0); 3685 switch(ie->access) { 3686 case IOC_R: 3687 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3688 if (!is_error(ret)) { 3689 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3690 if (!argptr) 3691 return -TARGET_EFAULT; 3692 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3693 unlock_user(argptr, arg, target_size); 3694 } 3695 break; 3696 case IOC_W: 3697 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3698 if (!argptr) 3699 return -TARGET_EFAULT; 3700 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3701 unlock_user(argptr, arg, 0); 3702 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3703 break; 3704 default: 3705 case IOC_RW: 3706 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3707 if (!argptr) 3708 return -TARGET_EFAULT; 3709 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3710 unlock_user(argptr, arg, 0); 3711 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3712 if (!is_error(ret)) { 3713 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3714 if (!argptr) 3715 return -TARGET_EFAULT; 3716 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3717 unlock_user(argptr, arg, target_size); 3718 } 3719 break; 3720 } 3721 break; 3722 default: 3723 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3724 (long)cmd, arg_type[0]); 3725 ret = -TARGET_ENOSYS; 3726 break; 3727 } 3728 return ret; 3729 } 3730 3731 static const bitmask_transtbl iflag_tbl[] = { 3732 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3733 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3734 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3735 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3736 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3737 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3738 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3739 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3740 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3741 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3742 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3743 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3744 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3745 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3746 { 0, 0, 0, 0 } 3747 }; 3748 3749 static const bitmask_transtbl oflag_tbl[] = { 3750 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3751 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3752 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3753 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3754 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR 
}, 3755 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3756 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3757 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3758 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3759 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3760 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3761 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3762 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3763 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3764 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3765 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3766 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3767 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3768 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3769 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3770 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3771 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3772 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3773 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3774 { 0, 0, 0, 0 } 3775 }; 3776 3777 static const bitmask_transtbl cflag_tbl[] = { 3778 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3779 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3780 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3781 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3782 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3783 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3784 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3785 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3786 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3787 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3788 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3789 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3790 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3791 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3792 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3793 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3794 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3795 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3796 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3797 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3798 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3799 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3800 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3801 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3802 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3803 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3804 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3805 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3806 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3807 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3808 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3809 { 0, 0, 0, 0 } 3810 }; 3811 3812 static const bitmask_transtbl lflag_tbl[] = { 3813 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3814 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3815 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3816 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3817 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3818 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3819 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3820 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3821 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3822 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3823 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3824 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3825 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3826 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3827 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3828 { 0, 0, 0, 0 } 3829 }; 3830 3831 static void target_to_host_termios 
(void *dst, const void *src) 3832 { 3833 struct host_termios *host = dst; 3834 const struct target_termios *target = src; 3835 3836 host->c_iflag = 3837 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3838 host->c_oflag = 3839 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3840 host->c_cflag = 3841 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3842 host->c_lflag = 3843 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3844 host->c_line = target->c_line; 3845 3846 memset(host->c_cc, 0, sizeof(host->c_cc)); 3847 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3848 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3849 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3850 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3851 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3852 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3853 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3854 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3855 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3856 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3857 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3858 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3859 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3860 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3861 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3862 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3863 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3864 } 3865 3866 static void host_to_target_termios (void *dst, const void *src) 3867 { 3868 struct target_termios *target = dst; 3869 const struct host_termios *host = src; 3870 3871 target->c_iflag = 3872 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3873 target->c_oflag = 3874 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3875 target->c_cflag = 3876 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3877 target->c_lflag = 3878 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3879 target->c_line = host->c_line; 3880 3881 memset(target->c_cc, 0, sizeof(target->c_cc)); 3882 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3883 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3884 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3885 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3886 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3887 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3888 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3889 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3890 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3891 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3892 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3893 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3894 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3895 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3896 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3897 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3898 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3899 } 3900 3901 static const StructEntry struct_termios_def = { 3902 .convert = { host_to_target_termios, target_to_host_termios }, 3903 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3904 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3905 }; 3906 3907 static bitmask_transtbl mmap_flags_tbl[] = { 3908 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3909 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3910 { TARGET_MAP_FIXED, 
TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 3911 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 3912 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 3913 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 3914 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 3915 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 3916 { 0, 0, 0, 0 } 3917 }; 3918 3919 #if defined(TARGET_I386) 3920 3921 /* NOTE: there is really one LDT for all the threads */ 3922 static uint8_t *ldt_table; 3923 3924 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 3925 { 3926 int size; 3927 void *p; 3928 3929 if (!ldt_table) 3930 return 0; 3931 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 3932 if (size > bytecount) 3933 size = bytecount; 3934 p = lock_user(VERIFY_WRITE, ptr, size, 0); 3935 if (!p) 3936 return -TARGET_EFAULT; 3937 /* ??? Should this be byteswapped? */ 3938 memcpy(p, ldt_table, size); 3939 unlock_user(p, ptr, size); 3940 return size; 3941 } 3942 3943 /* XXX: add locking support */ 3944 static abi_long write_ldt(CPUX86State *env, 3945 abi_ulong ptr, unsigned long bytecount, int oldmode) 3946 { 3947 struct target_modify_ldt_ldt_s ldt_info; 3948 struct target_modify_ldt_ldt_s *target_ldt_info; 3949 int seg_32bit, contents, read_exec_only, limit_in_pages; 3950 int seg_not_present, useable, lm; 3951 uint32_t *lp, entry_1, entry_2; 3952 3953 if (bytecount != sizeof(ldt_info)) 3954 return -TARGET_EINVAL; 3955 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 3956 return -TARGET_EFAULT; 3957 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3958 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 3959 ldt_info.limit = tswap32(target_ldt_info->limit); 3960 ldt_info.flags = tswap32(target_ldt_info->flags); 3961 unlock_user_struct(target_ldt_info, ptr, 0); 3962 3963 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 3964 return -TARGET_EINVAL; 3965 seg_32bit = ldt_info.flags & 1; 3966 contents = (ldt_info.flags >> 1) & 3; 3967 read_exec_only = (ldt_info.flags >> 3) & 1; 3968 limit_in_pages = (ldt_info.flags >> 4) & 1; 3969 seg_not_present = (ldt_info.flags >> 5) & 1; 3970 useable = (ldt_info.flags >> 6) & 1; 3971 #ifdef TARGET_ABI32 3972 lm = 0; 3973 #else 3974 lm = (ldt_info.flags >> 7) & 1; 3975 #endif 3976 if (contents == 3) { 3977 if (oldmode) 3978 return -TARGET_EINVAL; 3979 if (seg_not_present == 0) 3980 return -TARGET_EINVAL; 3981 } 3982 /* allocate the LDT */ 3983 if (!ldt_table) { 3984 env->ldt.base = target_mmap(0, 3985 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 3986 PROT_READ|PROT_WRITE, 3987 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 3988 if (env->ldt.base == -1) 3989 return -TARGET_ENOMEM; 3990 memset(g2h(env->ldt.base), 0, 3991 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 3992 env->ldt.limit = 0xffff; 3993 ldt_table = g2h(env->ldt.base); 3994 } 3995 3996 /* NOTE: same code as Linux kernel */ 3997 /* Allow LDTs to be cleared by the user.
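   An entry counts as "cleared" when base_addr and limit are both zero and, unless oldmode is set, the flags describe the canonical empty descriptor (contents 0, read_exec_only and seg_not_present set, everything else clear); in that case an all-zero descriptor is installed instead of the encoded fields built below.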
*/ 3998 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3999 if (oldmode || 4000 (contents == 0 && 4001 read_exec_only == 1 && 4002 seg_32bit == 0 && 4003 limit_in_pages == 0 && 4004 seg_not_present == 1 && 4005 useable == 0 )) { 4006 entry_1 = 0; 4007 entry_2 = 0; 4008 goto install; 4009 } 4010 } 4011 4012 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4013 (ldt_info.limit & 0x0ffff); 4014 entry_2 = (ldt_info.base_addr & 0xff000000) | 4015 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4016 (ldt_info.limit & 0xf0000) | 4017 ((read_exec_only ^ 1) << 9) | 4018 (contents << 10) | 4019 ((seg_not_present ^ 1) << 15) | 4020 (seg_32bit << 22) | 4021 (limit_in_pages << 23) | 4022 (lm << 21) | 4023 0x7000; 4024 if (!oldmode) 4025 entry_2 |= (useable << 20); 4026 4027 /* Install the new entry ... */ 4028 install: 4029 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 4030 lp[0] = tswap32(entry_1); 4031 lp[1] = tswap32(entry_2); 4032 return 0; 4033 } 4034 4035 /* specific and weird i386 syscalls */ 4036 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 4037 unsigned long bytecount) 4038 { 4039 abi_long ret; 4040 4041 switch (func) { 4042 case 0: 4043 ret = read_ldt(ptr, bytecount); 4044 break; 4045 case 1: 4046 ret = write_ldt(env, ptr, bytecount, 1); 4047 break; 4048 case 0x11: 4049 ret = write_ldt(env, ptr, bytecount, 0); 4050 break; 4051 default: 4052 ret = -TARGET_ENOSYS; 4053 break; 4054 } 4055 return ret; 4056 } 4057 4058 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4059 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4060 { 4061 uint64_t *gdt_table = g2h(env->gdt.base); 4062 struct target_modify_ldt_ldt_s ldt_info; 4063 struct target_modify_ldt_ldt_s *target_ldt_info; 4064 int seg_32bit, contents, read_exec_only, limit_in_pages; 4065 int seg_not_present, useable, lm; 4066 uint32_t *lp, entry_1, entry_2; 4067 int i; 4068 4069 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4070 if (!target_ldt_info) 4071 return -TARGET_EFAULT; 4072 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4073 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4074 ldt_info.limit = tswap32(target_ldt_info->limit); 4075 ldt_info.flags = tswap32(target_ldt_info->flags); 4076 if (ldt_info.entry_number == -1) { 4077 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4078 if (gdt_table[i] == 0) { 4079 ldt_info.entry_number = i; 4080 target_ldt_info->entry_number = tswap32(i); 4081 break; 4082 } 4083 } 4084 } 4085 unlock_user_struct(target_ldt_info, ptr, 1); 4086 4087 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4088 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4089 return -TARGET_EINVAL; 4090 seg_32bit = ldt_info.flags & 1; 4091 contents = (ldt_info.flags >> 1) & 3; 4092 read_exec_only = (ldt_info.flags >> 3) & 1; 4093 limit_in_pages = (ldt_info.flags >> 4) & 1; 4094 seg_not_present = (ldt_info.flags >> 5) & 1; 4095 useable = (ldt_info.flags >> 6) & 1; 4096 #ifdef TARGET_ABI32 4097 lm = 0; 4098 #else 4099 lm = (ldt_info.flags >> 7) & 1; 4100 #endif 4101 4102 if (contents == 3) { 4103 if (seg_not_present == 0) 4104 return -TARGET_EINVAL; 4105 } 4106 4107 /* NOTE: same code as Linux kernel */ 4108 /* Allow LDTs to be cleared by the user. 
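   The same empty-descriptor convention as in write_ldt() applies here, minus the oldmode shortcut, and the result is written into a GDT TLS slot rather than the LDT.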
*/ 4109 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4110 if ((contents == 0 && 4111 read_exec_only == 1 && 4112 seg_32bit == 0 && 4113 limit_in_pages == 0 && 4114 seg_not_present == 1 && 4115 useable == 0 )) { 4116 entry_1 = 0; 4117 entry_2 = 0; 4118 goto install; 4119 } 4120 } 4121 4122 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4123 (ldt_info.limit & 0x0ffff); 4124 entry_2 = (ldt_info.base_addr & 0xff000000) | 4125 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4126 (ldt_info.limit & 0xf0000) | 4127 ((read_exec_only ^ 1) << 9) | 4128 (contents << 10) | 4129 ((seg_not_present ^ 1) << 15) | 4130 (seg_32bit << 22) | 4131 (limit_in_pages << 23) | 4132 (useable << 20) | 4133 (lm << 21) | 4134 0x7000; 4135 4136 /* Install the new entry ... */ 4137 install: 4138 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4139 lp[0] = tswap32(entry_1); 4140 lp[1] = tswap32(entry_2); 4141 return 0; 4142 } 4143 4144 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4145 { 4146 struct target_modify_ldt_ldt_s *target_ldt_info; 4147 uint64_t *gdt_table = g2h(env->gdt.base); 4148 uint32_t base_addr, limit, flags; 4149 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4150 int seg_not_present, useable, lm; 4151 uint32_t *lp, entry_1, entry_2; 4152 4153 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4154 if (!target_ldt_info) 4155 return -TARGET_EFAULT; 4156 idx = tswap32(target_ldt_info->entry_number); 4157 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4158 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4159 unlock_user_struct(target_ldt_info, ptr, 1); 4160 return -TARGET_EINVAL; 4161 } 4162 lp = (uint32_t *)(gdt_table + idx); 4163 entry_1 = tswap32(lp[0]); 4164 entry_2 = tswap32(lp[1]); 4165 4166 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4167 contents = (entry_2 >> 10) & 3; 4168 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4169 seg_32bit = (entry_2 >> 22) & 1; 4170 limit_in_pages = (entry_2 >> 23) & 1; 4171 useable = (entry_2 >> 20) & 1; 4172 #ifdef TARGET_ABI32 4173 lm = 0; 4174 #else 4175 lm = (entry_2 >> 21) & 1; 4176 #endif 4177 flags = (seg_32bit << 0) | (contents << 1) | 4178 (read_exec_only << 3) | (limit_in_pages << 4) | 4179 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4180 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4181 base_addr = (entry_1 >> 16) | 4182 (entry_2 & 0xff000000) | 4183 ((entry_2 & 0xff) << 16); 4184 target_ldt_info->base_addr = tswapal(base_addr); 4185 target_ldt_info->limit = tswap32(limit); 4186 target_ldt_info->flags = tswap32(flags); 4187 unlock_user_struct(target_ldt_info, ptr, 1); 4188 return 0; 4189 } 4190 #endif /* TARGET_I386 && TARGET_ABI32 */ 4191 4192 #ifndef TARGET_ABI32 4193 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4194 { 4195 abi_long ret = 0; 4196 abi_ulong val; 4197 int idx; 4198 4199 switch(code) { 4200 case TARGET_ARCH_SET_GS: 4201 case TARGET_ARCH_SET_FS: 4202 if (code == TARGET_ARCH_SET_GS) 4203 idx = R_GS; 4204 else 4205 idx = R_FS; 4206 cpu_x86_load_seg(env, idx, 0); 4207 env->segs[idx].base = addr; 4208 break; 4209 case TARGET_ARCH_GET_GS: 4210 case TARGET_ARCH_GET_FS: 4211 if (code == TARGET_ARCH_GET_GS) 4212 idx = R_GS; 4213 else 4214 idx = R_FS; 4215 val = env->segs[idx].base; 4216 if (put_user(val, addr, abi_ulong)) 4217 ret = -TARGET_EFAULT; 4218 break; 4219 default: 4220 ret = -TARGET_EINVAL; 4221 break; 4222 } 4223 return ret; 4224 } 4225 #endif 4226 4227 #endif /* defined(TARGET_I386) */ 4228 4229 #define NEW_STACK_SIZE 0x40000 4230 4231 4232 static pthread_mutex_t 
clone_lock = PTHREAD_MUTEX_INITIALIZER; 4233 typedef struct { 4234 CPUArchState *env; 4235 pthread_mutex_t mutex; 4236 pthread_cond_t cond; 4237 pthread_t thread; 4238 uint32_t tid; 4239 abi_ulong child_tidptr; 4240 abi_ulong parent_tidptr; 4241 sigset_t sigmask; 4242 } new_thread_info; 4243 4244 static void *clone_func(void *arg) 4245 { 4246 new_thread_info *info = arg; 4247 CPUArchState *env; 4248 CPUState *cpu; 4249 TaskState *ts; 4250 4251 env = info->env; 4252 cpu = ENV_GET_CPU(env); 4253 thread_cpu = cpu; 4254 ts = (TaskState *)cpu->opaque; 4255 info->tid = gettid(); 4256 cpu->host_tid = info->tid; 4257 task_settid(ts); 4258 if (info->child_tidptr) 4259 put_user_u32(info->tid, info->child_tidptr); 4260 if (info->parent_tidptr) 4261 put_user_u32(info->tid, info->parent_tidptr); 4262 /* Enable signals. */ 4263 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 4264 /* Signal to the parent that we're ready. */ 4265 pthread_mutex_lock(&info->mutex); 4266 pthread_cond_broadcast(&info->cond); 4267 pthread_mutex_unlock(&info->mutex); 4268 /* Wait until the parent has finished initializing the tls state. */ 4269 pthread_mutex_lock(&clone_lock); 4270 pthread_mutex_unlock(&clone_lock); 4271 cpu_loop(env); 4272 /* never exits */ 4273 return NULL; 4274 } 4275 4276 /* do_fork() must return host values and target errnos (unlike most 4277 do_*() functions). */ 4278 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4279 abi_ulong parent_tidptr, target_ulong newtls, 4280 abi_ulong child_tidptr) 4281 { 4282 CPUState *cpu = ENV_GET_CPU(env); 4283 int ret; 4284 TaskState *ts; 4285 CPUState *new_cpu; 4286 CPUArchState *new_env; 4287 unsigned int nptl_flags; 4288 sigset_t sigmask; 4289 4290 /* Emulate vfork() with fork() */ 4291 if (flags & CLONE_VFORK) 4292 flags &= ~(CLONE_VFORK | CLONE_VM); 4293 4294 if (flags & CLONE_VM) { 4295 TaskState *parent_ts = (TaskState *)cpu->opaque; 4296 new_thread_info info; 4297 pthread_attr_t attr; 4298 4299 ts = g_malloc0(sizeof(TaskState)); 4300 init_task_state(ts); 4301 /* we create a new CPU instance. */ 4302 new_env = cpu_copy(env); 4303 /* Init regs that differ from the parent. */ 4304 cpu_clone_regs(new_env, newsp); 4305 new_cpu = ENV_GET_CPU(new_env); 4306 new_cpu->opaque = ts; 4307 ts->bprm = parent_ts->bprm; 4308 ts->info = parent_ts->info; 4309 nptl_flags = flags; 4310 flags &= ~CLONE_NPTL_FLAGS2; 4311 4312 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4313 ts->child_tidptr = child_tidptr; 4314 } 4315 4316 if (nptl_flags & CLONE_SETTLS) 4317 cpu_set_tls (new_env, newtls); 4318 4319 /* Grab a mutex so that thread setup appears atomic. */ 4320 pthread_mutex_lock(&clone_lock); 4321 4322 memset(&info, 0, sizeof(info)); 4323 pthread_mutex_init(&info.mutex, NULL); 4324 pthread_mutex_lock(&info.mutex); 4325 pthread_cond_init(&info.cond, NULL); 4326 info.env = new_env; 4327 if (nptl_flags & CLONE_CHILD_SETTID) 4328 info.child_tidptr = child_tidptr; 4329 if (nptl_flags & CLONE_PARENT_SETTID) 4330 info.parent_tidptr = parent_tidptr; 4331 4332 ret = pthread_attr_init(&attr); 4333 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4334 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4335 /* It is not safe to deliver signals until the child has finished 4336 initializing, so temporarily block all signals. */ 4337 sigfillset(&sigmask); 4338 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4339 4340 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4341 /* TODO: Free new CPU state if thread creation failed.
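   (At present the new_env obtained from cpu_copy() and the TaskState allocated above are simply leaked when pthread_create() fails.)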
*/ 4342 4343 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4344 pthread_attr_destroy(&attr); 4345 if (ret == 0) { 4346 /* Wait for the child to initialize. */ 4347 pthread_cond_wait(&info.cond, &info.mutex); 4348 ret = info.tid; 4349 if (flags & CLONE_PARENT_SETTID) 4350 put_user_u32(ret, parent_tidptr); 4351 } else { 4352 ret = -1; 4353 } 4354 pthread_mutex_unlock(&info.mutex); 4355 pthread_cond_destroy(&info.cond); 4356 pthread_mutex_destroy(&info.mutex); 4357 pthread_mutex_unlock(&clone_lock); 4358 } else { 4359 /* if no CLONE_VM, we consider it is a fork */ 4360 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4361 return -EINVAL; 4362 fork_start(); 4363 ret = fork(); 4364 if (ret == 0) { 4365 /* Child Process. */ 4366 cpu_clone_regs(env, newsp); 4367 fork_end(1); 4368 /* There is a race condition here. The parent process could 4369 theoretically read the TID in the child process before the child 4370 tid is set. This would require using either ptrace 4371 (not implemented) or having *_tidptr to point at a shared memory 4372 mapping. We can't repeat the spinlock hack used above because 4373 the child process gets its own copy of the lock. */ 4374 if (flags & CLONE_CHILD_SETTID) 4375 put_user_u32(gettid(), child_tidptr); 4376 if (flags & CLONE_PARENT_SETTID) 4377 put_user_u32(gettid(), parent_tidptr); 4378 ts = (TaskState *)cpu->opaque; 4379 if (flags & CLONE_SETTLS) 4380 cpu_set_tls (env, newtls); 4381 if (flags & CLONE_CHILD_CLEARTID) 4382 ts->child_tidptr = child_tidptr; 4383 } else { 4384 fork_end(0); 4385 } 4386 } 4387 return ret; 4388 } 4389 4390 /* warning : doesn't handle linux specific flags... */ 4391 static int target_to_host_fcntl_cmd(int cmd) 4392 { 4393 switch(cmd) { 4394 case TARGET_F_DUPFD: 4395 case TARGET_F_GETFD: 4396 case TARGET_F_SETFD: 4397 case TARGET_F_GETFL: 4398 case TARGET_F_SETFL: 4399 return cmd; 4400 case TARGET_F_GETLK: 4401 return F_GETLK; 4402 case TARGET_F_SETLK: 4403 return F_SETLK; 4404 case TARGET_F_SETLKW: 4405 return F_SETLKW; 4406 case TARGET_F_GETOWN: 4407 return F_GETOWN; 4408 case TARGET_F_SETOWN: 4409 return F_SETOWN; 4410 case TARGET_F_GETSIG: 4411 return F_GETSIG; 4412 case TARGET_F_SETSIG: 4413 return F_SETSIG; 4414 #if TARGET_ABI_BITS == 32 4415 case TARGET_F_GETLK64: 4416 return F_GETLK64; 4417 case TARGET_F_SETLK64: 4418 return F_SETLK64; 4419 case TARGET_F_SETLKW64: 4420 return F_SETLKW64; 4421 #endif 4422 case TARGET_F_SETLEASE: 4423 return F_SETLEASE; 4424 case TARGET_F_GETLEASE: 4425 return F_GETLEASE; 4426 #ifdef F_DUPFD_CLOEXEC 4427 case TARGET_F_DUPFD_CLOEXEC: 4428 return F_DUPFD_CLOEXEC; 4429 #endif 4430 case TARGET_F_NOTIFY: 4431 return F_NOTIFY; 4432 #ifdef F_GETOWN_EX 4433 case TARGET_F_GETOWN_EX: 4434 return F_GETOWN_EX; 4435 #endif 4436 #ifdef F_SETOWN_EX 4437 case TARGET_F_SETOWN_EX: 4438 return F_SETOWN_EX; 4439 #endif 4440 default: 4441 return -TARGET_EINVAL; 4442 } 4443 return -TARGET_EINVAL; 4444 } 4445 4446 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4447 static const bitmask_transtbl flock_tbl[] = { 4448 TRANSTBL_CONVERT(F_RDLCK), 4449 TRANSTBL_CONVERT(F_WRLCK), 4450 TRANSTBL_CONVERT(F_UNLCK), 4451 TRANSTBL_CONVERT(F_EXLCK), 4452 TRANSTBL_CONVERT(F_SHLCK), 4453 { 0, 0, 0, 0 } 4454 }; 4455 4456 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4457 { 4458 struct flock fl; 4459 struct target_flock *target_fl; 4460 struct flock64 fl64; 4461 struct target_flock64 *target_fl64; 4462 #ifdef F_GETOWN_EX 4463 struct f_owner_ex fox; 4464 struct target_f_owner_ex *target_fox; 4465 #endif 4466 abi_long ret; 
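    /* do_fcntl() works in two steps: the command is first mapped to its host counterpart via target_to_host_fcntl_cmd(), then the third argument is converted per command below, since struct flock/flock64, struct f_owner_ex and the O_* status flags all differ between guest and host ABIs. */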
4467 int host_cmd = target_to_host_fcntl_cmd(cmd); 4468 4469 if (host_cmd == -TARGET_EINVAL) 4470 return host_cmd; 4471 4472 switch(cmd) { 4473 case TARGET_F_GETLK: 4474 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4475 return -TARGET_EFAULT; 4476 fl.l_type = 4477 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4478 fl.l_whence = tswap16(target_fl->l_whence); 4479 fl.l_start = tswapal(target_fl->l_start); 4480 fl.l_len = tswapal(target_fl->l_len); 4481 fl.l_pid = tswap32(target_fl->l_pid); 4482 unlock_user_struct(target_fl, arg, 0); 4483 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4484 if (ret == 0) { 4485 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4486 return -TARGET_EFAULT; 4487 target_fl->l_type = 4488 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4489 target_fl->l_whence = tswap16(fl.l_whence); 4490 target_fl->l_start = tswapal(fl.l_start); 4491 target_fl->l_len = tswapal(fl.l_len); 4492 target_fl->l_pid = tswap32(fl.l_pid); 4493 unlock_user_struct(target_fl, arg, 1); 4494 } 4495 break; 4496 4497 case TARGET_F_SETLK: 4498 case TARGET_F_SETLKW: 4499 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4500 return -TARGET_EFAULT; 4501 fl.l_type = 4502 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4503 fl.l_whence = tswap16(target_fl->l_whence); 4504 fl.l_start = tswapal(target_fl->l_start); 4505 fl.l_len = tswapal(target_fl->l_len); 4506 fl.l_pid = tswap32(target_fl->l_pid); 4507 unlock_user_struct(target_fl, arg, 0); 4508 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4509 break; 4510 4511 case TARGET_F_GETLK64: 4512 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4513 return -TARGET_EFAULT; 4514 fl64.l_type = 4515 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4516 fl64.l_whence = tswap16(target_fl64->l_whence); 4517 fl64.l_start = tswap64(target_fl64->l_start); 4518 fl64.l_len = tswap64(target_fl64->l_len); 4519 fl64.l_pid = tswap32(target_fl64->l_pid); 4520 unlock_user_struct(target_fl64, arg, 0); 4521 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4522 if (ret == 0) { 4523 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4524 return -TARGET_EFAULT; 4525 target_fl64->l_type = 4526 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4527 target_fl64->l_whence = tswap16(fl64.l_whence); 4528 target_fl64->l_start = tswap64(fl64.l_start); 4529 target_fl64->l_len = tswap64(fl64.l_len); 4530 target_fl64->l_pid = tswap32(fl64.l_pid); 4531 unlock_user_struct(target_fl64, arg, 1); 4532 } 4533 break; 4534 case TARGET_F_SETLK64: 4535 case TARGET_F_SETLKW64: 4536 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4537 return -TARGET_EFAULT; 4538 fl64.l_type = 4539 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4540 fl64.l_whence = tswap16(target_fl64->l_whence); 4541 fl64.l_start = tswap64(target_fl64->l_start); 4542 fl64.l_len = tswap64(target_fl64->l_len); 4543 fl64.l_pid = tswap32(target_fl64->l_pid); 4544 unlock_user_struct(target_fl64, arg, 0); 4545 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4546 break; 4547 4548 case TARGET_F_GETFL: 4549 ret = get_errno(fcntl(fd, host_cmd, arg)); 4550 if (ret >= 0) { 4551 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4552 } 4553 break; 4554 4555 case TARGET_F_SETFL: 4556 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4557 break; 4558 4559 #ifdef F_GETOWN_EX 4560 case TARGET_F_GETOWN_EX: 4561 ret = get_errno(fcntl(fd, host_cmd, &fox)); 4562 if (ret >= 0) { 4563 if 
(!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 4564 return -TARGET_EFAULT; 4565 target_fox->type = tswap32(fox.type); 4566 target_fox->pid = tswap32(fox.pid); 4567 unlock_user_struct(target_fox, arg, 1); 4568 } 4569 break; 4570 #endif 4571 4572 #ifdef F_SETOWN_EX 4573 case TARGET_F_SETOWN_EX: 4574 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 4575 return -TARGET_EFAULT; 4576 fox.type = tswap32(target_fox->type); 4577 fox.pid = tswap32(target_fox->pid); 4578 unlock_user_struct(target_fox, arg, 0); 4579 ret = get_errno(fcntl(fd, host_cmd, &fox)); 4580 break; 4581 #endif 4582 4583 case TARGET_F_SETOWN: 4584 case TARGET_F_GETOWN: 4585 case TARGET_F_SETSIG: 4586 case TARGET_F_GETSIG: 4587 case TARGET_F_SETLEASE: 4588 case TARGET_F_GETLEASE: 4589 ret = get_errno(fcntl(fd, host_cmd, arg)); 4590 break; 4591 4592 default: 4593 ret = get_errno(fcntl(fd, cmd, arg)); 4594 break; 4595 } 4596 return ret; 4597 } 4598 4599 #ifdef USE_UID16 4600 4601 static inline int high2lowuid(int uid) 4602 { 4603 if (uid > 65535) 4604 return 65534; 4605 else 4606 return uid; 4607 } 4608 4609 static inline int high2lowgid(int gid) 4610 { 4611 if (gid > 65535) 4612 return 65534; 4613 else 4614 return gid; 4615 } 4616 4617 static inline int low2highuid(int uid) 4618 { 4619 if ((int16_t)uid == -1) 4620 return -1; 4621 else 4622 return uid; 4623 } 4624 4625 static inline int low2highgid(int gid) 4626 { 4627 if ((int16_t)gid == -1) 4628 return -1; 4629 else 4630 return gid; 4631 } 4632 static inline int tswapid(int id) 4633 { 4634 return tswap16(id); 4635 } 4636 4637 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 4638 4639 #else /* !USE_UID16 */ 4640 static inline int high2lowuid(int uid) 4641 { 4642 return uid; 4643 } 4644 static inline int high2lowgid(int gid) 4645 { 4646 return gid; 4647 } 4648 static inline int low2highuid(int uid) 4649 { 4650 return uid; 4651 } 4652 static inline int low2highgid(int gid) 4653 { 4654 return gid; 4655 } 4656 static inline int tswapid(int id) 4657 { 4658 return tswap32(id); 4659 } 4660 4661 #define put_user_id(x, gaddr) put_user_u32(x, gaddr) 4662 4663 #endif /* USE_UID16 */ 4664 4665 void syscall_init(void) 4666 { 4667 IOCTLEntry *ie; 4668 const argtype *arg_type; 4669 int size; 4670 int i; 4671 4672 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4673 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4674 #include "syscall_types.h" 4675 #undef STRUCT 4676 #undef STRUCT_SPECIAL 4677 4678 /* Build target_to_host_errno_table[] table from 4679 * host_to_target_errno_table[]. */ 4680 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4681 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4682 } 4683 4684 /* we patch the ioctl size if necessary. 
We rely on the fact that 4685 no ioctl has all the bits at '1' in the size field */ 4686 ie = ioctl_entries; 4687 while (ie->target_cmd != 0) { 4688 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4689 TARGET_IOC_SIZEMASK) { 4690 arg_type = ie->arg_type; 4691 if (arg_type[0] != TYPE_PTR) { 4692 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4693 ie->target_cmd); 4694 exit(1); 4695 } 4696 arg_type++; 4697 size = thunk_type_size(arg_type, 0); 4698 ie->target_cmd = (ie->target_cmd & 4699 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4700 (size << TARGET_IOC_SIZESHIFT); 4701 } 4702 4703 /* automatic consistency check if same arch */ 4704 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4705 (defined(__x86_64__) && defined(TARGET_X86_64)) 4706 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4707 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4708 ie->name, ie->target_cmd, ie->host_cmd); 4709 } 4710 #endif 4711 ie++; 4712 } 4713 } 4714 4715 #if TARGET_ABI_BITS == 32 4716 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4717 { 4718 #ifdef TARGET_WORDS_BIGENDIAN 4719 return ((uint64_t)word0 << 32) | word1; 4720 #else 4721 return ((uint64_t)word1 << 32) | word0; 4722 #endif 4723 } 4724 #else /* TARGET_ABI_BITS == 32 */ 4725 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4726 { 4727 return word0; 4728 } 4729 #endif /* TARGET_ABI_BITS != 32 */ 4730 4731 #ifdef TARGET_NR_truncate64 4732 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4733 abi_long arg2, 4734 abi_long arg3, 4735 abi_long arg4) 4736 { 4737 if (regpairs_aligned(cpu_env)) { 4738 arg2 = arg3; 4739 arg3 = arg4; 4740 } 4741 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4742 } 4743 #endif 4744 4745 #ifdef TARGET_NR_ftruncate64 4746 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4747 abi_long arg2, 4748 abi_long arg3, 4749 abi_long arg4) 4750 { 4751 if (regpairs_aligned(cpu_env)) { 4752 arg2 = arg3; 4753 arg3 = arg4; 4754 } 4755 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4756 } 4757 #endif 4758 4759 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4760 abi_ulong target_addr) 4761 { 4762 struct target_timespec *target_ts; 4763 4764 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4765 return -TARGET_EFAULT; 4766 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4767 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4768 unlock_user_struct(target_ts, target_addr, 0); 4769 return 0; 4770 } 4771 4772 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4773 struct timespec *host_ts) 4774 { 4775 struct target_timespec *target_ts; 4776 4777 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4778 return -TARGET_EFAULT; 4779 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4780 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4781 unlock_user_struct(target_ts, target_addr, 1); 4782 return 0; 4783 } 4784 4785 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 4786 abi_ulong target_addr) 4787 { 4788 struct target_itimerspec *target_itspec; 4789 4790 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 4791 return -TARGET_EFAULT; 4792 } 4793 4794 host_itspec->it_interval.tv_sec = 4795 tswapal(target_itspec->it_interval.tv_sec); 4796 host_itspec->it_interval.tv_nsec = 4797 tswapal(target_itspec->it_interval.tv_nsec); 4798 
host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 4799 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 4800 4801 unlock_user_struct(target_itspec, target_addr, 1); 4802 return 0; 4803 } 4804 4805 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 4806 struct itimerspec *host_its) 4807 { 4808 struct target_itimerspec *target_itspec; 4809 4810 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 4811 return -TARGET_EFAULT; 4812 } 4813 4814 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 4815 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 4816 4817 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 4818 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 4819 4820 unlock_user_struct(target_itspec, target_addr, 0); 4821 return 0; 4822 } 4823 4824 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4825 static inline abi_long host_to_target_stat64(void *cpu_env, 4826 abi_ulong target_addr, 4827 struct stat *host_st) 4828 { 4829 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 4830 if (((CPUARMState *)cpu_env)->eabi) { 4831 struct target_eabi_stat64 *target_st; 4832 4833 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4834 return -TARGET_EFAULT; 4835 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4836 __put_user(host_st->st_dev, &target_st->st_dev); 4837 __put_user(host_st->st_ino, &target_st->st_ino); 4838 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4839 __put_user(host_st->st_ino, &target_st->__st_ino); 4840 #endif 4841 __put_user(host_st->st_mode, &target_st->st_mode); 4842 __put_user(host_st->st_nlink, &target_st->st_nlink); 4843 __put_user(host_st->st_uid, &target_st->st_uid); 4844 __put_user(host_st->st_gid, &target_st->st_gid); 4845 __put_user(host_st->st_rdev, &target_st->st_rdev); 4846 __put_user(host_st->st_size, &target_st->st_size); 4847 __put_user(host_st->st_blksize, &target_st->st_blksize); 4848 __put_user(host_st->st_blocks, &target_st->st_blocks); 4849 __put_user(host_st->st_atime, &target_st->target_st_atime); 4850 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4851 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4852 unlock_user_struct(target_st, target_addr, 1); 4853 } else 4854 #endif 4855 { 4856 #if defined(TARGET_HAS_STRUCT_STAT64) 4857 struct target_stat64 *target_st; 4858 #else 4859 struct target_stat *target_st; 4860 #endif 4861 4862 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4863 return -TARGET_EFAULT; 4864 memset(target_st, 0, sizeof(*target_st)); 4865 __put_user(host_st->st_dev, &target_st->st_dev); 4866 __put_user(host_st->st_ino, &target_st->st_ino); 4867 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4868 __put_user(host_st->st_ino, &target_st->__st_ino); 4869 #endif 4870 __put_user(host_st->st_mode, &target_st->st_mode); 4871 __put_user(host_st->st_nlink, &target_st->st_nlink); 4872 __put_user(host_st->st_uid, &target_st->st_uid); 4873 __put_user(host_st->st_gid, &target_st->st_gid); 4874 __put_user(host_st->st_rdev, &target_st->st_rdev); 4875 /* XXX: better use of kernel struct */ 4876 __put_user(host_st->st_size, &target_st->st_size); 4877 __put_user(host_st->st_blksize, &target_st->st_blksize); 4878 __put_user(host_st->st_blocks, &target_st->st_blocks); 4879 __put_user(host_st->st_atime, &target_st->target_st_atime); 4880 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4881 __put_user(host_st->st_ctime, 
&target_st->target_st_ctime); 4882 unlock_user_struct(target_st, target_addr, 1); 4883 } 4884 4885 return 0; 4886 } 4887 #endif 4888 4889 /* ??? Using host futex calls even when target atomic operations 4890 are not really atomic probably breaks things. However implementing 4891 futexes locally would make futexes shared between multiple processes 4892 tricky. However they're probably useless because guest atomic 4893 operations won't work either. */ 4894 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4895 target_ulong uaddr2, int val3) 4896 { 4897 struct timespec ts, *pts; 4898 int base_op; 4899 4900 /* ??? We assume FUTEX_* constants are the same on both host 4901 and target. */ 4902 #ifdef FUTEX_CMD_MASK 4903 base_op = op & FUTEX_CMD_MASK; 4904 #else 4905 base_op = op; 4906 #endif 4907 switch (base_op) { 4908 case FUTEX_WAIT: 4909 case FUTEX_WAIT_BITSET: 4910 if (timeout) { 4911 pts = &ts; 4912 target_to_host_timespec(pts, timeout); 4913 } else { 4914 pts = NULL; 4915 } 4916 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4917 pts, NULL, val3)); 4918 case FUTEX_WAKE: 4919 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4920 case FUTEX_FD: 4921 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4922 case FUTEX_REQUEUE: 4923 case FUTEX_CMP_REQUEUE: 4924 case FUTEX_WAKE_OP: 4925 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4926 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4927 But the prototype takes a `struct timespec *'; insert casts 4928 to satisfy the compiler. We do not need to tswap TIMEOUT 4929 since it's not compared to guest memory. */ 4930 pts = (struct timespec *)(uintptr_t) timeout; 4931 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4932 g2h(uaddr2), 4933 (base_op == FUTEX_CMP_REQUEUE 4934 ? tswap32(val3) 4935 : val3))); 4936 default: 4937 return -TARGET_ENOSYS; 4938 } 4939 } 4940 4941 /* Map host to target signal numbers for the wait family of syscalls. 4942 Assume all other status bits are the same. */ 4943 int host_to_target_waitstatus(int status) 4944 { 4945 if (WIFSIGNALED(status)) { 4946 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4947 } 4948 if (WIFSTOPPED(status)) { 4949 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4950 | (status & 0xff); 4951 } 4952 return status; 4953 } 4954 4955 static int relstr_to_int(const char *s) 4956 { 4957 /* Convert a uname release string like "2.6.18" to an integer 4958 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.) 4959 */ 4960 int i, n, tmp; 4961 4962 tmp = 0; 4963 for (i = 0; i < 3; i++) { 4964 n = 0; 4965 while (*s >= '0' && *s <= '9') { 4966 n *= 10; 4967 n += *s - '0'; 4968 s++; 4969 } 4970 tmp = (tmp << 8) + n; 4971 if (*s == '.') { 4972 s++; 4973 } 4974 } 4975 return tmp; 4976 } 4977 4978 int get_osversion(void) 4979 { 4980 static int osversion; 4981 struct new_utsname buf; 4982 const char *s; 4983 4984 if (osversion) 4985 return osversion; 4986 if (qemu_uname_release && *qemu_uname_release) { 4987 s = qemu_uname_release; 4988 } else { 4989 if (sys_uname(&buf)) 4990 return 0; 4991 s = buf.release; 4992 } 4993 osversion = relstr_to_int(s); 4994 return osversion; 4995 } 4996 4997 void init_qemu_uname_release(void) 4998 { 4999 /* Initialize qemu_uname_release for later use. 5000 * If the host kernel is too old and the user hasn't asked for 5001 * a specific fake version number, we might want to fake a minimum 5002 * target kernel version. 
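 * (Guest C libraries commonly refuse to start when uname() reports a kernel older than the one they were built for, so advertising at least UNAME_MINIMUM_RELEASE avoids that failure mode.)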
5003 */ 5004 #ifdef UNAME_MINIMUM_RELEASE 5005 struct new_utsname buf; 5006 5007 if (qemu_uname_release && *qemu_uname_release) { 5008 return; 5009 } 5010 5011 if (sys_uname(&buf)) { 5012 return; 5013 } 5014 5015 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) { 5016 qemu_uname_release = UNAME_MINIMUM_RELEASE; 5017 } 5018 #endif 5019 } 5020 5021 static int open_self_maps(void *cpu_env, int fd) 5022 { 5023 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5024 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5025 TaskState *ts = cpu->opaque; 5026 #endif 5027 FILE *fp; 5028 char *line = NULL; 5029 size_t len = 0; 5030 ssize_t read; 5031 5032 fp = fopen("/proc/self/maps", "r"); 5033 if (fp == NULL) { 5034 return -EACCES; 5035 } 5036 5037 while ((read = getline(&line, &len, fp)) != -1) { 5038 int fields, dev_maj, dev_min, inode; 5039 uint64_t min, max, offset; 5040 char flag_r, flag_w, flag_x, flag_p; 5041 char path[512] = ""; 5042 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 5043 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 5044 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5045 5046 if ((fields < 10) || (fields > 11)) { 5047 continue; 5048 } 5049 if (!strncmp(path, "[stack]", 7)) { 5050 continue; 5051 } 5052 if (h2g_valid(min) && h2g_valid(max)) { 5053 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5054 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 5055 h2g(min), h2g(max), flag_r, flag_w, 5056 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5057 path[0] ? " " : "", path); 5058 } 5059 } 5060 5061 free(line); 5062 fclose(fp); 5063 5064 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5065 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 5066 (unsigned long long)ts->info->stack_limit, 5067 (unsigned long long)(ts->info->start_stack + 5068 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 5069 (unsigned long long)0); 5070 #endif 5071 5072 return 0; 5073 } 5074 5075 static int open_self_stat(void *cpu_env, int fd) 5076 { 5077 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5078 TaskState *ts = cpu->opaque; 5079 abi_ulong start_stack = ts->info->start_stack; 5080 int i; 5081 5082 for (i = 0; i < 44; i++) { 5083 char buf[128]; 5084 int len; 5085 uint64_t val = 0; 5086 5087 if (i == 0) { 5088 /* pid */ 5089 val = getpid(); 5090 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5091 } else if (i == 1) { 5092 /* app name */ 5093 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5094 } else if (i == 27) { 5095 /* stack bottom */ 5096 val = start_stack; 5097 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5098 } else { 5099 /* for the rest, there is MasterCard */ 5100 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 5101 } 5102 5103 len = strlen(buf); 5104 if (write(fd, buf, len) != len) { 5105 return -1; 5106 } 5107 } 5108 5109 return 0; 5110 } 5111 5112 static int open_self_auxv(void *cpu_env, int fd) 5113 { 5114 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5115 TaskState *ts = cpu->opaque; 5116 abi_ulong auxv = ts->info->saved_auxv; 5117 abi_ulong len = ts->info->auxv_len; 5118 char *ptr; 5119 5120 /* 5121 * Auxiliary vector is stored in target process stack. 
5122 * read in whole auxv vector and copy it to file 5123 */ 5124 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5125 if (ptr != NULL) { 5126 while (len > 0) { 5127 ssize_t r; 5128 r = write(fd, ptr, len); 5129 if (r <= 0) { 5130 break; 5131 } 5132 len -= r; 5133 ptr += r; 5134 } 5135 lseek(fd, 0, SEEK_SET); 5136 unlock_user(ptr, auxv, len); 5137 } 5138 5139 return 0; 5140 } 5141 5142 static int is_proc_myself(const char *filename, const char *entry) 5143 { 5144 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 5145 filename += strlen("/proc/"); 5146 if (!strncmp(filename, "self/", strlen("self/"))) { 5147 filename += strlen("self/"); 5148 } else if (*filename >= '1' && *filename <= '9') { 5149 char myself[80]; 5150 snprintf(myself, sizeof(myself), "%d/", getpid()); 5151 if (!strncmp(filename, myself, strlen(myself))) { 5152 filename += strlen(myself); 5153 } else { 5154 return 0; 5155 } 5156 } else { 5157 return 0; 5158 } 5159 if (!strcmp(filename, entry)) { 5160 return 1; 5161 } 5162 } 5163 return 0; 5164 } 5165 5166 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5167 static int is_proc(const char *filename, const char *entry) 5168 { 5169 return strcmp(filename, entry) == 0; 5170 } 5171 5172 static int open_net_route(void *cpu_env, int fd) 5173 { 5174 FILE *fp; 5175 char *line = NULL; 5176 size_t len = 0; 5177 ssize_t read; 5178 5179 fp = fopen("/proc/net/route", "r"); 5180 if (fp == NULL) { 5181 return -EACCES; 5182 } 5183 5184 /* read header */ 5185 5186 read = getline(&line, &len, fp); 5187 dprintf(fd, "%s", line); 5188 5189 /* read routes */ 5190 5191 while ((read = getline(&line, &len, fp)) != -1) { 5192 char iface[16]; 5193 uint32_t dest, gw, mask; 5194 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 5195 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5196 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 5197 &mask, &mtu, &window, &irtt); 5198 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5199 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 5200 metric, tswap32(mask), mtu, window, irtt); 5201 } 5202 5203 free(line); 5204 fclose(fp); 5205 5206 return 0; 5207 } 5208 #endif 5209 5210 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 5211 { 5212 struct fake_open { 5213 const char *filename; 5214 int (*fill)(void *cpu_env, int fd); 5215 int (*cmp)(const char *s1, const char *s2); 5216 }; 5217 const struct fake_open *fake_open; 5218 static const struct fake_open fakes[] = { 5219 { "maps", open_self_maps, is_proc_myself }, 5220 { "stat", open_self_stat, is_proc_myself }, 5221 { "auxv", open_self_auxv, is_proc_myself }, 5222 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5223 { "/proc/net/route", open_net_route, is_proc }, 5224 #endif 5225 { NULL, NULL, NULL } 5226 }; 5227 5228 for (fake_open = fakes; fake_open->filename; fake_open++) { 5229 if (fake_open->cmp(pathname, fake_open->filename)) { 5230 break; 5231 } 5232 } 5233 5234 if (fake_open->filename) { 5235 const char *tmpdir; 5236 char filename[PATH_MAX]; 5237 int fd, r; 5238 5239 /* create temporary file to map stat to */ 5240 tmpdir = getenv("TMPDIR"); 5241 if (!tmpdir) 5242 tmpdir = "/tmp"; 5243 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5244 fd = mkstemp(filename); 5245 if (fd < 0) { 5246 return fd; 5247 } 5248 unlink(filename); 5249 5250 if ((r = fake_open->fill(cpu_env, fd))) { 5251 close(fd); 5252 return r; 5253 } 5254 lseek(fd, 0, SEEK_SET); 5255 5256 return fd; 
5257 } 5258 5259 return get_errno(open(path(pathname), flags, mode)); 5260 } 5261 5262 /* do_syscall() should always have a single exit point at the end so 5263 that actions, such as logging of syscall results, can be performed. 5264 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 5265 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5266 abi_long arg2, abi_long arg3, abi_long arg4, 5267 abi_long arg5, abi_long arg6, abi_long arg7, 5268 abi_long arg8) 5269 { 5270 CPUState *cpu = ENV_GET_CPU(cpu_env); 5271 abi_long ret; 5272 struct stat st; 5273 struct statfs stfs; 5274 void *p; 5275 5276 #ifdef DEBUG 5277 gemu_log("syscall %d", num); 5278 #endif 5279 if(do_strace) 5280 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5281 5282 switch(num) { 5283 case TARGET_NR_exit: 5284 /* In old applications this may be used to implement _exit(2). 5285 However in threaded applications it is used for thread termination, 5286 and _exit_group is used for application termination. 5287 Do thread termination if we have more than one thread. */ 5288 /* FIXME: This probably breaks if a signal arrives. We should probably 5289 be disabling signals. */ 5290 if (CPU_NEXT(first_cpu)) { 5291 TaskState *ts; 5292 5293 cpu_list_lock(); 5294 /* Remove the CPU from the list. */ 5295 QTAILQ_REMOVE(&cpus, cpu, node); 5296 cpu_list_unlock(); 5297 ts = cpu->opaque; 5298 if (ts->child_tidptr) { 5299 put_user_u32(0, ts->child_tidptr); 5300 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5301 NULL, NULL, 0); 5302 } 5303 thread_cpu = NULL; 5304 object_unref(OBJECT(cpu)); 5305 g_free(ts); 5306 pthread_exit(NULL); 5307 } 5308 #ifdef TARGET_GPROF 5309 _mcleanup(); 5310 #endif 5311 gdb_exit(cpu_env, arg1); 5312 _exit(arg1); 5313 ret = 0; /* avoid warning */ 5314 break; 5315 case TARGET_NR_read: 5316 if (arg3 == 0) 5317 ret = 0; 5318 else { 5319 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5320 goto efault; 5321 ret = get_errno(read(arg1, p, arg3)); 5322 unlock_user(p, arg2, ret); 5323 } 5324 break; 5325 case TARGET_NR_write: 5326 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5327 goto efault; 5328 ret = get_errno(write(arg1, p, arg3)); 5329 unlock_user(p, arg2, 0); 5330 break; 5331 case TARGET_NR_open: 5332 if (!(p = lock_user_string(arg1))) 5333 goto efault; 5334 ret = get_errno(do_open(cpu_env, p, 5335 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5336 arg3)); 5337 unlock_user(p, arg1, 0); 5338 break; 5339 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5340 case TARGET_NR_openat: 5341 if (!(p = lock_user_string(arg2))) 5342 goto efault; 5343 ret = get_errno(sys_openat(arg1, 5344 path(p), 5345 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5346 arg4)); 5347 unlock_user(p, arg2, 0); 5348 break; 5349 #endif 5350 case TARGET_NR_close: 5351 ret = get_errno(close(arg1)); 5352 break; 5353 case TARGET_NR_brk: 5354 ret = do_brk(arg1); 5355 break; 5356 case TARGET_NR_fork: 5357 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5358 break; 5359 #ifdef TARGET_NR_waitpid 5360 case TARGET_NR_waitpid: 5361 { 5362 int status; 5363 ret = get_errno(waitpid(arg1, &status, arg3)); 5364 if (!is_error(ret) && arg2 && ret 5365 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5366 goto efault; 5367 } 5368 break; 5369 #endif 5370 #ifdef TARGET_NR_waitid 5371 case TARGET_NR_waitid: 5372 { 5373 siginfo_t info; 5374 info.si_pid = 0; 5375 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5376 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5377 if (!(p = lock_user(VERIFY_WRITE, arg3,
sizeof(target_siginfo_t), 0))) 5378 goto efault; 5379 host_to_target_siginfo(p, &info); 5380 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5381 } 5382 } 5383 break; 5384 #endif 5385 #ifdef TARGET_NR_creat /* not on alpha */ 5386 case TARGET_NR_creat: 5387 if (!(p = lock_user_string(arg1))) 5388 goto efault; 5389 ret = get_errno(creat(p, arg2)); 5390 unlock_user(p, arg1, 0); 5391 break; 5392 #endif 5393 case TARGET_NR_link: 5394 { 5395 void * p2; 5396 p = lock_user_string(arg1); 5397 p2 = lock_user_string(arg2); 5398 if (!p || !p2) 5399 ret = -TARGET_EFAULT; 5400 else 5401 ret = get_errno(link(p, p2)); 5402 unlock_user(p2, arg2, 0); 5403 unlock_user(p, arg1, 0); 5404 } 5405 break; 5406 #if defined(TARGET_NR_linkat) 5407 case TARGET_NR_linkat: 5408 { 5409 void * p2 = NULL; 5410 if (!arg2 || !arg4) 5411 goto efault; 5412 p = lock_user_string(arg2); 5413 p2 = lock_user_string(arg4); 5414 if (!p || !p2) 5415 ret = -TARGET_EFAULT; 5416 else 5417 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5418 unlock_user(p, arg2, 0); 5419 unlock_user(p2, arg4, 0); 5420 } 5421 break; 5422 #endif 5423 case TARGET_NR_unlink: 5424 if (!(p = lock_user_string(arg1))) 5425 goto efault; 5426 ret = get_errno(unlink(p)); 5427 unlock_user(p, arg1, 0); 5428 break; 5429 #if defined(TARGET_NR_unlinkat) 5430 case TARGET_NR_unlinkat: 5431 if (!(p = lock_user_string(arg2))) 5432 goto efault; 5433 ret = get_errno(unlinkat(arg1, p, arg3)); 5434 unlock_user(p, arg2, 0); 5435 break; 5436 #endif 5437 case TARGET_NR_execve: 5438 { 5439 char **argp, **envp; 5440 int argc, envc; 5441 abi_ulong gp; 5442 abi_ulong guest_argp; 5443 abi_ulong guest_envp; 5444 abi_ulong addr; 5445 char **q; 5446 int total_size = 0; 5447 5448 argc = 0; 5449 guest_argp = arg2; 5450 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5451 if (get_user_ual(addr, gp)) 5452 goto efault; 5453 if (!addr) 5454 break; 5455 argc++; 5456 } 5457 envc = 0; 5458 guest_envp = arg3; 5459 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5460 if (get_user_ual(addr, gp)) 5461 goto efault; 5462 if (!addr) 5463 break; 5464 envc++; 5465 } 5466 5467 argp = alloca((argc + 1) * sizeof(void *)); 5468 envp = alloca((envc + 1) * sizeof(void *)); 5469 5470 for (gp = guest_argp, q = argp; gp; 5471 gp += sizeof(abi_ulong), q++) { 5472 if (get_user_ual(addr, gp)) 5473 goto execve_efault; 5474 if (!addr) 5475 break; 5476 if (!(*q = lock_user_string(addr))) 5477 goto execve_efault; 5478 total_size += strlen(*q) + 1; 5479 } 5480 *q = NULL; 5481 5482 for (gp = guest_envp, q = envp; gp; 5483 gp += sizeof(abi_ulong), q++) { 5484 if (get_user_ual(addr, gp)) 5485 goto execve_efault; 5486 if (!addr) 5487 break; 5488 if (!(*q = lock_user_string(addr))) 5489 goto execve_efault; 5490 total_size += strlen(*q) + 1; 5491 } 5492 *q = NULL; 5493 5494 /* This case will not be caught by the host's execve() if its 5495 page size is bigger than the target's. 
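   Enforce the guest's MAX_ARG_PAGES * TARGET_PAGE_SIZE limit here instead and fail with E2BIG, just as the guest kernel would.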
*/ 5496 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5497 ret = -TARGET_E2BIG; 5498 goto execve_end; 5499 } 5500 if (!(p = lock_user_string(arg1))) 5501 goto execve_efault; 5502 ret = get_errno(execve(p, argp, envp)); 5503 unlock_user(p, arg1, 0); 5504 5505 goto execve_end; 5506 5507 execve_efault: 5508 ret = -TARGET_EFAULT; 5509 5510 execve_end: 5511 for (gp = guest_argp, q = argp; *q; 5512 gp += sizeof(abi_ulong), q++) { 5513 if (get_user_ual(addr, gp) 5514 || !addr) 5515 break; 5516 unlock_user(*q, addr, 0); 5517 } 5518 for (gp = guest_envp, q = envp; *q; 5519 gp += sizeof(abi_ulong), q++) { 5520 if (get_user_ual(addr, gp) 5521 || !addr) 5522 break; 5523 unlock_user(*q, addr, 0); 5524 } 5525 } 5526 break; 5527 case TARGET_NR_chdir: 5528 if (!(p = lock_user_string(arg1))) 5529 goto efault; 5530 ret = get_errno(chdir(p)); 5531 unlock_user(p, arg1, 0); 5532 break; 5533 #ifdef TARGET_NR_time 5534 case TARGET_NR_time: 5535 { 5536 time_t host_time; 5537 ret = get_errno(time(&host_time)); 5538 if (!is_error(ret) 5539 && arg1 5540 && put_user_sal(host_time, arg1)) 5541 goto efault; 5542 } 5543 break; 5544 #endif 5545 case TARGET_NR_mknod: 5546 if (!(p = lock_user_string(arg1))) 5547 goto efault; 5548 ret = get_errno(mknod(p, arg2, arg3)); 5549 unlock_user(p, arg1, 0); 5550 break; 5551 #if defined(TARGET_NR_mknodat) 5552 case TARGET_NR_mknodat: 5553 if (!(p = lock_user_string(arg2))) 5554 goto efault; 5555 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5556 unlock_user(p, arg2, 0); 5557 break; 5558 #endif 5559 case TARGET_NR_chmod: 5560 if (!(p = lock_user_string(arg1))) 5561 goto efault; 5562 ret = get_errno(chmod(p, arg2)); 5563 unlock_user(p, arg1, 0); 5564 break; 5565 #ifdef TARGET_NR_break 5566 case TARGET_NR_break: 5567 goto unimplemented; 5568 #endif 5569 #ifdef TARGET_NR_oldstat 5570 case TARGET_NR_oldstat: 5571 goto unimplemented; 5572 #endif 5573 case TARGET_NR_lseek: 5574 ret = get_errno(lseek(arg1, arg2, arg3)); 5575 break; 5576 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5577 /* Alpha specific */ 5578 case TARGET_NR_getxpid: 5579 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5580 ret = get_errno(getpid()); 5581 break; 5582 #endif 5583 #ifdef TARGET_NR_getpid 5584 case TARGET_NR_getpid: 5585 ret = get_errno(getpid()); 5586 break; 5587 #endif 5588 case TARGET_NR_mount: 5589 { 5590 /* need to look at the data field */ 5591 void *p2, *p3; 5592 p = lock_user_string(arg1); 5593 p2 = lock_user_string(arg2); 5594 p3 = lock_user_string(arg3); 5595 if (!p || !p2 || !p3) 5596 ret = -TARGET_EFAULT; 5597 else { 5598 /* FIXME - arg5 should be locked, but it isn't clear how to 5599 * do that since it's not guaranteed to be a NULL-terminated 5600 * string. 5601 */ 5602 if ( ! 
arg5 ) 5603 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5604 else 5605 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5606 } 5607 unlock_user(p, arg1, 0); 5608 unlock_user(p2, arg2, 0); 5609 unlock_user(p3, arg3, 0); 5610 break; 5611 } 5612 #ifdef TARGET_NR_umount 5613 case TARGET_NR_umount: 5614 if (!(p = lock_user_string(arg1))) 5615 goto efault; 5616 ret = get_errno(umount(p)); 5617 unlock_user(p, arg1, 0); 5618 break; 5619 #endif 5620 #ifdef TARGET_NR_stime /* not on alpha */ 5621 case TARGET_NR_stime: 5622 { 5623 time_t host_time; 5624 if (get_user_sal(host_time, arg1)) 5625 goto efault; 5626 ret = get_errno(stime(&host_time)); 5627 } 5628 break; 5629 #endif 5630 case TARGET_NR_ptrace: 5631 goto unimplemented; 5632 #ifdef TARGET_NR_alarm /* not on alpha */ 5633 case TARGET_NR_alarm: 5634 ret = alarm(arg1); 5635 break; 5636 #endif 5637 #ifdef TARGET_NR_oldfstat 5638 case TARGET_NR_oldfstat: 5639 goto unimplemented; 5640 #endif 5641 #ifdef TARGET_NR_pause /* not on alpha */ 5642 case TARGET_NR_pause: 5643 ret = get_errno(pause()); 5644 break; 5645 #endif 5646 #ifdef TARGET_NR_utime 5647 case TARGET_NR_utime: 5648 { 5649 struct utimbuf tbuf, *host_tbuf; 5650 struct target_utimbuf *target_tbuf; 5651 if (arg2) { 5652 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5653 goto efault; 5654 tbuf.actime = tswapal(target_tbuf->actime); 5655 tbuf.modtime = tswapal(target_tbuf->modtime); 5656 unlock_user_struct(target_tbuf, arg2, 0); 5657 host_tbuf = &tbuf; 5658 } else { 5659 host_tbuf = NULL; 5660 } 5661 if (!(p = lock_user_string(arg1))) 5662 goto efault; 5663 ret = get_errno(utime(p, host_tbuf)); 5664 unlock_user(p, arg1, 0); 5665 } 5666 break; 5667 #endif 5668 case TARGET_NR_utimes: 5669 { 5670 struct timeval *tvp, tv[2]; 5671 if (arg2) { 5672 if (copy_from_user_timeval(&tv[0], arg2) 5673 || copy_from_user_timeval(&tv[1], 5674 arg2 + sizeof(struct target_timeval))) 5675 goto efault; 5676 tvp = tv; 5677 } else { 5678 tvp = NULL; 5679 } 5680 if (!(p = lock_user_string(arg1))) 5681 goto efault; 5682 ret = get_errno(utimes(p, tvp)); 5683 unlock_user(p, arg1, 0); 5684 } 5685 break; 5686 #if defined(TARGET_NR_futimesat) 5687 case TARGET_NR_futimesat: 5688 { 5689 struct timeval *tvp, tv[2]; 5690 if (arg3) { 5691 if (copy_from_user_timeval(&tv[0], arg3) 5692 || copy_from_user_timeval(&tv[1], 5693 arg3 + sizeof(struct target_timeval))) 5694 goto efault; 5695 tvp = tv; 5696 } else { 5697 tvp = NULL; 5698 } 5699 if (!(p = lock_user_string(arg2))) 5700 goto efault; 5701 ret = get_errno(futimesat(arg1, path(p), tvp)); 5702 unlock_user(p, arg2, 0); 5703 } 5704 break; 5705 #endif 5706 #ifdef TARGET_NR_stty 5707 case TARGET_NR_stty: 5708 goto unimplemented; 5709 #endif 5710 #ifdef TARGET_NR_gtty 5711 case TARGET_NR_gtty: 5712 goto unimplemented; 5713 #endif 5714 case TARGET_NR_access: 5715 if (!(p = lock_user_string(arg1))) 5716 goto efault; 5717 ret = get_errno(access(path(p), arg2)); 5718 unlock_user(p, arg1, 0); 5719 break; 5720 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5721 case TARGET_NR_faccessat: 5722 if (!(p = lock_user_string(arg2))) 5723 goto efault; 5724 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5725 unlock_user(p, arg2, 0); 5726 break; 5727 #endif 5728 #ifdef TARGET_NR_nice /* not on alpha */ 5729 case TARGET_NR_nice: 5730 ret = get_errno(nice(arg1)); 5731 break; 5732 #endif 5733 #ifdef TARGET_NR_ftime 5734 case TARGET_NR_ftime: 5735 goto unimplemented; 5736 #endif 5737 case TARGET_NR_sync: 5738 sync(); 5739 ret = 0; 5740 break; 
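    /* Signal numbers differ between guest and host ABIs, so kill()'s signal argument is translated with target_to_host_signal() before it reaches the host. */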
5741 case TARGET_NR_kill: 5742 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5743 break; 5744 case TARGET_NR_rename: 5745 { 5746 void *p2; 5747 p = lock_user_string(arg1); 5748 p2 = lock_user_string(arg2); 5749 if (!p || !p2) 5750 ret = -TARGET_EFAULT; 5751 else 5752 ret = get_errno(rename(p, p2)); 5753 unlock_user(p2, arg2, 0); 5754 unlock_user(p, arg1, 0); 5755 } 5756 break; 5757 #if defined(TARGET_NR_renameat) 5758 case TARGET_NR_renameat: 5759 { 5760 void *p2; 5761 p = lock_user_string(arg2); 5762 p2 = lock_user_string(arg4); 5763 if (!p || !p2) 5764 ret = -TARGET_EFAULT; 5765 else 5766 ret = get_errno(renameat(arg1, p, arg3, p2)); 5767 unlock_user(p2, arg4, 0); 5768 unlock_user(p, arg2, 0); 5769 } 5770 break; 5771 #endif 5772 case TARGET_NR_mkdir: 5773 if (!(p = lock_user_string(arg1))) 5774 goto efault; 5775 ret = get_errno(mkdir(p, arg2)); 5776 unlock_user(p, arg1, 0); 5777 break; 5778 #if defined(TARGET_NR_mkdirat) 5779 case TARGET_NR_mkdirat: 5780 if (!(p = lock_user_string(arg2))) 5781 goto efault; 5782 ret = get_errno(mkdirat(arg1, p, arg3)); 5783 unlock_user(p, arg2, 0); 5784 break; 5785 #endif 5786 case TARGET_NR_rmdir: 5787 if (!(p = lock_user_string(arg1))) 5788 goto efault; 5789 ret = get_errno(rmdir(p)); 5790 unlock_user(p, arg1, 0); 5791 break; 5792 case TARGET_NR_dup: 5793 ret = get_errno(dup(arg1)); 5794 break; 5795 case TARGET_NR_pipe: 5796 ret = do_pipe(cpu_env, arg1, 0, 0); 5797 break; 5798 #ifdef TARGET_NR_pipe2 5799 case TARGET_NR_pipe2: 5800 ret = do_pipe(cpu_env, arg1, 5801 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5802 break; 5803 #endif 5804 case TARGET_NR_times: 5805 { 5806 struct target_tms *tmsp; 5807 struct tms tms; 5808 ret = get_errno(times(&tms)); 5809 if (arg1) { 5810 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5811 if (!tmsp) 5812 goto efault; 5813 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5814 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5815 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5816 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5817 } 5818 if (!is_error(ret)) 5819 ret = host_to_target_clock_t(ret); 5820 } 5821 break; 5822 #ifdef TARGET_NR_prof 5823 case TARGET_NR_prof: 5824 goto unimplemented; 5825 #endif 5826 #ifdef TARGET_NR_signal 5827 case TARGET_NR_signal: 5828 goto unimplemented; 5829 #endif 5830 case TARGET_NR_acct: 5831 if (arg1 == 0) { 5832 ret = get_errno(acct(NULL)); 5833 } else { 5834 if (!(p = lock_user_string(arg1))) 5835 goto efault; 5836 ret = get_errno(acct(path(p))); 5837 unlock_user(p, arg1, 0); 5838 } 5839 break; 5840 #ifdef TARGET_NR_umount2 5841 case TARGET_NR_umount2: 5842 if (!(p = lock_user_string(arg1))) 5843 goto efault; 5844 ret = get_errno(umount2(p, arg2)); 5845 unlock_user(p, arg1, 0); 5846 break; 5847 #endif 5848 #ifdef TARGET_NR_lock 5849 case TARGET_NR_lock: 5850 goto unimplemented; 5851 #endif 5852 case TARGET_NR_ioctl: 5853 ret = do_ioctl(arg1, arg2, arg3); 5854 break; 5855 case TARGET_NR_fcntl: 5856 ret = do_fcntl(arg1, arg2, arg3); 5857 break; 5858 #ifdef TARGET_NR_mpx 5859 case TARGET_NR_mpx: 5860 goto unimplemented; 5861 #endif 5862 case TARGET_NR_setpgid: 5863 ret = get_errno(setpgid(arg1, arg2)); 5864 break; 5865 #ifdef TARGET_NR_ulimit 5866 case TARGET_NR_ulimit: 5867 goto unimplemented; 5868 #endif 5869 #ifdef TARGET_NR_oldolduname 5870 case TARGET_NR_oldolduname: 5871 goto unimplemented; 5872 #endif 5873 case TARGET_NR_umask: 5874 ret = get_errno(umask(arg1)); 5875 
break; 5876 case TARGET_NR_chroot: 5877 if (!(p = lock_user_string(arg1))) 5878 goto efault; 5879 ret = get_errno(chroot(p)); 5880 unlock_user(p, arg1, 0); 5881 break; 5882 case TARGET_NR_ustat: 5883 goto unimplemented; 5884 case TARGET_NR_dup2: 5885 ret = get_errno(dup2(arg1, arg2)); 5886 break; 5887 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5888 case TARGET_NR_dup3: 5889 ret = get_errno(dup3(arg1, arg2, arg3)); 5890 break; 5891 #endif 5892 #ifdef TARGET_NR_getppid /* not on alpha */ 5893 case TARGET_NR_getppid: 5894 ret = get_errno(getppid()); 5895 break; 5896 #endif 5897 case TARGET_NR_getpgrp: 5898 ret = get_errno(getpgrp()); 5899 break; 5900 case TARGET_NR_setsid: 5901 ret = get_errno(setsid()); 5902 break; 5903 #ifdef TARGET_NR_sigaction 5904 case TARGET_NR_sigaction: 5905 { 5906 #if defined(TARGET_ALPHA) 5907 struct target_sigaction act, oact, *pact = 0; 5908 struct target_old_sigaction *old_act; 5909 if (arg2) { 5910 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5911 goto efault; 5912 act._sa_handler = old_act->_sa_handler; 5913 target_siginitset(&act.sa_mask, old_act->sa_mask); 5914 act.sa_flags = old_act->sa_flags; 5915 act.sa_restorer = 0; 5916 unlock_user_struct(old_act, arg2, 0); 5917 pact = &act; 5918 } 5919 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5920 if (!is_error(ret) && arg3) { 5921 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5922 goto efault; 5923 old_act->_sa_handler = oact._sa_handler; 5924 old_act->sa_mask = oact.sa_mask.sig[0]; 5925 old_act->sa_flags = oact.sa_flags; 5926 unlock_user_struct(old_act, arg3, 1); 5927 } 5928 #elif defined(TARGET_MIPS) 5929 struct target_sigaction act, oact, *pact, *old_act; 5930 5931 if (arg2) { 5932 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5933 goto efault; 5934 act._sa_handler = old_act->_sa_handler; 5935 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5936 act.sa_flags = old_act->sa_flags; 5937 unlock_user_struct(old_act, arg2, 0); 5938 pact = &act; 5939 } else { 5940 pact = NULL; 5941 } 5942 5943 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5944 5945 if (!is_error(ret) && arg3) { 5946 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5947 goto efault; 5948 old_act->_sa_handler = oact._sa_handler; 5949 old_act->sa_flags = oact.sa_flags; 5950 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5951 old_act->sa_mask.sig[1] = 0; 5952 old_act->sa_mask.sig[2] = 0; 5953 old_act->sa_mask.sig[3] = 0; 5954 unlock_user_struct(old_act, arg3, 1); 5955 } 5956 #else 5957 struct target_old_sigaction *old_act; 5958 struct target_sigaction act, oact, *pact; 5959 if (arg2) { 5960 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5961 goto efault; 5962 act._sa_handler = old_act->_sa_handler; 5963 target_siginitset(&act.sa_mask, old_act->sa_mask); 5964 act.sa_flags = old_act->sa_flags; 5965 act.sa_restorer = old_act->sa_restorer; 5966 unlock_user_struct(old_act, arg2, 0); 5967 pact = &act; 5968 } else { 5969 pact = NULL; 5970 } 5971 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5972 if (!is_error(ret) && arg3) { 5973 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5974 goto efault; 5975 old_act->_sa_handler = oact._sa_handler; 5976 old_act->sa_mask = oact.sa_mask.sig[0]; 5977 old_act->sa_flags = oact.sa_flags; 5978 old_act->sa_restorer = oact.sa_restorer; 5979 unlock_user_struct(old_act, arg3, 1); 5980 } 5981 #endif 5982 } 5983 break; 5984 #endif 5985 case TARGET_NR_rt_sigaction: 5986 { 5987 #if defined(TARGET_ALPHA) 5988 struct target_sigaction act, oact, *pact = 0; 5989 
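            /* On Alpha the restorer is passed as a separate syscall
               argument (arg5) rather than inside the sigaction struct. */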
struct target_rt_sigaction *rt_act; 5990 /* ??? arg4 == sizeof(sigset_t). */ 5991 if (arg2) { 5992 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5993 goto efault; 5994 act._sa_handler = rt_act->_sa_handler; 5995 act.sa_mask = rt_act->sa_mask; 5996 act.sa_flags = rt_act->sa_flags; 5997 act.sa_restorer = arg5; 5998 unlock_user_struct(rt_act, arg2, 0); 5999 pact = &act; 6000 } 6001 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6002 if (!is_error(ret) && arg3) { 6003 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 6004 goto efault; 6005 rt_act->_sa_handler = oact._sa_handler; 6006 rt_act->sa_mask = oact.sa_mask; 6007 rt_act->sa_flags = oact.sa_flags; 6008 unlock_user_struct(rt_act, arg3, 1); 6009 } 6010 #else 6011 struct target_sigaction *act; 6012 struct target_sigaction *oact; 6013 6014 if (arg2) { 6015 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 6016 goto efault; 6017 } else 6018 act = NULL; 6019 if (arg3) { 6020 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 6021 ret = -TARGET_EFAULT; 6022 goto rt_sigaction_fail; 6023 } 6024 } else 6025 oact = NULL; 6026 ret = get_errno(do_sigaction(arg1, act, oact)); 6027 rt_sigaction_fail: 6028 if (act) 6029 unlock_user_struct(act, arg2, 0); 6030 if (oact) 6031 unlock_user_struct(oact, arg3, 1); 6032 #endif 6033 } 6034 break; 6035 #ifdef TARGET_NR_sgetmask /* not on alpha */ 6036 case TARGET_NR_sgetmask: 6037 { 6038 sigset_t cur_set; 6039 abi_ulong target_set; 6040 do_sigprocmask(0, NULL, &cur_set); 6041 host_to_target_old_sigset(&target_set, &cur_set); 6042 ret = target_set; 6043 } 6044 break; 6045 #endif 6046 #ifdef TARGET_NR_ssetmask /* not on alpha */ 6047 case TARGET_NR_ssetmask: 6048 { 6049 sigset_t set, oset, cur_set; 6050 abi_ulong target_set = arg1; 6051 do_sigprocmask(0, NULL, &cur_set); 6052 target_to_host_old_sigset(&set, &target_set); 6053 sigorset(&set, &set, &cur_set); 6054 do_sigprocmask(SIG_SETMASK, &set, &oset); 6055 host_to_target_old_sigset(&target_set, &oset); 6056 ret = target_set; 6057 } 6058 break; 6059 #endif 6060 #ifdef TARGET_NR_sigprocmask 6061 case TARGET_NR_sigprocmask: 6062 { 6063 #if defined(TARGET_ALPHA) 6064 sigset_t set, oldset; 6065 abi_ulong mask; 6066 int how; 6067 6068 switch (arg1) { 6069 case TARGET_SIG_BLOCK: 6070 how = SIG_BLOCK; 6071 break; 6072 case TARGET_SIG_UNBLOCK: 6073 how = SIG_UNBLOCK; 6074 break; 6075 case TARGET_SIG_SETMASK: 6076 how = SIG_SETMASK; 6077 break; 6078 default: 6079 ret = -TARGET_EINVAL; 6080 goto fail; 6081 } 6082 mask = arg2; 6083 target_to_host_old_sigset(&set, &mask); 6084 6085 ret = get_errno(do_sigprocmask(how, &set, &oldset)); 6086 if (!is_error(ret)) { 6087 host_to_target_old_sigset(&mask, &oldset); 6088 ret = mask; 6089 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6090 } 6091 #else 6092 sigset_t set, oldset, *set_ptr; 6093 int how; 6094 6095 if (arg2) { 6096 switch (arg1) { 6097 case TARGET_SIG_BLOCK: 6098 how = SIG_BLOCK; 6099 break; 6100 case TARGET_SIG_UNBLOCK: 6101 how = SIG_UNBLOCK; 6102 break; 6103 case TARGET_SIG_SETMASK: 6104 how = SIG_SETMASK; 6105 break; 6106 default: 6107 ret = -TARGET_EINVAL; 6108 goto fail; 6109 } 6110 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6111 goto efault; 6112 target_to_host_old_sigset(&set, p); 6113 unlock_user(p, arg2, 0); 6114 set_ptr = &set; 6115 } else { 6116 how = 0; 6117 set_ptr = NULL; 6118 } 6119 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6120 if (!is_error(ret) && arg3) { 6121 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 
                    goto efault;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        break;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t set;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&set, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_old_sigset(&set, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(sigsuspend(&set));
        }
        break;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t set;
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sigsuspend(&set));
        }
        break;
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                target_to_host_timespec(puts, arg3);
            } else {
                puts = NULL;
            }
            ret = get_errno(sigtimedwait(&set, &uinfo, puts));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        goto efault;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        break;
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;
            /* arg3 points at the target_siginfo_t describing the signal. */
            if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
                goto efault;
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        break;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding is needed */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding is needed */
        ret = do_rt_sigreturn(cpu_env);
        break;
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                goto efault;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            ret = get_errno(setrlimit(resource, &rlim));
        }
        break;
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    goto efault;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        break;
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                host_to_target_rusage(arg2, &rusage);
            }
        }
        break;
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_settimeofday:
        {
            struct timeval tv;
            if (copy_from_user_timeval(&tv, arg1))
                goto efault;
            ret = get_errno(settimeofday(&tv, NULL));
        }
        break;
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_S390X) || defined(TARGET_ALPHA)
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#else
        {
            struct target_sel_arg_struct *sel;
            abi_ulong inp, outp, exp, tvp;
            long nsel;

            if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
                goto efault;
            nsel = tswapal(sel->n);
            inp = tswapal(sel->inp);
            outp = tswapal(sel->outp);
            exp = tswapal(sel->exp);
            tvp = tswapal(sel->tvp);
            unlock_user_struct(sel, arg1, 0);
            ret = do_select(nsel, inp, outp, exp, tvp);
        }
#endif
        break;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
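             * The pointer names a { sigset address, sigset size } pair,
             * which is unpacked by hand below.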
6368 */ 6369 sigset_t set; 6370 struct { 6371 sigset_t *set; 6372 size_t size; 6373 } sig, *sig_ptr; 6374 6375 abi_ulong arg_sigset, arg_sigsize, *arg7; 6376 target_sigset_t *target_sigset; 6377 6378 n = arg1; 6379 rfd_addr = arg2; 6380 wfd_addr = arg3; 6381 efd_addr = arg4; 6382 ts_addr = arg5; 6383 6384 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6385 if (ret) { 6386 goto fail; 6387 } 6388 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6389 if (ret) { 6390 goto fail; 6391 } 6392 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6393 if (ret) { 6394 goto fail; 6395 } 6396 6397 /* 6398 * This takes a timespec, and not a timeval, so we cannot 6399 * use the do_select() helper ... 6400 */ 6401 if (ts_addr) { 6402 if (target_to_host_timespec(&ts, ts_addr)) { 6403 goto efault; 6404 } 6405 ts_ptr = &ts; 6406 } else { 6407 ts_ptr = NULL; 6408 } 6409 6410 /* Extract the two packed args for the sigset */ 6411 if (arg6) { 6412 sig_ptr = &sig; 6413 sig.size = _NSIG / 8; 6414 6415 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6416 if (!arg7) { 6417 goto efault; 6418 } 6419 arg_sigset = tswapal(arg7[0]); 6420 arg_sigsize = tswapal(arg7[1]); 6421 unlock_user(arg7, arg6, 0); 6422 6423 if (arg_sigset) { 6424 sig.set = &set; 6425 if (arg_sigsize != sizeof(*target_sigset)) { 6426 /* Like the kernel, we enforce correct size sigsets */ 6427 ret = -TARGET_EINVAL; 6428 goto fail; 6429 } 6430 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6431 sizeof(*target_sigset), 1); 6432 if (!target_sigset) { 6433 goto efault; 6434 } 6435 target_to_host_sigset(&set, target_sigset); 6436 unlock_user(target_sigset, arg_sigset, 0); 6437 } else { 6438 sig.set = NULL; 6439 } 6440 } else { 6441 sig_ptr = NULL; 6442 } 6443 6444 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6445 ts_ptr, sig_ptr)); 6446 6447 if (!is_error(ret)) { 6448 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6449 goto efault; 6450 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6451 goto efault; 6452 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6453 goto efault; 6454 6455 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6456 goto efault; 6457 } 6458 } 6459 break; 6460 #endif 6461 case TARGET_NR_symlink: 6462 { 6463 void *p2; 6464 p = lock_user_string(arg1); 6465 p2 = lock_user_string(arg2); 6466 if (!p || !p2) 6467 ret = -TARGET_EFAULT; 6468 else 6469 ret = get_errno(symlink(p, p2)); 6470 unlock_user(p2, arg2, 0); 6471 unlock_user(p, arg1, 0); 6472 } 6473 break; 6474 #if defined(TARGET_NR_symlinkat) 6475 case TARGET_NR_symlinkat: 6476 { 6477 void *p2; 6478 p = lock_user_string(arg1); 6479 p2 = lock_user_string(arg3); 6480 if (!p || !p2) 6481 ret = -TARGET_EFAULT; 6482 else 6483 ret = get_errno(symlinkat(p, arg2, p2)); 6484 unlock_user(p2, arg3, 0); 6485 unlock_user(p, arg1, 0); 6486 } 6487 break; 6488 #endif 6489 #ifdef TARGET_NR_oldlstat 6490 case TARGET_NR_oldlstat: 6491 goto unimplemented; 6492 #endif 6493 case TARGET_NR_readlink: 6494 { 6495 void *p2; 6496 p = lock_user_string(arg1); 6497 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6498 if (!p || !p2) { 6499 ret = -TARGET_EFAULT; 6500 } else if (is_proc_myself((const char *)p, "exe")) { 6501 char real[PATH_MAX], *temp; 6502 temp = realpath(exec_path, real); 6503 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 6504 snprintf((char *)p2, arg3, "%s", real); 6505 } else { 6506 ret = get_errno(readlink(path(p), p2, arg3)); 6507 } 6508 unlock_user(p2, arg2, ret); 6509 unlock_user(p, arg1, 0); 6510 } 6511 break; 6512 #if defined(TARGET_NR_readlinkat) 6513 case TARGET_NR_readlinkat: 6514 { 6515 void *p2; 6516 p = lock_user_string(arg2); 6517 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6518 if (!p || !p2) { 6519 ret = -TARGET_EFAULT; 6520 } else if (is_proc_myself((const char *)p, "exe")) { 6521 char real[PATH_MAX], *temp; 6522 temp = realpath(exec_path, real); 6523 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6524 snprintf((char *)p2, arg4, "%s", real); 6525 } else { 6526 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 6527 } 6528 unlock_user(p2, arg3, ret); 6529 unlock_user(p, arg2, 0); 6530 } 6531 break; 6532 #endif 6533 #ifdef TARGET_NR_uselib 6534 case TARGET_NR_uselib: 6535 goto unimplemented; 6536 #endif 6537 #ifdef TARGET_NR_swapon 6538 case TARGET_NR_swapon: 6539 if (!(p = lock_user_string(arg1))) 6540 goto efault; 6541 ret = get_errno(swapon(p, arg2)); 6542 unlock_user(p, arg1, 0); 6543 break; 6544 #endif 6545 case TARGET_NR_reboot: 6546 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6547 /* arg4 must be ignored in all other cases */ 6548 p = lock_user_string(arg4); 6549 if (!p) { 6550 goto efault; 6551 } 6552 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6553 unlock_user(p, arg4, 0); 6554 } else { 6555 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6556 } 6557 break; 6558 #ifdef TARGET_NR_readdir 6559 case TARGET_NR_readdir: 6560 goto unimplemented; 6561 #endif 6562 #ifdef TARGET_NR_mmap 6563 case TARGET_NR_mmap: 6564 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6565 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 6566 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6567 || defined(TARGET_S390X) 6568 { 6569 abi_ulong *v; 6570 abi_ulong v1, v2, v3, v4, v5, v6; 6571 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6572 goto efault; 6573 v1 = tswapal(v[0]); 6574 v2 = tswapal(v[1]); 6575 v3 = tswapal(v[2]); 6576 v4 = tswapal(v[3]); 6577 v5 = tswapal(v[4]); 6578 v6 = tswapal(v[5]); 6579 unlock_user(v, arg1, 0); 6580 ret = get_errno(target_mmap(v1, v2, v3, 6581 target_to_host_bitmask(v4, mmap_flags_tbl), 6582 v5, v6)); 6583 } 6584 #else 6585 ret = get_errno(target_mmap(arg1, arg2, arg3, 6586 target_to_host_bitmask(arg4, mmap_flags_tbl), 6587 arg5, 6588 arg6)); 6589 #endif 6590 break; 6591 #endif 6592 #ifdef TARGET_NR_mmap2 6593 case TARGET_NR_mmap2: 6594 #ifndef MMAP_SHIFT 6595 #define MMAP_SHIFT 12 6596 #endif 6597 ret = get_errno(target_mmap(arg1, arg2, arg3, 6598 target_to_host_bitmask(arg4, mmap_flags_tbl), 6599 arg5, 6600 arg6 << MMAP_SHIFT)); 6601 break; 6602 #endif 6603 case TARGET_NR_munmap: 6604 ret = get_errno(target_munmap(arg1, arg2)); 6605 break; 6606 case TARGET_NR_mprotect: 6607 { 6608 TaskState *ts = cpu->opaque; 6609 /* Special hack to detect libc making the stack executable. */ 6610 if ((arg3 & PROT_GROWSDOWN) 6611 && arg1 >= ts->info->stack_limit 6612 && arg1 <= ts->info->start_stack) { 6613 arg3 &= ~PROT_GROWSDOWN; 6614 arg2 = arg2 + arg1 - ts->info->stack_limit; 6615 arg1 = ts->info->stack_limit; 6616 } 6617 } 6618 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6619 break; 6620 #ifdef TARGET_NR_mremap 6621 case TARGET_NR_mremap: 6622 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6623 break; 6624 #endif 6625 /* ??? 
msync/mlock/munlock are broken for softmmu. */ 6626 #ifdef TARGET_NR_msync 6627 case TARGET_NR_msync: 6628 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6629 break; 6630 #endif 6631 #ifdef TARGET_NR_mlock 6632 case TARGET_NR_mlock: 6633 ret = get_errno(mlock(g2h(arg1), arg2)); 6634 break; 6635 #endif 6636 #ifdef TARGET_NR_munlock 6637 case TARGET_NR_munlock: 6638 ret = get_errno(munlock(g2h(arg1), arg2)); 6639 break; 6640 #endif 6641 #ifdef TARGET_NR_mlockall 6642 case TARGET_NR_mlockall: 6643 ret = get_errno(mlockall(arg1)); 6644 break; 6645 #endif 6646 #ifdef TARGET_NR_munlockall 6647 case TARGET_NR_munlockall: 6648 ret = get_errno(munlockall()); 6649 break; 6650 #endif 6651 case TARGET_NR_truncate: 6652 if (!(p = lock_user_string(arg1))) 6653 goto efault; 6654 ret = get_errno(truncate(p, arg2)); 6655 unlock_user(p, arg1, 0); 6656 break; 6657 case TARGET_NR_ftruncate: 6658 ret = get_errno(ftruncate(arg1, arg2)); 6659 break; 6660 case TARGET_NR_fchmod: 6661 ret = get_errno(fchmod(arg1, arg2)); 6662 break; 6663 #if defined(TARGET_NR_fchmodat) 6664 case TARGET_NR_fchmodat: 6665 if (!(p = lock_user_string(arg2))) 6666 goto efault; 6667 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 6668 unlock_user(p, arg2, 0); 6669 break; 6670 #endif 6671 case TARGET_NR_getpriority: 6672 /* Note that negative values are valid for getpriority, so we must 6673 differentiate based on errno settings. */ 6674 errno = 0; 6675 ret = getpriority(arg1, arg2); 6676 if (ret == -1 && errno != 0) { 6677 ret = -host_to_target_errno(errno); 6678 break; 6679 } 6680 #ifdef TARGET_ALPHA 6681 /* Return value is the unbiased priority. Signal no error. */ 6682 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6683 #else 6684 /* Return value is a biased priority to avoid negative numbers. */ 6685 ret = 20 - ret; 6686 #endif 6687 break; 6688 case TARGET_NR_setpriority: 6689 ret = get_errno(setpriority(arg1, arg2, arg3)); 6690 break; 6691 #ifdef TARGET_NR_profil 6692 case TARGET_NR_profil: 6693 goto unimplemented; 6694 #endif 6695 case TARGET_NR_statfs: 6696 if (!(p = lock_user_string(arg1))) 6697 goto efault; 6698 ret = get_errno(statfs(path(p), &stfs)); 6699 unlock_user(p, arg1, 0); 6700 convert_statfs: 6701 if (!is_error(ret)) { 6702 struct target_statfs *target_stfs; 6703 6704 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6705 goto efault; 6706 __put_user(stfs.f_type, &target_stfs->f_type); 6707 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6708 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6709 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6710 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6711 __put_user(stfs.f_files, &target_stfs->f_files); 6712 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6713 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6714 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6715 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6716 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6717 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6718 unlock_user_struct(target_stfs, arg2, 1); 6719 } 6720 break; 6721 case TARGET_NR_fstatfs: 6722 ret = get_errno(fstatfs(arg1, &stfs)); 6723 goto convert_statfs; 6724 #ifdef TARGET_NR_statfs64 6725 case TARGET_NR_statfs64: 6726 if (!(p = lock_user_string(arg1))) 6727 goto efault; 6728 ret = get_errno(statfs(path(p), &stfs)); 6729 unlock_user(p, arg1, 0); 6730 convert_statfs64: 6731 if (!is_error(ret)) { 6732 struct target_statfs64 *target_stfs; 6733 6734 if (!lock_user_struct(VERIFY_WRITE, 
target_stfs, arg3, 0)) 6735 goto efault; 6736 __put_user(stfs.f_type, &target_stfs->f_type); 6737 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6738 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6739 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6740 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6741 __put_user(stfs.f_files, &target_stfs->f_files); 6742 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6743 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6744 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6745 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6746 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6747 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6748 unlock_user_struct(target_stfs, arg3, 1); 6749 } 6750 break; 6751 case TARGET_NR_fstatfs64: 6752 ret = get_errno(fstatfs(arg1, &stfs)); 6753 goto convert_statfs64; 6754 #endif 6755 #ifdef TARGET_NR_ioperm 6756 case TARGET_NR_ioperm: 6757 goto unimplemented; 6758 #endif 6759 #ifdef TARGET_NR_socketcall 6760 case TARGET_NR_socketcall: 6761 ret = do_socketcall(arg1, arg2); 6762 break; 6763 #endif 6764 #ifdef TARGET_NR_accept 6765 case TARGET_NR_accept: 6766 ret = do_accept4(arg1, arg2, arg3, 0); 6767 break; 6768 #endif 6769 #ifdef TARGET_NR_accept4 6770 case TARGET_NR_accept4: 6771 #ifdef CONFIG_ACCEPT4 6772 ret = do_accept4(arg1, arg2, arg3, arg4); 6773 #else 6774 goto unimplemented; 6775 #endif 6776 break; 6777 #endif 6778 #ifdef TARGET_NR_bind 6779 case TARGET_NR_bind: 6780 ret = do_bind(arg1, arg2, arg3); 6781 break; 6782 #endif 6783 #ifdef TARGET_NR_connect 6784 case TARGET_NR_connect: 6785 ret = do_connect(arg1, arg2, arg3); 6786 break; 6787 #endif 6788 #ifdef TARGET_NR_getpeername 6789 case TARGET_NR_getpeername: 6790 ret = do_getpeername(arg1, arg2, arg3); 6791 break; 6792 #endif 6793 #ifdef TARGET_NR_getsockname 6794 case TARGET_NR_getsockname: 6795 ret = do_getsockname(arg1, arg2, arg3); 6796 break; 6797 #endif 6798 #ifdef TARGET_NR_getsockopt 6799 case TARGET_NR_getsockopt: 6800 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6801 break; 6802 #endif 6803 #ifdef TARGET_NR_listen 6804 case TARGET_NR_listen: 6805 ret = get_errno(listen(arg1, arg2)); 6806 break; 6807 #endif 6808 #ifdef TARGET_NR_recv 6809 case TARGET_NR_recv: 6810 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6811 break; 6812 #endif 6813 #ifdef TARGET_NR_recvfrom 6814 case TARGET_NR_recvfrom: 6815 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6816 break; 6817 #endif 6818 #ifdef TARGET_NR_recvmsg 6819 case TARGET_NR_recvmsg: 6820 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6821 break; 6822 #endif 6823 #ifdef TARGET_NR_send 6824 case TARGET_NR_send: 6825 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6826 break; 6827 #endif 6828 #ifdef TARGET_NR_sendmsg 6829 case TARGET_NR_sendmsg: 6830 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6831 break; 6832 #endif 6833 #ifdef TARGET_NR_sendmmsg 6834 case TARGET_NR_sendmmsg: 6835 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 6836 break; 6837 case TARGET_NR_recvmmsg: 6838 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 6839 break; 6840 #endif 6841 #ifdef TARGET_NR_sendto 6842 case TARGET_NR_sendto: 6843 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6844 break; 6845 #endif 6846 #ifdef TARGET_NR_shutdown 6847 case TARGET_NR_shutdown: 6848 ret = get_errno(shutdown(arg1, arg2)); 6849 break; 6850 #endif 6851 #ifdef TARGET_NR_socket 6852 case TARGET_NR_socket: 6853 ret = do_socket(arg1, arg2, arg3); 6854 break; 6855 #endif 6856 #ifdef 
TARGET_NR_socketpair 6857 case TARGET_NR_socketpair: 6858 ret = do_socketpair(arg1, arg2, arg3, arg4); 6859 break; 6860 #endif 6861 #ifdef TARGET_NR_setsockopt 6862 case TARGET_NR_setsockopt: 6863 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6864 break; 6865 #endif 6866 6867 case TARGET_NR_syslog: 6868 if (!(p = lock_user_string(arg2))) 6869 goto efault; 6870 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6871 unlock_user(p, arg2, 0); 6872 break; 6873 6874 case TARGET_NR_setitimer: 6875 { 6876 struct itimerval value, ovalue, *pvalue; 6877 6878 if (arg2) { 6879 pvalue = &value; 6880 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6881 || copy_from_user_timeval(&pvalue->it_value, 6882 arg2 + sizeof(struct target_timeval))) 6883 goto efault; 6884 } else { 6885 pvalue = NULL; 6886 } 6887 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6888 if (!is_error(ret) && arg3) { 6889 if (copy_to_user_timeval(arg3, 6890 &ovalue.it_interval) 6891 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6892 &ovalue.it_value)) 6893 goto efault; 6894 } 6895 } 6896 break; 6897 case TARGET_NR_getitimer: 6898 { 6899 struct itimerval value; 6900 6901 ret = get_errno(getitimer(arg1, &value)); 6902 if (!is_error(ret) && arg2) { 6903 if (copy_to_user_timeval(arg2, 6904 &value.it_interval) 6905 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6906 &value.it_value)) 6907 goto efault; 6908 } 6909 } 6910 break; 6911 case TARGET_NR_stat: 6912 if (!(p = lock_user_string(arg1))) 6913 goto efault; 6914 ret = get_errno(stat(path(p), &st)); 6915 unlock_user(p, arg1, 0); 6916 goto do_stat; 6917 case TARGET_NR_lstat: 6918 if (!(p = lock_user_string(arg1))) 6919 goto efault; 6920 ret = get_errno(lstat(path(p), &st)); 6921 unlock_user(p, arg1, 0); 6922 goto do_stat; 6923 case TARGET_NR_fstat: 6924 { 6925 ret = get_errno(fstat(arg1, &st)); 6926 do_stat: 6927 if (!is_error(ret)) { 6928 struct target_stat *target_st; 6929 6930 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6931 goto efault; 6932 memset(target_st, 0, sizeof(*target_st)); 6933 __put_user(st.st_dev, &target_st->st_dev); 6934 __put_user(st.st_ino, &target_st->st_ino); 6935 __put_user(st.st_mode, &target_st->st_mode); 6936 __put_user(st.st_uid, &target_st->st_uid); 6937 __put_user(st.st_gid, &target_st->st_gid); 6938 __put_user(st.st_nlink, &target_st->st_nlink); 6939 __put_user(st.st_rdev, &target_st->st_rdev); 6940 __put_user(st.st_size, &target_st->st_size); 6941 __put_user(st.st_blksize, &target_st->st_blksize); 6942 __put_user(st.st_blocks, &target_st->st_blocks); 6943 __put_user(st.st_atime, &target_st->target_st_atime); 6944 __put_user(st.st_mtime, &target_st->target_st_mtime); 6945 __put_user(st.st_ctime, &target_st->target_st_ctime); 6946 unlock_user_struct(target_st, arg2, 1); 6947 } 6948 } 6949 break; 6950 #ifdef TARGET_NR_olduname 6951 case TARGET_NR_olduname: 6952 goto unimplemented; 6953 #endif 6954 #ifdef TARGET_NR_iopl 6955 case TARGET_NR_iopl: 6956 goto unimplemented; 6957 #endif 6958 case TARGET_NR_vhangup: 6959 ret = get_errno(vhangup()); 6960 break; 6961 #ifdef TARGET_NR_idle 6962 case TARGET_NR_idle: 6963 goto unimplemented; 6964 #endif 6965 #ifdef TARGET_NR_syscall 6966 case TARGET_NR_syscall: 6967 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6968 arg6, arg7, arg8, 0); 6969 break; 6970 #endif 6971 case TARGET_NR_wait4: 6972 { 6973 int status; 6974 abi_long status_ptr = arg2; 6975 struct rusage rusage, *rusage_ptr; 6976 abi_ulong target_rusage = arg4; 6977 if 
(target_rusage) 6978 rusage_ptr = &rusage; 6979 else 6980 rusage_ptr = NULL; 6981 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6982 if (!is_error(ret)) { 6983 if (status_ptr && ret) { 6984 status = host_to_target_waitstatus(status); 6985 if (put_user_s32(status, status_ptr)) 6986 goto efault; 6987 } 6988 if (target_rusage) 6989 host_to_target_rusage(target_rusage, &rusage); 6990 } 6991 } 6992 break; 6993 #ifdef TARGET_NR_swapoff 6994 case TARGET_NR_swapoff: 6995 if (!(p = lock_user_string(arg1))) 6996 goto efault; 6997 ret = get_errno(swapoff(p)); 6998 unlock_user(p, arg1, 0); 6999 break; 7000 #endif 7001 case TARGET_NR_sysinfo: 7002 { 7003 struct target_sysinfo *target_value; 7004 struct sysinfo value; 7005 ret = get_errno(sysinfo(&value)); 7006 if (!is_error(ret) && arg1) 7007 { 7008 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 7009 goto efault; 7010 __put_user(value.uptime, &target_value->uptime); 7011 __put_user(value.loads[0], &target_value->loads[0]); 7012 __put_user(value.loads[1], &target_value->loads[1]); 7013 __put_user(value.loads[2], &target_value->loads[2]); 7014 __put_user(value.totalram, &target_value->totalram); 7015 __put_user(value.freeram, &target_value->freeram); 7016 __put_user(value.sharedram, &target_value->sharedram); 7017 __put_user(value.bufferram, &target_value->bufferram); 7018 __put_user(value.totalswap, &target_value->totalswap); 7019 __put_user(value.freeswap, &target_value->freeswap); 7020 __put_user(value.procs, &target_value->procs); 7021 __put_user(value.totalhigh, &target_value->totalhigh); 7022 __put_user(value.freehigh, &target_value->freehigh); 7023 __put_user(value.mem_unit, &target_value->mem_unit); 7024 unlock_user_struct(target_value, arg1, 1); 7025 } 7026 } 7027 break; 7028 #ifdef TARGET_NR_ipc 7029 case TARGET_NR_ipc: 7030 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 7031 break; 7032 #endif 7033 #ifdef TARGET_NR_semget 7034 case TARGET_NR_semget: 7035 ret = get_errno(semget(arg1, arg2, arg3)); 7036 break; 7037 #endif 7038 #ifdef TARGET_NR_semop 7039 case TARGET_NR_semop: 7040 ret = do_semop(arg1, arg2, arg3); 7041 break; 7042 #endif 7043 #ifdef TARGET_NR_semctl 7044 case TARGET_NR_semctl: 7045 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 7046 break; 7047 #endif 7048 #ifdef TARGET_NR_msgctl 7049 case TARGET_NR_msgctl: 7050 ret = do_msgctl(arg1, arg2, arg3); 7051 break; 7052 #endif 7053 #ifdef TARGET_NR_msgget 7054 case TARGET_NR_msgget: 7055 ret = get_errno(msgget(arg1, arg2)); 7056 break; 7057 #endif 7058 #ifdef TARGET_NR_msgrcv 7059 case TARGET_NR_msgrcv: 7060 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 7061 break; 7062 #endif 7063 #ifdef TARGET_NR_msgsnd 7064 case TARGET_NR_msgsnd: 7065 ret = do_msgsnd(arg1, arg2, arg3, arg4); 7066 break; 7067 #endif 7068 #ifdef TARGET_NR_shmget 7069 case TARGET_NR_shmget: 7070 ret = get_errno(shmget(arg1, arg2, arg3)); 7071 break; 7072 #endif 7073 #ifdef TARGET_NR_shmctl 7074 case TARGET_NR_shmctl: 7075 ret = do_shmctl(arg1, arg2, arg3); 7076 break; 7077 #endif 7078 #ifdef TARGET_NR_shmat 7079 case TARGET_NR_shmat: 7080 ret = do_shmat(arg1, arg2, arg3); 7081 break; 7082 #endif 7083 #ifdef TARGET_NR_shmdt 7084 case TARGET_NR_shmdt: 7085 ret = do_shmdt(arg1); 7086 break; 7087 #endif 7088 case TARGET_NR_fsync: 7089 ret = get_errno(fsync(arg1)); 7090 break; 7091 case TARGET_NR_clone: 7092 /* Linux manages to have three different orderings for its 7093 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 7094 * match the kernel's CONFIG_CLONE_* 
settings.
     * Microblaze is further special in that it uses a sixth
     * implicit argument to clone for the TLS pointer.
     */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        break;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
#ifdef TARGET_GPROF
        _mcleanup();
#endif
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy (buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
        break;
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
        goto unimplemented;
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
        break;
#endif
#endif
    case TARGET_NR_adjtimex:
        goto unimplemented;
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
#endif
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
#endif
        goto unimplemented;
    case TARGET_NR_quotactl:
        goto unimplemented;
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
        break;
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
        break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
        goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
        goto unimplemented;
#endif
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
        break;
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
        goto unimplemented;
#endif
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_getdents:
#ifdef __NR_getdents
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;
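            /* 64-bit host, 32-bit target: read the host dirents into a
             * malloc'd bounce buffer, then repack each record into the
             * target's 32-bit struct linux_dirent layout. */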
7216 7217 dirp = malloc(count); 7218 if (!dirp) { 7219 ret = -TARGET_ENOMEM; 7220 goto fail; 7221 } 7222 7223 ret = get_errno(sys_getdents(arg1, dirp, count)); 7224 if (!is_error(ret)) { 7225 struct linux_dirent *de; 7226 struct target_dirent *tde; 7227 int len = ret; 7228 int reclen, treclen; 7229 int count1, tnamelen; 7230 7231 count1 = 0; 7232 de = dirp; 7233 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7234 goto efault; 7235 tde = target_dirp; 7236 while (len > 0) { 7237 reclen = de->d_reclen; 7238 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7239 assert(tnamelen >= 0); 7240 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7241 assert(count1 + treclen <= count); 7242 tde->d_reclen = tswap16(treclen); 7243 tde->d_ino = tswapal(de->d_ino); 7244 tde->d_off = tswapal(de->d_off); 7245 memcpy(tde->d_name, de->d_name, tnamelen); 7246 de = (struct linux_dirent *)((char *)de + reclen); 7247 len -= reclen; 7248 tde = (struct target_dirent *)((char *)tde + treclen); 7249 count1 += treclen; 7250 } 7251 ret = count1; 7252 unlock_user(target_dirp, arg2, ret); 7253 } 7254 free(dirp); 7255 } 7256 #else 7257 { 7258 struct linux_dirent *dirp; 7259 abi_long count = arg3; 7260 7261 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7262 goto efault; 7263 ret = get_errno(sys_getdents(arg1, dirp, count)); 7264 if (!is_error(ret)) { 7265 struct linux_dirent *de; 7266 int len = ret; 7267 int reclen; 7268 de = dirp; 7269 while (len > 0) { 7270 reclen = de->d_reclen; 7271 if (reclen > len) 7272 break; 7273 de->d_reclen = tswap16(reclen); 7274 tswapls(&de->d_ino); 7275 tswapls(&de->d_off); 7276 de = (struct linux_dirent *)((char *)de + reclen); 7277 len -= reclen; 7278 } 7279 } 7280 unlock_user(dirp, arg2, ret); 7281 } 7282 #endif 7283 #else 7284 /* Implement getdents in terms of getdents64 */ 7285 { 7286 struct linux_dirent64 *dirp; 7287 abi_long count = arg3; 7288 7289 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 7290 if (!dirp) { 7291 goto efault; 7292 } 7293 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7294 if (!is_error(ret)) { 7295 /* Convert the dirent64 structs to target dirent. We do this 7296 * in-place, since we can guarantee that a target_dirent is no 7297 * larger than a dirent64; however this means we have to be 7298 * careful to read everything before writing in the new format. 
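             * (The old-format d_type ends up in the final byte of each
             * converted record, just past the name.)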
7299 */ 7300 struct linux_dirent64 *de; 7301 struct target_dirent *tde; 7302 int len = ret; 7303 int tlen = 0; 7304 7305 de = dirp; 7306 tde = (struct target_dirent *)dirp; 7307 while (len > 0) { 7308 int namelen, treclen; 7309 int reclen = de->d_reclen; 7310 uint64_t ino = de->d_ino; 7311 int64_t off = de->d_off; 7312 uint8_t type = de->d_type; 7313 7314 namelen = strlen(de->d_name); 7315 treclen = offsetof(struct target_dirent, d_name) 7316 + namelen + 2; 7317 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 7318 7319 memmove(tde->d_name, de->d_name, namelen + 1); 7320 tde->d_ino = tswapal(ino); 7321 tde->d_off = tswapal(off); 7322 tde->d_reclen = tswap16(treclen); 7323 /* The target_dirent type is in what was formerly a padding 7324 * byte at the end of the structure: 7325 */ 7326 *(((char *)tde) + treclen - 1) = type; 7327 7328 de = (struct linux_dirent64 *)((char *)de + reclen); 7329 tde = (struct target_dirent *)((char *)tde + treclen); 7330 len -= reclen; 7331 tlen += treclen; 7332 } 7333 ret = tlen; 7334 } 7335 unlock_user(dirp, arg2, ret); 7336 } 7337 #endif 7338 break; 7339 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7340 case TARGET_NR_getdents64: 7341 { 7342 struct linux_dirent64 *dirp; 7343 abi_long count = arg3; 7344 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7345 goto efault; 7346 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7347 if (!is_error(ret)) { 7348 struct linux_dirent64 *de; 7349 int len = ret; 7350 int reclen; 7351 de = dirp; 7352 while (len > 0) { 7353 reclen = de->d_reclen; 7354 if (reclen > len) 7355 break; 7356 de->d_reclen = tswap16(reclen); 7357 tswap64s((uint64_t *)&de->d_ino); 7358 tswap64s((uint64_t *)&de->d_off); 7359 de = (struct linux_dirent64 *)((char *)de + reclen); 7360 len -= reclen; 7361 } 7362 } 7363 unlock_user(dirp, arg2, ret); 7364 } 7365 break; 7366 #endif /* TARGET_NR_getdents64 */ 7367 #if defined(TARGET_NR__newselect) 7368 case TARGET_NR__newselect: 7369 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7370 break; 7371 #endif 7372 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7373 # ifdef TARGET_NR_poll 7374 case TARGET_NR_poll: 7375 # endif 7376 # ifdef TARGET_NR_ppoll 7377 case TARGET_NR_ppoll: 7378 # endif 7379 { 7380 struct target_pollfd *target_pfd; 7381 unsigned int nfds = arg2; 7382 int timeout = arg3; 7383 struct pollfd *pfd; 7384 unsigned int i; 7385 7386 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7387 if (!target_pfd) 7388 goto efault; 7389 7390 pfd = alloca(sizeof(struct pollfd) * nfds); 7391 for(i = 0; i < nfds; i++) { 7392 pfd[i].fd = tswap32(target_pfd[i].fd); 7393 pfd[i].events = tswap16(target_pfd[i].events); 7394 } 7395 7396 # ifdef TARGET_NR_ppoll 7397 if (num == TARGET_NR_ppoll) { 7398 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7399 target_sigset_t *target_set; 7400 sigset_t _set, *set = &_set; 7401 7402 if (arg3) { 7403 if (target_to_host_timespec(timeout_ts, arg3)) { 7404 unlock_user(target_pfd, arg1, 0); 7405 goto efault; 7406 } 7407 } else { 7408 timeout_ts = NULL; 7409 } 7410 7411 if (arg4) { 7412 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7413 if (!target_set) { 7414 unlock_user(target_pfd, arg1, 0); 7415 goto efault; 7416 } 7417 target_to_host_sigset(set, target_set); 7418 } else { 7419 set = NULL; 7420 } 7421 7422 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7423 7424 if (!is_error(ret) && arg3) { 7425 host_to_target_timespec(arg3, timeout_ts); 7426 } 7427 if (arg4) 
{ 7428 unlock_user(target_set, arg4, 0); 7429 } 7430 } else 7431 # endif 7432 ret = get_errno(poll(pfd, nfds, timeout)); 7433 7434 if (!is_error(ret)) { 7435 for(i = 0; i < nfds; i++) { 7436 target_pfd[i].revents = tswap16(pfd[i].revents); 7437 } 7438 } 7439 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7440 } 7441 break; 7442 #endif 7443 case TARGET_NR_flock: 7444 /* NOTE: the flock constant seems to be the same for every 7445 Linux platform */ 7446 ret = get_errno(flock(arg1, arg2)); 7447 break; 7448 case TARGET_NR_readv: 7449 { 7450 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7451 if (vec != NULL) { 7452 ret = get_errno(readv(arg1, vec, arg3)); 7453 unlock_iovec(vec, arg2, arg3, 1); 7454 } else { 7455 ret = -host_to_target_errno(errno); 7456 } 7457 } 7458 break; 7459 case TARGET_NR_writev: 7460 { 7461 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7462 if (vec != NULL) { 7463 ret = get_errno(writev(arg1, vec, arg3)); 7464 unlock_iovec(vec, arg2, arg3, 0); 7465 } else { 7466 ret = -host_to_target_errno(errno); 7467 } 7468 } 7469 break; 7470 case TARGET_NR_getsid: 7471 ret = get_errno(getsid(arg1)); 7472 break; 7473 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7474 case TARGET_NR_fdatasync: 7475 ret = get_errno(fdatasync(arg1)); 7476 break; 7477 #endif 7478 case TARGET_NR__sysctl: 7479 /* We don't implement this, but ENOTDIR is always a safe 7480 return value. */ 7481 ret = -TARGET_ENOTDIR; 7482 break; 7483 case TARGET_NR_sched_getaffinity: 7484 { 7485 unsigned int mask_size; 7486 unsigned long *mask; 7487 7488 /* 7489 * sched_getaffinity needs multiples of ulong, so need to take 7490 * care of mismatches between target ulong and host ulong sizes. 7491 */ 7492 if (arg2 & (sizeof(abi_ulong) - 1)) { 7493 ret = -TARGET_EINVAL; 7494 break; 7495 } 7496 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7497 7498 mask = alloca(mask_size); 7499 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7500 7501 if (!is_error(ret)) { 7502 if (copy_to_user(arg3, mask, ret)) { 7503 goto efault; 7504 } 7505 } 7506 } 7507 break; 7508 case TARGET_NR_sched_setaffinity: 7509 { 7510 unsigned int mask_size; 7511 unsigned long *mask; 7512 7513 /* 7514 * sched_setaffinity needs multiples of ulong, so need to take 7515 * care of mismatches between target ulong and host ulong sizes. 
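             * The guest-supplied length must itself be a multiple of the
             * target ulong size; it is then rounded up to whole host ulongs.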
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
                goto efault;
            }
            memcpy(mask, p, arg2);
            unlock_user_struct(p, arg3, 0);

            ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
        break;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            ret = get_errno(sched_setparam(arg1, &schp));
        }
        break;
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    goto efault;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        break;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
        break;
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
        break;
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
        break;
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
        break;
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
        break;
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec(arg2, &ts);
            }
        }
        break;
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
        goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
        goto unimplemented;
#endif
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                goto efault;
            }
            break;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            break;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            break;
        }
#endif
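        /* PR_GET_NAME/PR_SET_NAME above use a 16-byte (TASK_COMM_LEN) buffer
         * passed by pointer; all other options are passed through unchanged. */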
default: 7645 /* Most prctl options have no pointer arguments */ 7646 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7647 break; 7648 } 7649 break; 7650 #ifdef TARGET_NR_arch_prctl 7651 case TARGET_NR_arch_prctl: 7652 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7653 ret = do_arch_prctl(cpu_env, arg1, arg2); 7654 break; 7655 #else 7656 goto unimplemented; 7657 #endif 7658 #endif 7659 #ifdef TARGET_NR_pread64 7660 case TARGET_NR_pread64: 7661 if (regpairs_aligned(cpu_env)) { 7662 arg4 = arg5; 7663 arg5 = arg6; 7664 } 7665 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7666 goto efault; 7667 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7668 unlock_user(p, arg2, ret); 7669 break; 7670 case TARGET_NR_pwrite64: 7671 if (regpairs_aligned(cpu_env)) { 7672 arg4 = arg5; 7673 arg5 = arg6; 7674 } 7675 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7676 goto efault; 7677 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7678 unlock_user(p, arg2, 0); 7679 break; 7680 #endif 7681 case TARGET_NR_getcwd: 7682 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7683 goto efault; 7684 ret = get_errno(sys_getcwd1(p, arg2)); 7685 unlock_user(p, arg1, ret); 7686 break; 7687 case TARGET_NR_capget: 7688 case TARGET_NR_capset: 7689 { 7690 struct target_user_cap_header *target_header; 7691 struct target_user_cap_data *target_data = NULL; 7692 struct __user_cap_header_struct header; 7693 struct __user_cap_data_struct data[2]; 7694 struct __user_cap_data_struct *dataptr = NULL; 7695 int i, target_datalen; 7696 int data_items = 1; 7697 7698 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 7699 goto efault; 7700 } 7701 header.version = tswap32(target_header->version); 7702 header.pid = tswap32(target_header->pid); 7703 7704 if (header.version != _LINUX_CAPABILITY_VERSION) { 7705 /* Version 2 and up takes pointer to two user_data structs */ 7706 data_items = 2; 7707 } 7708 7709 target_datalen = sizeof(*target_data) * data_items; 7710 7711 if (arg2) { 7712 if (num == TARGET_NR_capget) { 7713 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 7714 } else { 7715 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 7716 } 7717 if (!target_data) { 7718 unlock_user_struct(target_header, arg1, 0); 7719 goto efault; 7720 } 7721 7722 if (num == TARGET_NR_capset) { 7723 for (i = 0; i < data_items; i++) { 7724 data[i].effective = tswap32(target_data[i].effective); 7725 data[i].permitted = tswap32(target_data[i].permitted); 7726 data[i].inheritable = tswap32(target_data[i].inheritable); 7727 } 7728 } 7729 7730 dataptr = data; 7731 } 7732 7733 if (num == TARGET_NR_capget) { 7734 ret = get_errno(capget(&header, dataptr)); 7735 } else { 7736 ret = get_errno(capset(&header, dataptr)); 7737 } 7738 7739 /* The kernel always updates version for both capget and capset */ 7740 target_header->version = tswap32(header.version); 7741 unlock_user_struct(target_header, arg1, 1); 7742 7743 if (arg2) { 7744 if (num == TARGET_NR_capget) { 7745 for (i = 0; i < data_items; i++) { 7746 target_data[i].effective = tswap32(data[i].effective); 7747 target_data[i].permitted = tswap32(data[i].permitted); 7748 target_data[i].inheritable = tswap32(data[i].inheritable); 7749 } 7750 unlock_user(target_data, arg2, target_datalen); 7751 } else { 7752 unlock_user(target_data, arg2, 0); 7753 } 7754 } 7755 break; 7756 } 7757 case TARGET_NR_sigaltstack: 7758 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7759 defined(TARGET_SPARC) || 
defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7760 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7761 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7762 break; 7763 #else 7764 goto unimplemented; 7765 #endif 7766 7767 #ifdef CONFIG_SENDFILE 7768 case TARGET_NR_sendfile: 7769 { 7770 off_t *offp = NULL; 7771 off_t off; 7772 if (arg3) { 7773 ret = get_user_sal(off, arg3); 7774 if (is_error(ret)) { 7775 break; 7776 } 7777 offp = &off; 7778 } 7779 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7780 if (!is_error(ret) && arg3) { 7781 abi_long ret2 = put_user_sal(off, arg3); 7782 if (is_error(ret2)) { 7783 ret = ret2; 7784 } 7785 } 7786 break; 7787 } 7788 #ifdef TARGET_NR_sendfile64 7789 case TARGET_NR_sendfile64: 7790 { 7791 off_t *offp = NULL; 7792 off_t off; 7793 if (arg3) { 7794 ret = get_user_s64(off, arg3); 7795 if (is_error(ret)) { 7796 break; 7797 } 7798 offp = &off; 7799 } 7800 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7801 if (!is_error(ret) && arg3) { 7802 abi_long ret2 = put_user_s64(off, arg3); 7803 if (is_error(ret2)) { 7804 ret = ret2; 7805 } 7806 } 7807 break; 7808 } 7809 #endif 7810 #else 7811 case TARGET_NR_sendfile: 7812 #ifdef TARGET_NR_sendfile64 7813 case TARGET_NR_sendfile64: 7814 #endif 7815 goto unimplemented; 7816 #endif 7817 7818 #ifdef TARGET_NR_getpmsg 7819 case TARGET_NR_getpmsg: 7820 goto unimplemented; 7821 #endif 7822 #ifdef TARGET_NR_putpmsg 7823 case TARGET_NR_putpmsg: 7824 goto unimplemented; 7825 #endif 7826 #ifdef TARGET_NR_vfork 7827 case TARGET_NR_vfork: 7828 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7829 0, 0, 0, 0)); 7830 break; 7831 #endif 7832 #ifdef TARGET_NR_ugetrlimit 7833 case TARGET_NR_ugetrlimit: 7834 { 7835 struct rlimit rlim; 7836 int resource = target_to_host_resource(arg1); 7837 ret = get_errno(getrlimit(resource, &rlim)); 7838 if (!is_error(ret)) { 7839 struct target_rlimit *target_rlim; 7840 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7841 goto efault; 7842 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7843 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7844 unlock_user_struct(target_rlim, arg2, 1); 7845 } 7846 break; 7847 } 7848 #endif 7849 #ifdef TARGET_NR_truncate64 7850 case TARGET_NR_truncate64: 7851 if (!(p = lock_user_string(arg1))) 7852 goto efault; 7853 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7854 unlock_user(p, arg1, 0); 7855 break; 7856 #endif 7857 #ifdef TARGET_NR_ftruncate64 7858 case TARGET_NR_ftruncate64: 7859 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7860 break; 7861 #endif 7862 #ifdef TARGET_NR_stat64 7863 case TARGET_NR_stat64: 7864 if (!(p = lock_user_string(arg1))) 7865 goto efault; 7866 ret = get_errno(stat(path(p), &st)); 7867 unlock_user(p, arg1, 0); 7868 if (!is_error(ret)) 7869 ret = host_to_target_stat64(cpu_env, arg2, &st); 7870 break; 7871 #endif 7872 #ifdef TARGET_NR_lstat64 7873 case TARGET_NR_lstat64: 7874 if (!(p = lock_user_string(arg1))) 7875 goto efault; 7876 ret = get_errno(lstat(path(p), &st)); 7877 unlock_user(p, arg1, 0); 7878 if (!is_error(ret)) 7879 ret = host_to_target_stat64(cpu_env, arg2, &st); 7880 break; 7881 #endif 7882 #ifdef TARGET_NR_fstat64 7883 case TARGET_NR_fstat64: 7884 ret = get_errno(fstat(arg1, &st)); 7885 if (!is_error(ret)) 7886 ret = host_to_target_stat64(cpu_env, arg2, &st); 7887 break; 7888 #endif 7889 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 7890 #ifdef TARGET_NR_fstatat64 
7891 case TARGET_NR_fstatat64: 7892 #endif 7893 #ifdef TARGET_NR_newfstatat 7894 case TARGET_NR_newfstatat: 7895 #endif 7896 if (!(p = lock_user_string(arg2))) 7897 goto efault; 7898 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 7899 if (!is_error(ret)) 7900 ret = host_to_target_stat64(cpu_env, arg3, &st); 7901 break; 7902 #endif 7903 case TARGET_NR_lchown: 7904 if (!(p = lock_user_string(arg1))) 7905 goto efault; 7906 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7907 unlock_user(p, arg1, 0); 7908 break; 7909 #ifdef TARGET_NR_getuid 7910 case TARGET_NR_getuid: 7911 ret = get_errno(high2lowuid(getuid())); 7912 break; 7913 #endif 7914 #ifdef TARGET_NR_getgid 7915 case TARGET_NR_getgid: 7916 ret = get_errno(high2lowgid(getgid())); 7917 break; 7918 #endif 7919 #ifdef TARGET_NR_geteuid 7920 case TARGET_NR_geteuid: 7921 ret = get_errno(high2lowuid(geteuid())); 7922 break; 7923 #endif 7924 #ifdef TARGET_NR_getegid 7925 case TARGET_NR_getegid: 7926 ret = get_errno(high2lowgid(getegid())); 7927 break; 7928 #endif 7929 case TARGET_NR_setreuid: 7930 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7931 break; 7932 case TARGET_NR_setregid: 7933 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7934 break; 7935 case TARGET_NR_getgroups: 7936 { 7937 int gidsetsize = arg1; 7938 target_id *target_grouplist; 7939 gid_t *grouplist; 7940 int i; 7941 7942 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7943 ret = get_errno(getgroups(gidsetsize, grouplist)); 7944 if (gidsetsize == 0) 7945 break; 7946 if (!is_error(ret)) { 7947 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 7948 if (!target_grouplist) 7949 goto efault; 7950 for(i = 0;i < ret; i++) 7951 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7952 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 7953 } 7954 } 7955 break; 7956 case TARGET_NR_setgroups: 7957 { 7958 int gidsetsize = arg1; 7959 target_id *target_grouplist; 7960 gid_t *grouplist = NULL; 7961 int i; 7962 if (gidsetsize) { 7963 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7964 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 7965 if (!target_grouplist) { 7966 ret = -TARGET_EFAULT; 7967 goto fail; 7968 } 7969 for (i = 0; i < gidsetsize; i++) { 7970 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7971 } 7972 unlock_user(target_grouplist, arg2, 0); 7973 } 7974 ret = get_errno(setgroups(gidsetsize, grouplist)); 7975 } 7976 break; 7977 case TARGET_NR_fchown: 7978 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7979 break; 7980 #if defined(TARGET_NR_fchownat) 7981 case TARGET_NR_fchownat: 7982 if (!(p = lock_user_string(arg2))) 7983 goto efault; 7984 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 7985 low2highgid(arg4), arg5)); 7986 unlock_user(p, arg2, 0); 7987 break; 7988 #endif 7989 #ifdef TARGET_NR_setresuid 7990 case TARGET_NR_setresuid: 7991 ret = get_errno(setresuid(low2highuid(arg1), 7992 low2highuid(arg2), 7993 low2highuid(arg3))); 7994 break; 7995 #endif 7996 #ifdef TARGET_NR_getresuid 7997 case TARGET_NR_getresuid: 7998 { 7999 uid_t ruid, euid, suid; 8000 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8001 if (!is_error(ret)) { 8002 if (put_user_id(high2lowuid(ruid), arg1) 8003 || put_user_id(high2lowuid(euid), arg2) 8004 || put_user_id(high2lowuid(suid), arg3)) 8005 goto efault; 8006 } 8007 } 8008 break; 8009 #endif 8010 #ifdef TARGET_NR_getresgid 8011 case TARGET_NR_setresgid: 8012 ret = 
get_errno(setresgid(low2highgid(arg1), 8013 low2highgid(arg2), 8014 low2highgid(arg3))); 8015 break; 8016 #endif 8017 #ifdef TARGET_NR_getresgid 8018 case TARGET_NR_getresgid: 8019 { 8020 gid_t rgid, egid, sgid; 8021 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8022 if (!is_error(ret)) { 8023 if (put_user_id(high2lowgid(rgid), arg1) 8024 || put_user_id(high2lowgid(egid), arg2) 8025 || put_user_id(high2lowgid(sgid), arg3)) 8026 goto efault; 8027 } 8028 } 8029 break; 8030 #endif 8031 case TARGET_NR_chown: 8032 if (!(p = lock_user_string(arg1))) 8033 goto efault; 8034 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 8035 unlock_user(p, arg1, 0); 8036 break; 8037 case TARGET_NR_setuid: 8038 ret = get_errno(setuid(low2highuid(arg1))); 8039 break; 8040 case TARGET_NR_setgid: 8041 ret = get_errno(setgid(low2highgid(arg1))); 8042 break; 8043 case TARGET_NR_setfsuid: 8044 ret = get_errno(setfsuid(arg1)); 8045 break; 8046 case TARGET_NR_setfsgid: 8047 ret = get_errno(setfsgid(arg1)); 8048 break; 8049 8050 #ifdef TARGET_NR_lchown32 8051 case TARGET_NR_lchown32: 8052 if (!(p = lock_user_string(arg1))) 8053 goto efault; 8054 ret = get_errno(lchown(p, arg2, arg3)); 8055 unlock_user(p, arg1, 0); 8056 break; 8057 #endif 8058 #ifdef TARGET_NR_getuid32 8059 case TARGET_NR_getuid32: 8060 ret = get_errno(getuid()); 8061 break; 8062 #endif 8063 8064 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 8065 /* Alpha specific */ 8066 case TARGET_NR_getxuid: 8067 { 8068 uid_t euid; 8069 euid=geteuid(); 8070 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 8071 } 8072 ret = get_errno(getuid()); 8073 break; 8074 #endif 8075 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 8076 /* Alpha specific */ 8077 case TARGET_NR_getxgid: 8078 { 8079 uid_t egid; 8080 egid=getegid(); 8081 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 8082 } 8083 ret = get_errno(getgid()); 8084 break; 8085 #endif 8086 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 8087 /* Alpha specific */ 8088 case TARGET_NR_osf_getsysinfo: 8089 ret = -TARGET_EOPNOTSUPP; 8090 switch (arg1) { 8091 case TARGET_GSI_IEEE_FP_CONTROL: 8092 { 8093 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 8094 8095 /* Copied from linux ieee_fpcr_to_swcr. */ 8096 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 8097 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 8098 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 8099 | SWCR_TRAP_ENABLE_DZE 8100 | SWCR_TRAP_ENABLE_OVF); 8101 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 8102 | SWCR_TRAP_ENABLE_INE); 8103 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 8104 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 8105 8106 if (put_user_u64 (swcr, arg2)) 8107 goto efault; 8108 ret = 0; 8109 } 8110 break; 8111 8112 /* case GSI_IEEE_STATE_AT_SIGNAL: 8113 -- Not implemented in linux kernel. 8114 case GSI_UACPROC: 8115 -- Retrieves current unaligned access state; not much used. 8116 case GSI_PROC_TYPE: 8117 -- Retrieves implver information; surely not used. 8118 case GSI_GET_HWRPB: 8119 -- Grabs a copy of the HWRPB; surely not used. 8120 */ 8121 } 8122 break; 8123 #endif 8124 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 8125 /* Alpha specific */ 8126 case TARGET_NR_osf_setsysinfo: 8127 ret = -TARGET_EOPNOTSUPP; 8128 switch (arg1) { 8129 case TARGET_SSI_IEEE_FP_CONTROL: 8130 { 8131 uint64_t swcr, fpcr, orig_fpcr; 8132 8133 if (get_user_u64 (swcr, arg2)) { 8134 goto efault; 8135 } 8136 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8137 fpcr = orig_fpcr & FPCR_DYN_MASK; 8138 8139 /* Copied from linux ieee_swcr_to_fpcr. 
*/ 8140 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 8141 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 8142 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 8143 | SWCR_TRAP_ENABLE_DZE 8144 | SWCR_TRAP_ENABLE_OVF)) << 48; 8145 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 8146 | SWCR_TRAP_ENABLE_INE)) << 57; 8147 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 8148 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 8149 8150 cpu_alpha_store_fpcr(cpu_env, fpcr); 8151 ret = 0; 8152 } 8153 break; 8154 8155 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 8156 { 8157 uint64_t exc, fpcr, orig_fpcr; 8158 int si_code; 8159 8160 if (get_user_u64(exc, arg2)) { 8161 goto efault; 8162 } 8163 8164 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8165 8166 /* We only add to the exception status here. */ 8167 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 8168 8169 cpu_alpha_store_fpcr(cpu_env, fpcr); 8170 ret = 0; 8171 8172 /* Old exceptions are not signaled. */ 8173 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 8174 8175 /* If any exceptions set by this call, 8176 and are unmasked, send a signal. */ 8177 si_code = 0; 8178 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 8179 si_code = TARGET_FPE_FLTRES; 8180 } 8181 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 8182 si_code = TARGET_FPE_FLTUND; 8183 } 8184 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 8185 si_code = TARGET_FPE_FLTOVF; 8186 } 8187 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 8188 si_code = TARGET_FPE_FLTDIV; 8189 } 8190 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 8191 si_code = TARGET_FPE_FLTINV; 8192 } 8193 if (si_code != 0) { 8194 target_siginfo_t info; 8195 info.si_signo = SIGFPE; 8196 info.si_errno = 0; 8197 info.si_code = si_code; 8198 info._sifields._sigfault._addr 8199 = ((CPUArchState *)cpu_env)->pc; 8200 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 8201 } 8202 } 8203 break; 8204 8205 /* case SSI_NVPAIRS: 8206 -- Used with SSIN_UACPROC to enable unaligned accesses. 8207 case SSI_IEEE_STATE_AT_SIGNAL: 8208 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8209 -- Not implemented in linux kernel 8210 */ 8211 } 8212 break; 8213 #endif 8214 #ifdef TARGET_NR_osf_sigprocmask 8215 /* Alpha specific. 
*/ 8216 case TARGET_NR_osf_sigprocmask: 8217 { 8218 abi_ulong mask; 8219 int how; 8220 sigset_t set, oldset; 8221 8222 switch(arg1) { 8223 case TARGET_SIG_BLOCK: 8224 how = SIG_BLOCK; 8225 break; 8226 case TARGET_SIG_UNBLOCK: 8227 how = SIG_UNBLOCK; 8228 break; 8229 case TARGET_SIG_SETMASK: 8230 how = SIG_SETMASK; 8231 break; 8232 default: 8233 ret = -TARGET_EINVAL; 8234 goto fail; 8235 } 8236 mask = arg2; 8237 target_to_host_old_sigset(&set, &mask); 8238 do_sigprocmask(how, &set, &oldset); 8239 host_to_target_old_sigset(&mask, &oldset); 8240 ret = mask; 8241 } 8242 break; 8243 #endif 8244 8245 #ifdef TARGET_NR_getgid32 8246 case TARGET_NR_getgid32: 8247 ret = get_errno(getgid()); 8248 break; 8249 #endif 8250 #ifdef TARGET_NR_geteuid32 8251 case TARGET_NR_geteuid32: 8252 ret = get_errno(geteuid()); 8253 break; 8254 #endif 8255 #ifdef TARGET_NR_getegid32 8256 case TARGET_NR_getegid32: 8257 ret = get_errno(getegid()); 8258 break; 8259 #endif 8260 #ifdef TARGET_NR_setreuid32 8261 case TARGET_NR_setreuid32: 8262 ret = get_errno(setreuid(arg1, arg2)); 8263 break; 8264 #endif 8265 #ifdef TARGET_NR_setregid32 8266 case TARGET_NR_setregid32: 8267 ret = get_errno(setregid(arg1, arg2)); 8268 break; 8269 #endif 8270 #ifdef TARGET_NR_getgroups32 8271 case TARGET_NR_getgroups32: 8272 { 8273 int gidsetsize = arg1; 8274 uint32_t *target_grouplist; 8275 gid_t *grouplist; 8276 int i; 8277 8278 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8279 ret = get_errno(getgroups(gidsetsize, grouplist)); 8280 if (gidsetsize == 0) 8281 break; 8282 if (!is_error(ret)) { 8283 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8284 if (!target_grouplist) { 8285 ret = -TARGET_EFAULT; 8286 goto fail; 8287 } 8288 for(i = 0;i < ret; i++) 8289 target_grouplist[i] = tswap32(grouplist[i]); 8290 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8291 } 8292 } 8293 break; 8294 #endif 8295 #ifdef TARGET_NR_setgroups32 8296 case TARGET_NR_setgroups32: 8297 { 8298 int gidsetsize = arg1; 8299 uint32_t *target_grouplist; 8300 gid_t *grouplist; 8301 int i; 8302 8303 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8304 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8305 if (!target_grouplist) { 8306 ret = -TARGET_EFAULT; 8307 goto fail; 8308 } 8309 for(i = 0;i < gidsetsize; i++) 8310 grouplist[i] = tswap32(target_grouplist[i]); 8311 unlock_user(target_grouplist, arg2, 0); 8312 ret = get_errno(setgroups(gidsetsize, grouplist)); 8313 } 8314 break; 8315 #endif 8316 #ifdef TARGET_NR_fchown32 8317 case TARGET_NR_fchown32: 8318 ret = get_errno(fchown(arg1, arg2, arg3)); 8319 break; 8320 #endif 8321 #ifdef TARGET_NR_setresuid32 8322 case TARGET_NR_setresuid32: 8323 ret = get_errno(setresuid(arg1, arg2, arg3)); 8324 break; 8325 #endif 8326 #ifdef TARGET_NR_getresuid32 8327 case TARGET_NR_getresuid32: 8328 { 8329 uid_t ruid, euid, suid; 8330 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8331 if (!is_error(ret)) { 8332 if (put_user_u32(ruid, arg1) 8333 || put_user_u32(euid, arg2) 8334 || put_user_u32(suid, arg3)) 8335 goto efault; 8336 } 8337 } 8338 break; 8339 #endif 8340 #ifdef TARGET_NR_setresgid32 8341 case TARGET_NR_setresgid32: 8342 ret = get_errno(setresgid(arg1, arg2, arg3)); 8343 break; 8344 #endif 8345 #ifdef TARGET_NR_getresgid32 8346 case TARGET_NR_getresgid32: 8347 { 8348 gid_t rgid, egid, sgid; 8349 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8350 if (!is_error(ret)) { 8351 if (put_user_u32(rgid, arg1) 8352 || put_user_u32(egid, arg2) 8353 || put_user_u32(sgid, arg3)) 8354 goto 
efault; 8355 } 8356 } 8357 break; 8358 #endif 8359 #ifdef TARGET_NR_chown32 8360 case TARGET_NR_chown32: 8361 if (!(p = lock_user_string(arg1))) 8362 goto efault; 8363 ret = get_errno(chown(p, arg2, arg3)); 8364 unlock_user(p, arg1, 0); 8365 break; 8366 #endif 8367 #ifdef TARGET_NR_setuid32 8368 case TARGET_NR_setuid32: 8369 ret = get_errno(setuid(arg1)); 8370 break; 8371 #endif 8372 #ifdef TARGET_NR_setgid32 8373 case TARGET_NR_setgid32: 8374 ret = get_errno(setgid(arg1)); 8375 break; 8376 #endif 8377 #ifdef TARGET_NR_setfsuid32 8378 case TARGET_NR_setfsuid32: 8379 ret = get_errno(setfsuid(arg1)); 8380 break; 8381 #endif 8382 #ifdef TARGET_NR_setfsgid32 8383 case TARGET_NR_setfsgid32: 8384 ret = get_errno(setfsgid(arg1)); 8385 break; 8386 #endif 8387 8388 case TARGET_NR_pivot_root: 8389 goto unimplemented; 8390 #ifdef TARGET_NR_mincore 8391 case TARGET_NR_mincore: 8392 { 8393 void *a; 8394 ret = -TARGET_EFAULT; 8395 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 8396 goto efault; 8397 if (!(p = lock_user_string(arg3))) 8398 goto mincore_fail; 8399 ret = get_errno(mincore(a, arg2, p)); 8400 unlock_user(p, arg3, ret); 8401 mincore_fail: 8402 unlock_user(a, arg1, 0); 8403 } 8404 break; 8405 #endif 8406 #ifdef TARGET_NR_arm_fadvise64_64 8407 case TARGET_NR_arm_fadvise64_64: 8408 { 8409 /* 8410 * arm_fadvise64_64 looks like fadvise64_64 but 8411 * with different argument order 8412 */ 8413 abi_long temp; 8414 temp = arg3; 8415 arg3 = arg4; 8416 arg4 = temp; 8417 } 8418 #endif 8419 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 8420 #ifdef TARGET_NR_fadvise64_64 8421 case TARGET_NR_fadvise64_64: 8422 #endif 8423 #ifdef TARGET_NR_fadvise64 8424 case TARGET_NR_fadvise64: 8425 #endif 8426 #ifdef TARGET_S390X 8427 switch (arg4) { 8428 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 8429 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 8430 case 6: arg4 = POSIX_FADV_DONTNEED; break; 8431 case 7: arg4 = POSIX_FADV_NOREUSE; break; 8432 default: break; 8433 } 8434 #endif 8435 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 8436 break; 8437 #endif 8438 #ifdef TARGET_NR_madvise 8439 case TARGET_NR_madvise: 8440 /* A straight passthrough may not be safe because qemu sometimes 8441 turns private file-backed mappings into anonymous mappings. 8442 This will break MADV_DONTNEED. 8443 This is a hint, so ignoring and returning success is ok. 
*/ 8444 ret = get_errno(0); 8445 break; 8446 #endif 8447 #if TARGET_ABI_BITS == 32 8448 case TARGET_NR_fcntl64: 8449 { 8450 int cmd; 8451 struct flock64 fl; 8452 struct target_flock64 *target_fl; 8453 #ifdef TARGET_ARM 8454 struct target_eabi_flock64 *target_efl; 8455 #endif 8456 8457 cmd = target_to_host_fcntl_cmd(arg2); 8458 if (cmd == -TARGET_EINVAL) { 8459 ret = cmd; 8460 break; 8461 } 8462 8463 switch(arg2) { 8464 case TARGET_F_GETLK64: 8465 #ifdef TARGET_ARM 8466 if (((CPUARMState *)cpu_env)->eabi) { 8467 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8468 goto efault; 8469 fl.l_type = tswap16(target_efl->l_type); 8470 fl.l_whence = tswap16(target_efl->l_whence); 8471 fl.l_start = tswap64(target_efl->l_start); 8472 fl.l_len = tswap64(target_efl->l_len); 8473 fl.l_pid = tswap32(target_efl->l_pid); 8474 unlock_user_struct(target_efl, arg3, 0); 8475 } else 8476 #endif 8477 { 8478 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8479 goto efault; 8480 fl.l_type = tswap16(target_fl->l_type); 8481 fl.l_whence = tswap16(target_fl->l_whence); 8482 fl.l_start = tswap64(target_fl->l_start); 8483 fl.l_len = tswap64(target_fl->l_len); 8484 fl.l_pid = tswap32(target_fl->l_pid); 8485 unlock_user_struct(target_fl, arg3, 0); 8486 } 8487 ret = get_errno(fcntl(arg1, cmd, &fl)); 8488 if (ret == 0) { 8489 #ifdef TARGET_ARM 8490 if (((CPUARMState *)cpu_env)->eabi) { 8491 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8492 goto efault; 8493 target_efl->l_type = tswap16(fl.l_type); 8494 target_efl->l_whence = tswap16(fl.l_whence); 8495 target_efl->l_start = tswap64(fl.l_start); 8496 target_efl->l_len = tswap64(fl.l_len); 8497 target_efl->l_pid = tswap32(fl.l_pid); 8498 unlock_user_struct(target_efl, arg3, 1); 8499 } else 8500 #endif 8501 { 8502 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8503 goto efault; 8504 target_fl->l_type = tswap16(fl.l_type); 8505 target_fl->l_whence = tswap16(fl.l_whence); 8506 target_fl->l_start = tswap64(fl.l_start); 8507 target_fl->l_len = tswap64(fl.l_len); 8508 target_fl->l_pid = tswap32(fl.l_pid); 8509 unlock_user_struct(target_fl, arg3, 1); 8510 } 8511 } 8512 break; 8513 8514 case TARGET_F_SETLK64: 8515 case TARGET_F_SETLKW64: 8516 #ifdef TARGET_ARM 8517 if (((CPUARMState *)cpu_env)->eabi) { 8518 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8519 goto efault; 8520 fl.l_type = tswap16(target_efl->l_type); 8521 fl.l_whence = tswap16(target_efl->l_whence); 8522 fl.l_start = tswap64(target_efl->l_start); 8523 fl.l_len = tswap64(target_efl->l_len); 8524 fl.l_pid = tswap32(target_efl->l_pid); 8525 unlock_user_struct(target_efl, arg3, 0); 8526 } else 8527 #endif 8528 { 8529 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8530 goto efault; 8531 fl.l_type = tswap16(target_fl->l_type); 8532 fl.l_whence = tswap16(target_fl->l_whence); 8533 fl.l_start = tswap64(target_fl->l_start); 8534 fl.l_len = tswap64(target_fl->l_len); 8535 fl.l_pid = tswap32(target_fl->l_pid); 8536 unlock_user_struct(target_fl, arg3, 0); 8537 } 8538 ret = get_errno(fcntl(arg1, cmd, &fl)); 8539 break; 8540 default: 8541 ret = do_fcntl(arg1, arg2, arg3); 8542 break; 8543 } 8544 break; 8545 } 8546 #endif 8547 #ifdef TARGET_NR_cacheflush 8548 case TARGET_NR_cacheflush: 8549 /* self-modifying code is handled automatically, so nothing needed */ 8550 ret = 0; 8551 break; 8552 #endif 8553 #ifdef TARGET_NR_security 8554 case TARGET_NR_security: 8555 goto unimplemented; 8556 #endif 8557 #ifdef TARGET_NR_getpagesize 8558 case TARGET_NR_getpagesize: 8559 ret = 
TARGET_PAGE_SIZE; 8560 break; 8561 #endif 8562 case TARGET_NR_gettid: 8563 ret = get_errno(gettid()); 8564 break; 8565 #ifdef TARGET_NR_readahead 8566 case TARGET_NR_readahead: 8567 #if TARGET_ABI_BITS == 32 8568 if (regpairs_aligned(cpu_env)) { 8569 arg2 = arg3; 8570 arg3 = arg4; 8571 arg4 = arg5; 8572 } 8573 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 8574 #else 8575 ret = get_errno(readahead(arg1, arg2, arg3)); 8576 #endif 8577 break; 8578 #endif 8579 #ifdef CONFIG_ATTR 8580 #ifdef TARGET_NR_setxattr 8581 case TARGET_NR_listxattr: 8582 case TARGET_NR_llistxattr: 8583 { 8584 void *p, *b = 0; 8585 if (arg2) { 8586 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8587 if (!b) { 8588 ret = -TARGET_EFAULT; 8589 break; 8590 } 8591 } 8592 p = lock_user_string(arg1); 8593 if (p) { 8594 if (num == TARGET_NR_listxattr) { 8595 ret = get_errno(listxattr(p, b, arg3)); 8596 } else { 8597 ret = get_errno(llistxattr(p, b, arg3)); 8598 } 8599 } else { 8600 ret = -TARGET_EFAULT; 8601 } 8602 unlock_user(p, arg1, 0); 8603 unlock_user(b, arg2, arg3); 8604 break; 8605 } 8606 case TARGET_NR_flistxattr: 8607 { 8608 void *b = 0; 8609 if (arg2) { 8610 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8611 if (!b) { 8612 ret = -TARGET_EFAULT; 8613 break; 8614 } 8615 } 8616 ret = get_errno(flistxattr(arg1, b, arg3)); 8617 unlock_user(b, arg2, arg3); 8618 break; 8619 } 8620 case TARGET_NR_setxattr: 8621 case TARGET_NR_lsetxattr: 8622 { 8623 void *p, *n, *v = 0; 8624 if (arg3) { 8625 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8626 if (!v) { 8627 ret = -TARGET_EFAULT; 8628 break; 8629 } 8630 } 8631 p = lock_user_string(arg1); 8632 n = lock_user_string(arg2); 8633 if (p && n) { 8634 if (num == TARGET_NR_setxattr) { 8635 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8636 } else { 8637 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8638 } 8639 } else { 8640 ret = -TARGET_EFAULT; 8641 } 8642 unlock_user(p, arg1, 0); 8643 unlock_user(n, arg2, 0); 8644 unlock_user(v, arg3, 0); 8645 } 8646 break; 8647 case TARGET_NR_fsetxattr: 8648 { 8649 void *n, *v = 0; 8650 if (arg3) { 8651 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8652 if (!v) { 8653 ret = -TARGET_EFAULT; 8654 break; 8655 } 8656 } 8657 n = lock_user_string(arg2); 8658 if (n) { 8659 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8660 } else { 8661 ret = -TARGET_EFAULT; 8662 } 8663 unlock_user(n, arg2, 0); 8664 unlock_user(v, arg3, 0); 8665 } 8666 break; 8667 case TARGET_NR_getxattr: 8668 case TARGET_NR_lgetxattr: 8669 { 8670 void *p, *n, *v = 0; 8671 if (arg3) { 8672 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8673 if (!v) { 8674 ret = -TARGET_EFAULT; 8675 break; 8676 } 8677 } 8678 p = lock_user_string(arg1); 8679 n = lock_user_string(arg2); 8680 if (p && n) { 8681 if (num == TARGET_NR_getxattr) { 8682 ret = get_errno(getxattr(p, n, v, arg4)); 8683 } else { 8684 ret = get_errno(lgetxattr(p, n, v, arg4)); 8685 } 8686 } else { 8687 ret = -TARGET_EFAULT; 8688 } 8689 unlock_user(p, arg1, 0); 8690 unlock_user(n, arg2, 0); 8691 unlock_user(v, arg3, arg4); 8692 } 8693 break; 8694 case TARGET_NR_fgetxattr: 8695 { 8696 void *n, *v = 0; 8697 if (arg3) { 8698 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8699 if (!v) { 8700 ret = -TARGET_EFAULT; 8701 break; 8702 } 8703 } 8704 n = lock_user_string(arg2); 8705 if (n) { 8706 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8707 } else { 8708 ret = -TARGET_EFAULT; 8709 } 8710 unlock_user(n, arg2, 0); 8711 unlock_user(v, arg3, arg4); 8712 } 8713 break; 8714 case TARGET_NR_removexattr: 8715 case 
TARGET_NR_lremovexattr: 8716 { 8717 void *p, *n; 8718 p = lock_user_string(arg1); 8719 n = lock_user_string(arg2); 8720 if (p && n) { 8721 if (num == TARGET_NR_removexattr) { 8722 ret = get_errno(removexattr(p, n)); 8723 } else { 8724 ret = get_errno(lremovexattr(p, n)); 8725 } 8726 } else { 8727 ret = -TARGET_EFAULT; 8728 } 8729 unlock_user(p, arg1, 0); 8730 unlock_user(n, arg2, 0); 8731 } 8732 break; 8733 case TARGET_NR_fremovexattr: 8734 { 8735 void *n; 8736 n = lock_user_string(arg2); 8737 if (n) { 8738 ret = get_errno(fremovexattr(arg1, n)); 8739 } else { 8740 ret = -TARGET_EFAULT; 8741 } 8742 unlock_user(n, arg2, 0); 8743 } 8744 break; 8745 #endif 8746 #endif /* CONFIG_ATTR */ 8747 #ifdef TARGET_NR_set_thread_area 8748 case TARGET_NR_set_thread_area: 8749 #if defined(TARGET_MIPS) 8750 ((CPUMIPSState *) cpu_env)->tls_value = arg1; 8751 ret = 0; 8752 break; 8753 #elif defined(TARGET_CRIS) 8754 if (arg1 & 0xff) 8755 ret = -TARGET_EINVAL; 8756 else { 8757 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 8758 ret = 0; 8759 } 8760 break; 8761 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 8762 ret = do_set_thread_area(cpu_env, arg1); 8763 break; 8764 #elif defined(TARGET_M68K) 8765 { 8766 TaskState *ts = cpu->opaque; 8767 ts->tp_value = arg1; 8768 ret = 0; 8769 break; 8770 } 8771 #else 8772 goto unimplemented_nowarn; 8773 #endif 8774 #endif 8775 #ifdef TARGET_NR_get_thread_area 8776 case TARGET_NR_get_thread_area: 8777 #if defined(TARGET_I386) && defined(TARGET_ABI32) 8778 ret = do_get_thread_area(cpu_env, arg1); 8779 break; 8780 #elif defined(TARGET_M68K) 8781 { 8782 TaskState *ts = cpu->opaque; 8783 ret = ts->tp_value; 8784 break; 8785 } 8786 #else 8787 goto unimplemented_nowarn; 8788 #endif 8789 #endif 8790 #ifdef TARGET_NR_getdomainname 8791 case TARGET_NR_getdomainname: 8792 goto unimplemented_nowarn; 8793 #endif 8794 8795 #ifdef TARGET_NR_clock_gettime 8796 case TARGET_NR_clock_gettime: 8797 { 8798 struct timespec ts; 8799 ret = get_errno(clock_gettime(arg1, &ts)); 8800 if (!is_error(ret)) { 8801 host_to_target_timespec(arg2, &ts); 8802 } 8803 break; 8804 } 8805 #endif 8806 #ifdef TARGET_NR_clock_getres 8807 case TARGET_NR_clock_getres: 8808 { 8809 struct timespec ts; 8810 ret = get_errno(clock_getres(arg1, &ts)); 8811 if (!is_error(ret)) { 8812 host_to_target_timespec(arg2, &ts); 8813 } 8814 break; 8815 } 8816 #endif 8817 #ifdef TARGET_NR_clock_nanosleep 8818 case TARGET_NR_clock_nanosleep: 8819 { 8820 struct timespec ts; 8821 target_to_host_timespec(&ts, arg3); 8822 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); 8823 if (arg4) 8824 host_to_target_timespec(arg4, &ts); 8825 break; 8826 } 8827 #endif 8828 8829 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 8830 case TARGET_NR_set_tid_address: 8831 ret = get_errno(set_tid_address((int *)g2h(arg1))); 8832 break; 8833 #endif 8834 8835 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 8836 case TARGET_NR_tkill: 8837 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 8838 break; 8839 #endif 8840 8841 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 8842 case TARGET_NR_tgkill: 8843 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 8844 target_to_host_signal(arg3))); 8845 break; 8846 #endif 8847 8848 #ifdef TARGET_NR_set_robust_list 8849 case TARGET_NR_set_robust_list: 8850 case TARGET_NR_get_robust_list: 8851 /* The ABI for supporting robust futexes has userspace pass 8852 * the kernel a pointer to a linked list which is updated by 8853 * userspace after the syscall; the list is walked by the kernel 8854 * when the thread exits. Since the linked list in QEMU guest 8855 * memory isn't a valid linked list for the host and we have 8856 * no way to reliably intercept the thread-death event, we can't 8857 * support these. Silently return ENOSYS so that guest userspace 8858 * falls back to a non-robust futex implementation (which should 8859 * be OK except in the corner case of the guest crashing while 8860 * holding a mutex that is shared with another process via 8861 * shared memory). 8862 */ 8863 goto unimplemented_nowarn; 8864 #endif 8865 8866 #if defined(TARGET_NR_utimensat) 8867 case TARGET_NR_utimensat: 8868 { 8869 struct timespec *tsp, ts[2]; 8870 if (!arg3) { 8871 tsp = NULL; 8872 } else { 8873 target_to_host_timespec(ts, arg3); 8874 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 8875 tsp = ts; 8876 } 8877 if (!arg2) 8878 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 8879 else { 8880 if (!(p = lock_user_string(arg2))) { 8881 ret = -TARGET_EFAULT; 8882 goto fail; 8883 } 8884 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 8885 unlock_user(p, arg2, 0); 8886 } 8887 } 8888 break; 8889 #endif 8890 case TARGET_NR_futex: 8891 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 8892 break; 8893 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 8894 case TARGET_NR_inotify_init: 8895 ret = get_errno(sys_inotify_init()); 8896 break; 8897 #endif 8898 #ifdef CONFIG_INOTIFY1 8899 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 8900 case TARGET_NR_inotify_init1: 8901 ret = get_errno(sys_inotify_init1(arg1)); 8902 break; 8903 #endif 8904 #endif 8905 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 8906 case TARGET_NR_inotify_add_watch: 8907 p = lock_user_string(arg2); 8908 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 8909 unlock_user(p, arg2, 0); 8910 break; 8911 #endif 8912 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 8913 case TARGET_NR_inotify_rm_watch: 8914 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 8915 break; 8916 #endif 8917 8918 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 8919 case TARGET_NR_mq_open: 8920 { 8921 struct mq_attr posix_mq_attr; 8922 8923 p = lock_user_string(arg1 - 1); 8924 if (arg4 != 0) 8925 copy_from_user_mq_attr (&posix_mq_attr, arg4); 8926 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 8927 unlock_user (p, arg1, 0); 8928 } 8929 break; 8930 8931 case TARGET_NR_mq_unlink: 8932 p = lock_user_string(arg1 - 1); 
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                /* the out offset is read from arg4, not arg2 */
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if
defined(TARGET_MIPS) 9068 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9069 target_offset64(arg5, arg6), arg7)); 9070 #else 9071 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 9072 target_offset64(arg4, arg5), arg6)); 9073 #endif /* !TARGET_MIPS */ 9074 #else 9075 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 9076 #endif 9077 break; 9078 #endif 9079 #if defined(TARGET_NR_sync_file_range2) 9080 case TARGET_NR_sync_file_range2: 9081 /* This is like sync_file_range but the arguments are reordered */ 9082 #if TARGET_ABI_BITS == 32 9083 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9084 target_offset64(arg5, arg6), arg2)); 9085 #else 9086 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 9087 #endif 9088 break; 9089 #endif 9090 #endif 9091 #if defined(CONFIG_EPOLL) 9092 #if defined(TARGET_NR_epoll_create) 9093 case TARGET_NR_epoll_create: 9094 ret = get_errno(epoll_create(arg1)); 9095 break; 9096 #endif 9097 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 9098 case TARGET_NR_epoll_create1: 9099 ret = get_errno(epoll_create1(arg1)); 9100 break; 9101 #endif 9102 #if defined(TARGET_NR_epoll_ctl) 9103 case TARGET_NR_epoll_ctl: 9104 { 9105 struct epoll_event ep; 9106 struct epoll_event *epp = 0; 9107 if (arg4) { 9108 struct target_epoll_event *target_ep; 9109 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 9110 goto efault; 9111 } 9112 ep.events = tswap32(target_ep->events); 9113 /* The epoll_data_t union is just opaque data to the kernel, 9114 * so we transfer all 64 bits across and need not worry what 9115 * actual data type it is. 9116 */ 9117 ep.data.u64 = tswap64(target_ep->data.u64); 9118 unlock_user_struct(target_ep, arg4, 0); 9119 epp = &ep; 9120 } 9121 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 9122 break; 9123 } 9124 #endif 9125 9126 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 9127 #define IMPLEMENT_EPOLL_PWAIT 9128 #endif 9129 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 9130 #if defined(TARGET_NR_epoll_wait) 9131 case TARGET_NR_epoll_wait: 9132 #endif 9133 #if defined(IMPLEMENT_EPOLL_PWAIT) 9134 case TARGET_NR_epoll_pwait: 9135 #endif 9136 { 9137 struct target_epoll_event *target_ep; 9138 struct epoll_event *ep; 9139 int epfd = arg1; 9140 int maxevents = arg3; 9141 int timeout = arg4; 9142 9143 target_ep = lock_user(VERIFY_WRITE, arg2, 9144 maxevents * sizeof(struct target_epoll_event), 1); 9145 if (!target_ep) { 9146 goto efault; 9147 } 9148 9149 ep = alloca(maxevents * sizeof(struct epoll_event)); 9150 9151 switch (num) { 9152 #if defined(IMPLEMENT_EPOLL_PWAIT) 9153 case TARGET_NR_epoll_pwait: 9154 { 9155 target_sigset_t *target_set; 9156 sigset_t _set, *set = &_set; 9157 9158 if (arg5) { 9159 target_set = lock_user(VERIFY_READ, arg5, 9160 sizeof(target_sigset_t), 1); 9161 if (!target_set) { 9162 unlock_user(target_ep, arg2, 0); 9163 goto efault; 9164 } 9165 target_to_host_sigset(set, target_set); 9166 unlock_user(target_set, arg5, 0); 9167 } else { 9168 set = NULL; 9169 } 9170 9171 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 9172 break; 9173 } 9174 #endif 9175 #if defined(TARGET_NR_epoll_wait) 9176 case TARGET_NR_epoll_wait: 9177 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 9178 break; 9179 #endif 9180 default: 9181 ret = -TARGET_ENOSYS; 9182 } 9183 if (!is_error(ret)) { 9184 int i; 9185 for (i = 0; i < ret; i++) { 9186 target_ep[i].events = tswap32(ep[i].events); 9187 
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
            /* the load faulted; don't compare against the uninitialized value */
            break;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this?
         */
        ret = 0;
        break;
    }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
        struct target_sigevent *ptarget_sevp;
        struct target_timer_t *ptarget_timer;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
                    goto efault;
                }

                host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
                host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);

                phost_sevp = &host_sevp;
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
                    goto efault;
                }
                ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
                unlock_user_struct(ptarget_timer, arg3, 1);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        arg1 &= 0xffff;
        if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                    timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is the fourth argument; arg2 is the flags word */
            if (arg4) {
                host_to_target_itimerspec(arg4, &hspec_old);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        arg1 &= 0xffff;
        if (!arg2) {
            return -TARGET_EFAULT;
        } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_getoverrun(htimer));
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[arg1] = 0;
        }
        break;
    }
#endif

    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
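    /*
     * Illustrative note on the timer_* cases above (drawn from that code,
     * not a separate code path): timer_create hands the guest a timer_t
     * that packs a magic tag in the upper bits and the host timer index in
     * the low 16 bits; the later timer_* syscalls recover the index by
     * masking:
     *
     *     guest_handle = 0xcafe0000 | timer_index;   // written by timer_create
     *     timer_index  = guest_handle & 0xffff;      // the "arg1 &= 0xffff" above
     *
     * which assumes g_posix_timers has at most 0x10000 entries.
     */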
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}
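#if 0
/*
 * Illustrative sketch only (kept out of the build): how a 64-bit file
 * offset is reassembled from a pair of 32-bit syscall arguments, as the
 * 32-bit ABI paths above do for readahead (and, via target_offset64(),
 * for pread64, fallocate and sync_file_range).  The helper name is
 * hypothetical, and which register carries the low or high half depends
 * on the target ABI.
 */
static uint64_t example_join_offset64(uint32_t lo, uint32_t hi)
{
    /* readahead above: ((off64_t)arg3 << 32) | arg2  (hi = arg3, lo = arg2) */
    return ((uint64_t)hi << 32) | lo;
}
#endif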