/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"

#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])


#undef _syscall0
#undef _syscall1
#undef _syscall2 128 #undef _syscall3 129 #undef _syscall4 130 #undef _syscall5 131 #undef _syscall6 132 133 #define _syscall0(type,name) \ 134 static type name (void) \ 135 { \ 136 return syscall(__NR_##name); \ 137 } 138 139 #define _syscall1(type,name,type1,arg1) \ 140 static type name (type1 arg1) \ 141 { \ 142 return syscall(__NR_##name, arg1); \ 143 } 144 145 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 146 static type name (type1 arg1,type2 arg2) \ 147 { \ 148 return syscall(__NR_##name, arg1, arg2); \ 149 } 150 151 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 152 static type name (type1 arg1,type2 arg2,type3 arg3) \ 153 { \ 154 return syscall(__NR_##name, arg1, arg2, arg3); \ 155 } 156 157 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 159 { \ 160 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 161 } 162 163 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 164 type5,arg5) \ 165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 166 { \ 167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 168 } 169 170 171 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 172 type5,arg5,type6,arg6) \ 173 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 174 type6 arg6) \ 175 { \ 176 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 177 } 178 179 180 #define __NR_sys_uname __NR_uname 181 #define __NR_sys_getcwd1 __NR_getcwd 182 #define __NR_sys_getdents __NR_getdents 183 #define __NR_sys_getdents64 __NR_getdents64 184 #define __NR_sys_getpriority __NR_getpriority 185 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 186 #define __NR_sys_syslog __NR_syslog 187 #define __NR_sys_tgkill __NR_tgkill 188 #define __NR_sys_tkill __NR_tkill 189 #define __NR_sys_futex __NR_futex 190 #define __NR_sys_inotify_init __NR_inotify_init 191 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 192 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 193 194 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 195 defined(__s390x__) 196 #define __NR__llseek __NR_lseek 197 #endif 198 199 #ifdef __NR_gettid 200 _syscall0(int, gettid) 201 #else 202 /* This is a replacement for the host gettid() and must return a host 203 errno. 
*/ 204 static int gettid(void) { 205 return -ENOSYS; 206 } 207 #endif 208 #ifdef __NR_getdents 209 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 210 #endif 211 #if !defined(__NR_getdents) || \ 212 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 213 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 214 #endif 215 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 216 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 217 loff_t *, res, uint, wh); 218 #endif 219 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo) 220 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 221 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 222 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig) 223 #endif 224 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 225 _syscall2(int,sys_tkill,int,tid,int,sig) 226 #endif 227 #ifdef __NR_exit_group 228 _syscall1(int,exit_group,int,error_code) 229 #endif 230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 231 _syscall1(int,set_tid_address,int *,tidptr) 232 #endif 233 #if defined(TARGET_NR_futex) && defined(__NR_futex) 234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 235 const struct timespec *,timeout,int *,uaddr2,int,val3) 236 #endif 237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 239 unsigned long *, user_mask_ptr); 240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 242 unsigned long *, user_mask_ptr); 243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 244 void *, arg); 245 246 static bitmask_transtbl fcntl_flags_tbl[] = { 247 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 248 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 249 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 250 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 251 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 252 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 253 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 254 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 255 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 256 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 257 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 258 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 259 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 260 #if defined(O_DIRECT) 261 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 262 #endif 263 #if defined(O_NOATIME) 264 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 265 #endif 266 #if defined(O_CLOEXEC) 267 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 268 #endif 269 #if defined(O_PATH) 270 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 271 #endif 272 /* Don't terminate the list prematurely on 64-bit host+guest. 
*/ 273 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 274 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 275 #endif 276 { 0, 0, 0, 0 } 277 }; 278 279 #define COPY_UTSNAME_FIELD(dest, src) \ 280 do { \ 281 /* __NEW_UTS_LEN doesn't include terminating null */ \ 282 (void) strncpy((dest), (src), __NEW_UTS_LEN); \ 283 (dest)[__NEW_UTS_LEN] = '\0'; \ 284 } while (0) 285 286 static int sys_uname(struct new_utsname *buf) 287 { 288 struct utsname uts_buf; 289 290 if (uname(&uts_buf) < 0) 291 return (-1); 292 293 /* 294 * Just in case these have some differences, we 295 * translate utsname to new_utsname (which is the 296 * struct linux kernel uses). 297 */ 298 299 memset(buf, 0, sizeof(*buf)); 300 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname); 301 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename); 302 COPY_UTSNAME_FIELD(buf->release, uts_buf.release); 303 COPY_UTSNAME_FIELD(buf->version, uts_buf.version); 304 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine); 305 #ifdef _GNU_SOURCE 306 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname); 307 #endif 308 return (0); 309 310 #undef COPY_UTSNAME_FIELD 311 } 312 313 static int sys_getcwd1(char *buf, size_t size) 314 { 315 if (getcwd(buf, size) == NULL) { 316 /* getcwd() sets errno */ 317 return (-1); 318 } 319 return strlen(buf)+1; 320 } 321 322 #ifdef TARGET_NR_openat 323 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode) 324 { 325 /* 326 * open(2) has extra parameter 'mode' when called with 327 * flag O_CREAT. 328 */ 329 if ((flags & O_CREAT) != 0) { 330 return (openat(dirfd, pathname, flags, mode)); 331 } 332 return (openat(dirfd, pathname, flags)); 333 } 334 #endif 335 336 #ifdef TARGET_NR_utimensat 337 #ifdef CONFIG_UTIMENSAT 338 static int sys_utimensat(int dirfd, const char *pathname, 339 const struct timespec times[2], int flags) 340 { 341 if (pathname == NULL) 342 return futimens(dirfd, times); 343 else 344 return utimensat(dirfd, pathname, times, flags); 345 } 346 #elif defined(__NR_utimensat) 347 #define __NR_sys_utimensat __NR_utimensat 348 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 349 const struct timespec *,tsp,int,flags) 350 #else 351 static int sys_utimensat(int dirfd, const char *pathname, 352 const struct timespec times[2], int flags) 353 { 354 errno = ENOSYS; 355 return -1; 356 } 357 #endif 358 #endif /* TARGET_NR_utimensat */ 359 360 #ifdef CONFIG_INOTIFY 361 #include <sys/inotify.h> 362 363 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 364 static int sys_inotify_init(void) 365 { 366 return (inotify_init()); 367 } 368 #endif 369 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 370 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 371 { 372 return (inotify_add_watch(fd, pathname, mask)); 373 } 374 #endif 375 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 376 static int sys_inotify_rm_watch(int fd, int32_t wd) 377 { 378 return (inotify_rm_watch(fd, wd)); 379 } 380 #endif 381 #ifdef CONFIG_INOTIFY1 382 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 383 static int sys_inotify_init1(int flags) 384 { 385 return (inotify_init1(flags)); 386 } 387 #endif 388 #endif 389 #else 390 /* Userspace can usually survive runtime without inotify */ 391 #undef TARGET_NR_inotify_init 392 #undef TARGET_NR_inotify_init1 393 #undef TARGET_NR_inotify_add_watch 394 #undef TARGET_NR_inotify_rm_watch 395 #endif /* CONFIG_INOTIFY */ 396 397 #if 
defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be the one used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunk).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
693 */ 694 target_munmap(mapped_addr, new_alloc_size); 695 mapped_addr = -1; 696 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk); 697 } 698 else { 699 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk); 700 } 701 702 #if defined(TARGET_ALPHA) 703 /* We (partially) emulate OSF/1 on Alpha, which requires we 704 return a proper errno, not an unchanged brk value. */ 705 return -TARGET_ENOMEM; 706 #endif 707 /* For everything else, return the previous break. */ 708 return target_brk; 709 } 710 711 static inline abi_long copy_from_user_fdset(fd_set *fds, 712 abi_ulong target_fds_addr, 713 int n) 714 { 715 int i, nw, j, k; 716 abi_ulong b, *target_fds; 717 718 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 719 if (!(target_fds = lock_user(VERIFY_READ, 720 target_fds_addr, 721 sizeof(abi_ulong) * nw, 722 1))) 723 return -TARGET_EFAULT; 724 725 FD_ZERO(fds); 726 k = 0; 727 for (i = 0; i < nw; i++) { 728 /* grab the abi_ulong */ 729 __get_user(b, &target_fds[i]); 730 for (j = 0; j < TARGET_ABI_BITS; j++) { 731 /* check the bit inside the abi_ulong */ 732 if ((b >> j) & 1) 733 FD_SET(k, fds); 734 k++; 735 } 736 } 737 738 unlock_user(target_fds, target_fds_addr, 0); 739 740 return 0; 741 } 742 743 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr, 744 abi_ulong target_fds_addr, 745 int n) 746 { 747 if (target_fds_addr) { 748 if (copy_from_user_fdset(fds, target_fds_addr, n)) 749 return -TARGET_EFAULT; 750 *fds_ptr = fds; 751 } else { 752 *fds_ptr = NULL; 753 } 754 return 0; 755 } 756 757 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr, 758 const fd_set *fds, 759 int n) 760 { 761 int i, nw, j, k; 762 abi_long v; 763 abi_ulong *target_fds; 764 765 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 766 if (!(target_fds = lock_user(VERIFY_WRITE, 767 target_fds_addr, 768 sizeof(abi_ulong) * nw, 769 0))) 770 return -TARGET_EFAULT; 771 772 k = 0; 773 for (i = 0; i < nw; i++) { 774 v = 0; 775 for (j = 0; j < TARGET_ABI_BITS; j++) { 776 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j); 777 k++; 778 } 779 __put_user(v, &target_fds[i]); 780 } 781 782 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw); 783 784 return 0; 785 } 786 787 #if defined(__alpha__) 788 #define HOST_HZ 1024 789 #else 790 #define HOST_HZ 100 791 #endif 792 793 static inline abi_long host_to_target_clock_t(long ticks) 794 { 795 #if HOST_HZ == TARGET_HZ 796 return ticks; 797 #else 798 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ; 799 #endif 800 } 801 802 static inline abi_long host_to_target_rusage(abi_ulong target_addr, 803 const struct rusage *rusage) 804 { 805 struct target_rusage *target_rusage; 806 807 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) 808 return -TARGET_EFAULT; 809 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec); 810 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec); 811 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec); 812 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec); 813 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss); 814 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss); 815 target_rusage->ru_idrss = tswapal(rusage->ru_idrss); 816 target_rusage->ru_isrss = tswapal(rusage->ru_isrss); 817 target_rusage->ru_minflt = tswapal(rusage->ru_minflt); 818 target_rusage->ru_majflt = tswapal(rusage->ru_majflt); 819 target_rusage->ru_nswap = tswapal(rusage->ru_nswap); 820 target_rusage->ru_inblock = tswapal(rusage->ru_inblock); 821 
target_rusage->ru_oublock = tswapal(rusage->ru_oublock); 822 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd); 823 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv); 824 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals); 825 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw); 826 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw); 827 unlock_user_struct(target_rusage, target_addr, 1); 828 829 return 0; 830 } 831 832 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim) 833 { 834 abi_ulong target_rlim_swap; 835 rlim_t result; 836 837 target_rlim_swap = tswapal(target_rlim); 838 if (target_rlim_swap == TARGET_RLIM_INFINITY) 839 return RLIM_INFINITY; 840 841 result = target_rlim_swap; 842 if (target_rlim_swap != (rlim_t)result) 843 return RLIM_INFINITY; 844 845 return result; 846 } 847 848 static inline abi_ulong host_to_target_rlim(rlim_t rlim) 849 { 850 abi_ulong target_rlim_swap; 851 abi_ulong result; 852 853 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) 854 target_rlim_swap = TARGET_RLIM_INFINITY; 855 else 856 target_rlim_swap = rlim; 857 result = tswapal(target_rlim_swap); 858 859 return result; 860 } 861 862 static inline int target_to_host_resource(int code) 863 { 864 switch (code) { 865 case TARGET_RLIMIT_AS: 866 return RLIMIT_AS; 867 case TARGET_RLIMIT_CORE: 868 return RLIMIT_CORE; 869 case TARGET_RLIMIT_CPU: 870 return RLIMIT_CPU; 871 case TARGET_RLIMIT_DATA: 872 return RLIMIT_DATA; 873 case TARGET_RLIMIT_FSIZE: 874 return RLIMIT_FSIZE; 875 case TARGET_RLIMIT_LOCKS: 876 return RLIMIT_LOCKS; 877 case TARGET_RLIMIT_MEMLOCK: 878 return RLIMIT_MEMLOCK; 879 case TARGET_RLIMIT_MSGQUEUE: 880 return RLIMIT_MSGQUEUE; 881 case TARGET_RLIMIT_NICE: 882 return RLIMIT_NICE; 883 case TARGET_RLIMIT_NOFILE: 884 return RLIMIT_NOFILE; 885 case TARGET_RLIMIT_NPROC: 886 return RLIMIT_NPROC; 887 case TARGET_RLIMIT_RSS: 888 return RLIMIT_RSS; 889 case TARGET_RLIMIT_RTPRIO: 890 return RLIMIT_RTPRIO; 891 case TARGET_RLIMIT_SIGPENDING: 892 return RLIMIT_SIGPENDING; 893 case TARGET_RLIMIT_STACK: 894 return RLIMIT_STACK; 895 default: 896 return code; 897 } 898 } 899 900 static inline abi_long copy_from_user_timeval(struct timeval *tv, 901 abi_ulong target_tv_addr) 902 { 903 struct target_timeval *target_tv; 904 905 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) 906 return -TARGET_EFAULT; 907 908 __get_user(tv->tv_sec, &target_tv->tv_sec); 909 __get_user(tv->tv_usec, &target_tv->tv_usec); 910 911 unlock_user_struct(target_tv, target_tv_addr, 0); 912 913 return 0; 914 } 915 916 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 917 const struct timeval *tv) 918 { 919 struct target_timeval *target_tv; 920 921 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) 922 return -TARGET_EFAULT; 923 924 __put_user(tv->tv_sec, &target_tv->tv_sec); 925 __put_user(tv->tv_usec, &target_tv->tv_usec); 926 927 unlock_user_struct(target_tv, target_tv_addr, 1); 928 929 return 0; 930 } 931 932 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 933 #include <mqueue.h> 934 935 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr, 936 abi_ulong target_mq_attr_addr) 937 { 938 struct target_mq_attr *target_mq_attr; 939 940 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 941 target_mq_attr_addr, 1)) 942 return -TARGET_EFAULT; 943 944 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 945 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 946 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 947 
__get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 948 949 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 950 951 return 0; 952 } 953 954 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 955 const struct mq_attr *attr) 956 { 957 struct target_mq_attr *target_mq_attr; 958 959 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 960 target_mq_attr_addr, 0)) 961 return -TARGET_EFAULT; 962 963 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 964 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 965 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 966 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 967 968 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 969 970 return 0; 971 } 972 #endif 973 974 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 975 /* do_select() must return target values and target errnos. */ 976 static abi_long do_select(int n, 977 abi_ulong rfd_addr, abi_ulong wfd_addr, 978 abi_ulong efd_addr, abi_ulong target_tv_addr) 979 { 980 fd_set rfds, wfds, efds; 981 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 982 struct timeval tv, *tv_ptr; 983 abi_long ret; 984 985 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 986 if (ret) { 987 return ret; 988 } 989 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 990 if (ret) { 991 return ret; 992 } 993 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 994 if (ret) { 995 return ret; 996 } 997 998 if (target_tv_addr) { 999 if (copy_from_user_timeval(&tv, target_tv_addr)) 1000 return -TARGET_EFAULT; 1001 tv_ptr = &tv; 1002 } else { 1003 tv_ptr = NULL; 1004 } 1005 1006 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr)); 1007 1008 if (!is_error(ret)) { 1009 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1010 return -TARGET_EFAULT; 1011 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1012 return -TARGET_EFAULT; 1013 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1014 return -TARGET_EFAULT; 1015 1016 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv)) 1017 return -TARGET_EFAULT; 1018 } 1019 1020 return ret; 1021 } 1022 #endif 1023 1024 static abi_long do_pipe2(int host_pipe[], int flags) 1025 { 1026 #ifdef CONFIG_PIPE2 1027 return pipe2(host_pipe, flags); 1028 #else 1029 return -ENOSYS; 1030 #endif 1031 } 1032 1033 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, 1034 int flags, int is_pipe2) 1035 { 1036 int host_pipe[2]; 1037 abi_long ret; 1038 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); 1039 1040 if (is_error(ret)) 1041 return get_errno(ret); 1042 1043 /* Several targets have special calling conventions for the original 1044 pipe syscall, but didn't replicate this into the pipe2 syscall. 
     */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
1105 */ 1106 1107 if (sa_family == AF_UNIX) { 1108 if (len < unix_maxlen && len > 0) { 1109 char *cp = (char*)target_saddr; 1110 1111 if ( cp[len-1] && !cp[len] ) 1112 len++; 1113 } 1114 if (len > unix_maxlen) 1115 len = unix_maxlen; 1116 } 1117 1118 memcpy(addr, target_saddr, len); 1119 addr->sa_family = sa_family; 1120 unlock_user(target_saddr, target_addr, 0); 1121 1122 return 0; 1123 } 1124 1125 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1126 struct sockaddr *addr, 1127 socklen_t len) 1128 { 1129 struct target_sockaddr *target_saddr; 1130 1131 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1132 if (!target_saddr) 1133 return -TARGET_EFAULT; 1134 memcpy(target_saddr, addr, len); 1135 target_saddr->sa_family = tswap16(addr->sa_family); 1136 unlock_user(target_saddr, target_addr, len); 1137 1138 return 0; 1139 } 1140 1141 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1142 struct target_msghdr *target_msgh) 1143 { 1144 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1145 abi_long msg_controllen; 1146 abi_ulong target_cmsg_addr; 1147 struct target_cmsghdr *target_cmsg; 1148 socklen_t space = 0; 1149 1150 msg_controllen = tswapal(target_msgh->msg_controllen); 1151 if (msg_controllen < sizeof (struct target_cmsghdr)) 1152 goto the_end; 1153 target_cmsg_addr = tswapal(target_msgh->msg_control); 1154 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1155 if (!target_cmsg) 1156 return -TARGET_EFAULT; 1157 1158 while (cmsg && target_cmsg) { 1159 void *data = CMSG_DATA(cmsg); 1160 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1161 1162 int len = tswapal(target_cmsg->cmsg_len) 1163 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr)); 1164 1165 space += CMSG_SPACE(len); 1166 if (space > msgh->msg_controllen) { 1167 space -= CMSG_SPACE(len); 1168 gemu_log("Host cmsg overflow\n"); 1169 break; 1170 } 1171 1172 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1173 cmsg->cmsg_level = SOL_SOCKET; 1174 } else { 1175 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1176 } 1177 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1178 cmsg->cmsg_len = CMSG_LEN(len); 1179 1180 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1181 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1182 memcpy(data, target_data, len); 1183 } else { 1184 int *fd = (int *)data; 1185 int *target_fd = (int *)target_data; 1186 int i, numfds = len / sizeof(int); 1187 1188 for (i = 0; i < numfds; i++) 1189 fd[i] = tswap32(target_fd[i]); 1190 } 1191 1192 cmsg = CMSG_NXTHDR(msgh, cmsg); 1193 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1194 } 1195 unlock_user(target_cmsg, target_cmsg_addr, 0); 1196 the_end: 1197 msgh->msg_controllen = space; 1198 return 0; 1199 } 1200 1201 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1202 struct msghdr *msgh) 1203 { 1204 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1205 abi_long msg_controllen; 1206 abi_ulong target_cmsg_addr; 1207 struct target_cmsghdr *target_cmsg; 1208 socklen_t space = 0; 1209 1210 msg_controllen = tswapal(target_msgh->msg_controllen); 1211 if (msg_controllen < sizeof (struct target_cmsghdr)) 1212 goto the_end; 1213 target_cmsg_addr = tswapal(target_msgh->msg_control); 1214 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1215 if (!target_cmsg) 1216 return -TARGET_EFAULT; 1217 1218 while (cmsg && target_cmsg) { 1219 void *data = CMSG_DATA(cmsg); 1220 void 
*target_data = TARGET_CMSG_DATA(target_cmsg); 1221 1222 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr)); 1223 1224 space += TARGET_CMSG_SPACE(len); 1225 if (space > msg_controllen) { 1226 space -= TARGET_CMSG_SPACE(len); 1227 gemu_log("Target cmsg overflow\n"); 1228 break; 1229 } 1230 1231 if (cmsg->cmsg_level == SOL_SOCKET) { 1232 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1233 } else { 1234 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1235 } 1236 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1237 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len)); 1238 1239 if ((cmsg->cmsg_level == SOL_SOCKET) && 1240 (cmsg->cmsg_type == SCM_RIGHTS)) { 1241 int *fd = (int *)data; 1242 int *target_fd = (int *)target_data; 1243 int i, numfds = len / sizeof(int); 1244 1245 for (i = 0; i < numfds; i++) 1246 target_fd[i] = tswap32(fd[i]); 1247 } else if ((cmsg->cmsg_level == SOL_SOCKET) && 1248 (cmsg->cmsg_type == SO_TIMESTAMP) && 1249 (len == sizeof(struct timeval))) { 1250 /* copy struct timeval to target */ 1251 struct timeval *tv = (struct timeval *)data; 1252 struct target_timeval *target_tv = 1253 (struct target_timeval *)target_data; 1254 1255 target_tv->tv_sec = tswapal(tv->tv_sec); 1256 target_tv->tv_usec = tswapal(tv->tv_usec); 1257 } else { 1258 gemu_log("Unsupported ancillary data: %d/%d\n", 1259 cmsg->cmsg_level, cmsg->cmsg_type); 1260 memcpy(target_data, data, len); 1261 } 1262 1263 cmsg = CMSG_NXTHDR(msgh, cmsg); 1264 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1265 } 1266 unlock_user(target_cmsg, target_cmsg_addr, space); 1267 the_end: 1268 target_msgh->msg_controllen = tswapal(space); 1269 return 0; 1270 } 1271 1272 /* do_setsockopt() Must return target values and target errnos. */ 1273 static abi_long do_setsockopt(int sockfd, int level, int optname, 1274 abi_ulong optval_addr, socklen_t optlen) 1275 { 1276 abi_long ret; 1277 int val; 1278 struct ip_mreqn *ip_mreq; 1279 struct ip_mreq_source *ip_mreq_source; 1280 1281 switch(level) { 1282 case SOL_TCP: 1283 /* TCP options all take an 'int' value. 
*/ 1284 if (optlen < sizeof(uint32_t)) 1285 return -TARGET_EINVAL; 1286 1287 if (get_user_u32(val, optval_addr)) 1288 return -TARGET_EFAULT; 1289 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1290 break; 1291 case SOL_IP: 1292 switch(optname) { 1293 case IP_TOS: 1294 case IP_TTL: 1295 case IP_HDRINCL: 1296 case IP_ROUTER_ALERT: 1297 case IP_RECVOPTS: 1298 case IP_RETOPTS: 1299 case IP_PKTINFO: 1300 case IP_MTU_DISCOVER: 1301 case IP_RECVERR: 1302 case IP_RECVTOS: 1303 #ifdef IP_FREEBIND 1304 case IP_FREEBIND: 1305 #endif 1306 case IP_MULTICAST_TTL: 1307 case IP_MULTICAST_LOOP: 1308 val = 0; 1309 if (optlen >= sizeof(uint32_t)) { 1310 if (get_user_u32(val, optval_addr)) 1311 return -TARGET_EFAULT; 1312 } else if (optlen >= 1) { 1313 if (get_user_u8(val, optval_addr)) 1314 return -TARGET_EFAULT; 1315 } 1316 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1317 break; 1318 case IP_ADD_MEMBERSHIP: 1319 case IP_DROP_MEMBERSHIP: 1320 if (optlen < sizeof (struct target_ip_mreq) || 1321 optlen > sizeof (struct target_ip_mreqn)) 1322 return -TARGET_EINVAL; 1323 1324 ip_mreq = (struct ip_mreqn *) alloca(optlen); 1325 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1326 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1327 break; 1328 1329 case IP_BLOCK_SOURCE: 1330 case IP_UNBLOCK_SOURCE: 1331 case IP_ADD_SOURCE_MEMBERSHIP: 1332 case IP_DROP_SOURCE_MEMBERSHIP: 1333 if (optlen != sizeof (struct target_ip_mreq_source)) 1334 return -TARGET_EINVAL; 1335 1336 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1337 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1338 unlock_user (ip_mreq_source, optval_addr, 0); 1339 break; 1340 1341 default: 1342 goto unimplemented; 1343 } 1344 break; 1345 case SOL_IPV6: 1346 switch (optname) { 1347 case IPV6_MTU_DISCOVER: 1348 case IPV6_MTU: 1349 case IPV6_V6ONLY: 1350 case IPV6_RECVPKTINFO: 1351 val = 0; 1352 if (optlen < sizeof(uint32_t)) { 1353 return -TARGET_EINVAL; 1354 } 1355 if (get_user_u32(val, optval_addr)) { 1356 return -TARGET_EFAULT; 1357 } 1358 ret = get_errno(setsockopt(sockfd, level, optname, 1359 &val, sizeof(val))); 1360 break; 1361 default: 1362 goto unimplemented; 1363 } 1364 break; 1365 case SOL_RAW: 1366 switch (optname) { 1367 case ICMP_FILTER: 1368 /* struct icmp_filter takes an u32 value */ 1369 if (optlen < sizeof(uint32_t)) { 1370 return -TARGET_EINVAL; 1371 } 1372 1373 if (get_user_u32(val, optval_addr)) { 1374 return -TARGET_EFAULT; 1375 } 1376 ret = get_errno(setsockopt(sockfd, level, optname, 1377 &val, sizeof(val))); 1378 break; 1379 1380 default: 1381 goto unimplemented; 1382 } 1383 break; 1384 case TARGET_SOL_SOCKET: 1385 switch (optname) { 1386 case TARGET_SO_RCVTIMEO: 1387 { 1388 struct timeval tv; 1389 1390 optname = SO_RCVTIMEO; 1391 1392 set_timeout: 1393 if (optlen != sizeof(struct target_timeval)) { 1394 return -TARGET_EINVAL; 1395 } 1396 1397 if (copy_from_user_timeval(&tv, optval_addr)) { 1398 return -TARGET_EFAULT; 1399 } 1400 1401 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 1402 &tv, sizeof(tv))); 1403 return ret; 1404 } 1405 case TARGET_SO_SNDTIMEO: 1406 optname = SO_SNDTIMEO; 1407 goto set_timeout; 1408 case TARGET_SO_ATTACH_FILTER: 1409 { 1410 struct target_sock_fprog *tfprog; 1411 struct target_sock_filter *tfilter; 1412 struct sock_fprog fprog; 1413 struct sock_filter *filter; 1414 int i; 1415 1416 if (optlen != sizeof(*tfprog)) { 1417 return -TARGET_EINVAL; 1418 } 1419 if 
(!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = malloc(fprog.len * sizeof(*filter));
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos.
*/ 1522 static abi_long do_getsockopt(int sockfd, int level, int optname, 1523 abi_ulong optval_addr, abi_ulong optlen) 1524 { 1525 abi_long ret; 1526 int len, val; 1527 socklen_t lv; 1528 1529 switch(level) { 1530 case TARGET_SOL_SOCKET: 1531 level = SOL_SOCKET; 1532 switch (optname) { 1533 /* These don't just return a single integer */ 1534 case TARGET_SO_LINGER: 1535 case TARGET_SO_RCVTIMEO: 1536 case TARGET_SO_SNDTIMEO: 1537 case TARGET_SO_PEERNAME: 1538 goto unimplemented; 1539 case TARGET_SO_PEERCRED: { 1540 struct ucred cr; 1541 socklen_t crlen; 1542 struct target_ucred *tcr; 1543 1544 if (get_user_u32(len, optlen)) { 1545 return -TARGET_EFAULT; 1546 } 1547 if (len < 0) { 1548 return -TARGET_EINVAL; 1549 } 1550 1551 crlen = sizeof(cr); 1552 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 1553 &cr, &crlen)); 1554 if (ret < 0) { 1555 return ret; 1556 } 1557 if (len > crlen) { 1558 len = crlen; 1559 } 1560 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 1561 return -TARGET_EFAULT; 1562 } 1563 __put_user(cr.pid, &tcr->pid); 1564 __put_user(cr.uid, &tcr->uid); 1565 __put_user(cr.gid, &tcr->gid); 1566 unlock_user_struct(tcr, optval_addr, 1); 1567 if (put_user_u32(len, optlen)) { 1568 return -TARGET_EFAULT; 1569 } 1570 break; 1571 } 1572 /* Options with 'int' argument. */ 1573 case TARGET_SO_DEBUG: 1574 optname = SO_DEBUG; 1575 goto int_case; 1576 case TARGET_SO_REUSEADDR: 1577 optname = SO_REUSEADDR; 1578 goto int_case; 1579 case TARGET_SO_TYPE: 1580 optname = SO_TYPE; 1581 goto int_case; 1582 case TARGET_SO_ERROR: 1583 optname = SO_ERROR; 1584 goto int_case; 1585 case TARGET_SO_DONTROUTE: 1586 optname = SO_DONTROUTE; 1587 goto int_case; 1588 case TARGET_SO_BROADCAST: 1589 optname = SO_BROADCAST; 1590 goto int_case; 1591 case TARGET_SO_SNDBUF: 1592 optname = SO_SNDBUF; 1593 goto int_case; 1594 case TARGET_SO_RCVBUF: 1595 optname = SO_RCVBUF; 1596 goto int_case; 1597 case TARGET_SO_KEEPALIVE: 1598 optname = SO_KEEPALIVE; 1599 goto int_case; 1600 case TARGET_SO_OOBINLINE: 1601 optname = SO_OOBINLINE; 1602 goto int_case; 1603 case TARGET_SO_NO_CHECK: 1604 optname = SO_NO_CHECK; 1605 goto int_case; 1606 case TARGET_SO_PRIORITY: 1607 optname = SO_PRIORITY; 1608 goto int_case; 1609 #ifdef SO_BSDCOMPAT 1610 case TARGET_SO_BSDCOMPAT: 1611 optname = SO_BSDCOMPAT; 1612 goto int_case; 1613 #endif 1614 case TARGET_SO_PASSCRED: 1615 optname = SO_PASSCRED; 1616 goto int_case; 1617 case TARGET_SO_TIMESTAMP: 1618 optname = SO_TIMESTAMP; 1619 goto int_case; 1620 case TARGET_SO_RCVLOWAT: 1621 optname = SO_RCVLOWAT; 1622 goto int_case; 1623 default: 1624 goto int_case; 1625 } 1626 break; 1627 case SOL_TCP: 1628 /* TCP options all take an 'int' value. 
*/ 1629 int_case: 1630 if (get_user_u32(len, optlen)) 1631 return -TARGET_EFAULT; 1632 if (len < 0) 1633 return -TARGET_EINVAL; 1634 lv = sizeof(lv); 1635 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1636 if (ret < 0) 1637 return ret; 1638 if (len > lv) 1639 len = lv; 1640 if (len == 4) { 1641 if (put_user_u32(val, optval_addr)) 1642 return -TARGET_EFAULT; 1643 } else { 1644 if (put_user_u8(val, optval_addr)) 1645 return -TARGET_EFAULT; 1646 } 1647 if (put_user_u32(len, optlen)) 1648 return -TARGET_EFAULT; 1649 break; 1650 case SOL_IP: 1651 switch(optname) { 1652 case IP_TOS: 1653 case IP_TTL: 1654 case IP_HDRINCL: 1655 case IP_ROUTER_ALERT: 1656 case IP_RECVOPTS: 1657 case IP_RETOPTS: 1658 case IP_PKTINFO: 1659 case IP_MTU_DISCOVER: 1660 case IP_RECVERR: 1661 case IP_RECVTOS: 1662 #ifdef IP_FREEBIND 1663 case IP_FREEBIND: 1664 #endif 1665 case IP_MULTICAST_TTL: 1666 case IP_MULTICAST_LOOP: 1667 if (get_user_u32(len, optlen)) 1668 return -TARGET_EFAULT; 1669 if (len < 0) 1670 return -TARGET_EINVAL; 1671 lv = sizeof(lv); 1672 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1673 if (ret < 0) 1674 return ret; 1675 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 1676 len = 1; 1677 if (put_user_u32(len, optlen) 1678 || put_user_u8(val, optval_addr)) 1679 return -TARGET_EFAULT; 1680 } else { 1681 if (len > sizeof(int)) 1682 len = sizeof(int); 1683 if (put_user_u32(len, optlen) 1684 || put_user_u32(val, optval_addr)) 1685 return -TARGET_EFAULT; 1686 } 1687 break; 1688 default: 1689 ret = -TARGET_ENOPROTOOPT; 1690 break; 1691 } 1692 break; 1693 default: 1694 unimplemented: 1695 gemu_log("getsockopt level=%d optname=%d not yet supported\n", 1696 level, optname); 1697 ret = -TARGET_EOPNOTSUPP; 1698 break; 1699 } 1700 return ret; 1701 } 1702 1703 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 1704 int count, int copy) 1705 { 1706 struct target_iovec *target_vec; 1707 struct iovec *vec; 1708 abi_ulong total_len, max_len; 1709 int i; 1710 1711 if (count == 0) { 1712 errno = 0; 1713 return NULL; 1714 } 1715 if (count < 0 || count > IOV_MAX) { 1716 errno = EINVAL; 1717 return NULL; 1718 } 1719 1720 vec = calloc(count, sizeof(struct iovec)); 1721 if (vec == NULL) { 1722 errno = ENOMEM; 1723 return NULL; 1724 } 1725 1726 target_vec = lock_user(VERIFY_READ, target_addr, 1727 count * sizeof(struct target_iovec), 1); 1728 if (target_vec == NULL) { 1729 errno = EFAULT; 1730 goto fail2; 1731 } 1732 1733 /* ??? If host page size > target page size, this will result in a 1734 value larger than what we can actually support. */ 1735 max_len = 0x7fffffff & TARGET_PAGE_MASK; 1736 total_len = 0; 1737 1738 for (i = 0; i < count; i++) { 1739 abi_ulong base = tswapal(target_vec[i].iov_base); 1740 abi_long len = tswapal(target_vec[i].iov_len); 1741 1742 if (len < 0) { 1743 errno = EINVAL; 1744 goto fail; 1745 } else if (len == 0) { 1746 /* Zero length pointer is ignored. 
     */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                errno = EFAULT;
                goto fail;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    free(vec);
 fail2:
    unlock_user(target_vec, target_addr, 0);
    return NULL;
}

static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}

static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT;  /* NETLINK socket connections are not possible */
    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
    }
    return ret;
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos.
*/ 1885 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1886 socklen_t addrlen) 1887 { 1888 void *addr; 1889 abi_long ret; 1890 1891 if ((int)addrlen < 0) { 1892 return -TARGET_EINVAL; 1893 } 1894 1895 addr = alloca(addrlen); 1896 1897 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1898 if (ret) 1899 return ret; 1900 1901 return get_errno(connect(sockfd, addr, addrlen)); 1902 } 1903 1904 /* do_sendrecvmsg() Must return target values and target errnos. */ 1905 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1906 int flags, int send) 1907 { 1908 abi_long ret, len; 1909 struct target_msghdr *msgp; 1910 struct msghdr msg; 1911 int count; 1912 struct iovec *vec; 1913 abi_ulong target_vec; 1914 1915 /* FIXME */ 1916 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1917 msgp, 1918 target_msg, 1919 send ? 1 : 0)) 1920 return -TARGET_EFAULT; 1921 if (msgp->msg_name) { 1922 msg.msg_namelen = tswap32(msgp->msg_namelen); 1923 msg.msg_name = alloca(msg.msg_namelen); 1924 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1925 msg.msg_namelen); 1926 if (ret) { 1927 goto out2; 1928 } 1929 } else { 1930 msg.msg_name = NULL; 1931 msg.msg_namelen = 0; 1932 } 1933 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1934 msg.msg_control = alloca(msg.msg_controllen); 1935 msg.msg_flags = tswap32(msgp->msg_flags); 1936 1937 count = tswapal(msgp->msg_iovlen); 1938 target_vec = tswapal(msgp->msg_iov); 1939 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 1940 target_vec, count, send); 1941 if (vec == NULL) { 1942 ret = -host_to_target_errno(errno); 1943 goto out2; 1944 } 1945 msg.msg_iovlen = count; 1946 msg.msg_iov = vec; 1947 1948 if (send) { 1949 ret = target_to_host_cmsg(&msg, msgp); 1950 if (ret == 0) 1951 ret = get_errno(sendmsg(fd, &msg, flags)); 1952 } else { 1953 ret = get_errno(recvmsg(fd, &msg, flags)); 1954 if (!is_error(ret)) { 1955 len = ret; 1956 ret = host_to_target_cmsg(msgp, &msg); 1957 if (!is_error(ret)) { 1958 msgp->msg_namelen = tswap32(msg.msg_namelen); 1959 if (msg.msg_name != NULL) { 1960 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 1961 msg.msg_name, msg.msg_namelen); 1962 if (ret) { 1963 goto out; 1964 } 1965 } 1966 1967 ret = len; 1968 } 1969 } 1970 } 1971 1972 out: 1973 unlock_iovec(vec, target_vec, count, !send); 1974 out2: 1975 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 1976 return ret; 1977 } 1978 1979 /* If we don't have a system accept4() then just call accept. 1980 * The callsites to do_accept4() will ensure that they don't 1981 * pass a non-zero flags argument in this config. 1982 */ 1983 #ifndef CONFIG_ACCEPT4 1984 static inline int accept4(int sockfd, struct sockaddr *addr, 1985 socklen_t *addrlen, int flags) 1986 { 1987 assert(flags == 0); 1988 return accept(sockfd, addr, addrlen); 1989 } 1990 #endif 1991 1992 /* do_accept4() Must return target values and target errnos. 
*/ 1993 static abi_long do_accept4(int fd, abi_ulong target_addr, 1994 abi_ulong target_addrlen_addr, int flags) 1995 { 1996 socklen_t addrlen; 1997 void *addr; 1998 abi_long ret; 1999 2000 if (target_addr == 0) { 2001 return get_errno(accept4(fd, NULL, NULL, flags)); 2002 } 2003 2004 /* linux returns EINVAL if addrlen pointer is invalid */ 2005 if (get_user_u32(addrlen, target_addrlen_addr)) 2006 return -TARGET_EINVAL; 2007 2008 if ((int)addrlen < 0) { 2009 return -TARGET_EINVAL; 2010 } 2011 2012 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2013 return -TARGET_EINVAL; 2014 2015 addr = alloca(addrlen); 2016 2017 ret = get_errno(accept4(fd, addr, &addrlen, flags)); 2018 if (!is_error(ret)) { 2019 host_to_target_sockaddr(target_addr, addr, addrlen); 2020 if (put_user_u32(addrlen, target_addrlen_addr)) 2021 ret = -TARGET_EFAULT; 2022 } 2023 return ret; 2024 } 2025 2026 /* do_getpeername() Must return target values and target errnos. */ 2027 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2028 abi_ulong target_addrlen_addr) 2029 { 2030 socklen_t addrlen; 2031 void *addr; 2032 abi_long ret; 2033 2034 if (get_user_u32(addrlen, target_addrlen_addr)) 2035 return -TARGET_EFAULT; 2036 2037 if ((int)addrlen < 0) { 2038 return -TARGET_EINVAL; 2039 } 2040 2041 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2042 return -TARGET_EFAULT; 2043 2044 addr = alloca(addrlen); 2045 2046 ret = get_errno(getpeername(fd, addr, &addrlen)); 2047 if (!is_error(ret)) { 2048 host_to_target_sockaddr(target_addr, addr, addrlen); 2049 if (put_user_u32(addrlen, target_addrlen_addr)) 2050 ret = -TARGET_EFAULT; 2051 } 2052 return ret; 2053 } 2054 2055 /* do_getsockname() Must return target values and target errnos. */ 2056 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2057 abi_ulong target_addrlen_addr) 2058 { 2059 socklen_t addrlen; 2060 void *addr; 2061 abi_long ret; 2062 2063 if (get_user_u32(addrlen, target_addrlen_addr)) 2064 return -TARGET_EFAULT; 2065 2066 if ((int)addrlen < 0) { 2067 return -TARGET_EINVAL; 2068 } 2069 2070 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2071 return -TARGET_EFAULT; 2072 2073 addr = alloca(addrlen); 2074 2075 ret = get_errno(getsockname(fd, addr, &addrlen)); 2076 if (!is_error(ret)) { 2077 host_to_target_sockaddr(target_addr, addr, addrlen); 2078 if (put_user_u32(addrlen, target_addrlen_addr)) 2079 ret = -TARGET_EFAULT; 2080 } 2081 return ret; 2082 } 2083 2084 /* do_socketpair() Must return target values and target errnos. */ 2085 static abi_long do_socketpair(int domain, int type, int protocol, 2086 abi_ulong target_tab_addr) 2087 { 2088 int tab[2]; 2089 abi_long ret; 2090 2091 target_to_host_sock_type(&type); 2092 2093 ret = get_errno(socketpair(domain, type, protocol, tab)); 2094 if (!is_error(ret)) { 2095 if (put_user_s32(tab[0], target_tab_addr) 2096 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2097 ret = -TARGET_EFAULT; 2098 } 2099 return ret; 2100 } 2101 2102 /* do_sendto() Must return target values and target errnos. 
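 * This helper backs both send() and sendto(): with target_addr == 0 no
 * sockaddr conversion is done and the host send() is used, otherwise the
 * guest address is converted and passed to the host sendto().  The
 * socketcall dispatcher below funnels both operations here, roughly:
 *
 *     case SOCKOP_send:   return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
 *     case SOCKOP_sendto: return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);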
*/ 2103 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2104 abi_ulong target_addr, socklen_t addrlen) 2105 { 2106 void *addr; 2107 void *host_msg; 2108 abi_long ret; 2109 2110 if ((int)addrlen < 0) { 2111 return -TARGET_EINVAL; 2112 } 2113 2114 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2115 if (!host_msg) 2116 return -TARGET_EFAULT; 2117 if (target_addr) { 2118 addr = alloca(addrlen); 2119 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2120 if (ret) { 2121 unlock_user(host_msg, msg, 0); 2122 return ret; 2123 } 2124 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2125 } else { 2126 ret = get_errno(send(fd, host_msg, len, flags)); 2127 } 2128 unlock_user(host_msg, msg, 0); 2129 return ret; 2130 } 2131 2132 /* do_recvfrom() Must return target values and target errnos. */ 2133 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2134 abi_ulong target_addr, 2135 abi_ulong target_addrlen) 2136 { 2137 socklen_t addrlen; 2138 void *addr; 2139 void *host_msg; 2140 abi_long ret; 2141 2142 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2143 if (!host_msg) 2144 return -TARGET_EFAULT; 2145 if (target_addr) { 2146 if (get_user_u32(addrlen, target_addrlen)) { 2147 ret = -TARGET_EFAULT; 2148 goto fail; 2149 } 2150 if ((int)addrlen < 0) { 2151 ret = -TARGET_EINVAL; 2152 goto fail; 2153 } 2154 addr = alloca(addrlen); 2155 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2156 } else { 2157 addr = NULL; /* To keep compiler quiet. */ 2158 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2159 } 2160 if (!is_error(ret)) { 2161 if (target_addr) { 2162 host_to_target_sockaddr(target_addr, addr, addrlen); 2163 if (put_user_u32(addrlen, target_addrlen)) { 2164 ret = -TARGET_EFAULT; 2165 goto fail; 2166 } 2167 } 2168 unlock_user(host_msg, msg, len); 2169 } else { 2170 fail: 2171 unlock_user(host_msg, msg, 0); 2172 } 2173 return ret; 2174 } 2175 2176 #ifdef TARGET_NR_socketcall 2177 /* do_socketcall() Must return target values and target errnos. 
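 * On targets that multiplex the socket API through a single socketcall
 * syscall, num selects the operation and vptr points at an array of
 * abi_long arguments in guest memory; ac[] below gives the argument count
 * for each operation.  Conceptually the guest's C library does something
 * like this for connect() (sketch only, with illustrative names):
 *
 *     abi_long args[3] = { sockfd, (abi_long)addr_ptr, addrlen };
 *     socketcall(SOCKOP_connect, args);
 *
 * and this function then reads ac[SOCKOP_connect] == 3 values back out of
 * that guest array with get_user_ual() and calls do_connect(a[0], a[1], a[2]).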
*/ 2178 static abi_long do_socketcall(int num, abi_ulong vptr) 2179 { 2180 static const unsigned ac[] = { /* number of arguments per call */ 2181 [SOCKOP_socket] = 3, /* domain, type, protocol */ 2182 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */ 2183 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */ 2184 [SOCKOP_listen] = 2, /* sockfd, backlog */ 2185 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */ 2186 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */ 2187 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */ 2188 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */ 2189 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */ 2190 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */ 2191 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */ 2192 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2193 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2194 [SOCKOP_shutdown] = 2, /* sockfd, how */ 2195 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */ 2196 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */ 2197 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2198 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2199 }; 2200 abi_long a[6]; /* max 6 args */ 2201 2202 /* first, collect the arguments in a[] according to ac[] */ 2203 if (num >= 0 && num < ARRAY_SIZE(ac)) { 2204 unsigned i; 2205 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */ 2206 for (i = 0; i < ac[num]; ++i) { 2207 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 2208 return -TARGET_EFAULT; 2209 } 2210 } 2211 } 2212 2213 /* now when we have the args, actually handle the call */ 2214 switch (num) { 2215 case SOCKOP_socket: /* domain, type, protocol */ 2216 return do_socket(a[0], a[1], a[2]); 2217 case SOCKOP_bind: /* sockfd, addr, addrlen */ 2218 return do_bind(a[0], a[1], a[2]); 2219 case SOCKOP_connect: /* sockfd, addr, addrlen */ 2220 return do_connect(a[0], a[1], a[2]); 2221 case SOCKOP_listen: /* sockfd, backlog */ 2222 return get_errno(listen(a[0], a[1])); 2223 case SOCKOP_accept: /* sockfd, addr, addrlen */ 2224 return do_accept4(a[0], a[1], a[2], 0); 2225 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */ 2226 return do_accept4(a[0], a[1], a[2], a[3]); 2227 case SOCKOP_getsockname: /* sockfd, addr, addrlen */ 2228 return do_getsockname(a[0], a[1], a[2]); 2229 case SOCKOP_getpeername: /* sockfd, addr, addrlen */ 2230 return do_getpeername(a[0], a[1], a[2]); 2231 case SOCKOP_socketpair: /* domain, type, protocol, tab */ 2232 return do_socketpair(a[0], a[1], a[2], a[3]); 2233 case SOCKOP_send: /* sockfd, msg, len, flags */ 2234 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 2235 case SOCKOP_recv: /* sockfd, msg, len, flags */ 2236 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 2237 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */ 2238 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 2239 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */ 2240 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 2241 case SOCKOP_shutdown: /* sockfd, how */ 2242 return get_errno(shutdown(a[0], a[1])); 2243 case SOCKOP_sendmsg: /* sockfd, msg, flags */ 2244 return do_sendrecvmsg(a[0], a[1], a[2], 1); 2245 case SOCKOP_recvmsg: /* sockfd, msg, flags */ 2246 return do_sendrecvmsg(a[0], a[1], a[2], 0); 2247 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */ 2248 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 2249 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, 
optlen */ 2250 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 2251 default: 2252 gemu_log("Unsupported socketcall: %d\n", num); 2253 return -TARGET_ENOSYS; 2254 } 2255 } 2256 #endif 2257 2258 #define N_SHM_REGIONS 32 2259 2260 static struct shm_region { 2261 abi_ulong start; 2262 abi_ulong size; 2263 } shm_regions[N_SHM_REGIONS]; 2264 2265 struct target_semid_ds 2266 { 2267 struct target_ipc_perm sem_perm; 2268 abi_ulong sem_otime; 2269 abi_ulong __unused1; 2270 abi_ulong sem_ctime; 2271 abi_ulong __unused2; 2272 abi_ulong sem_nsems; 2273 abi_ulong __unused3; 2274 abi_ulong __unused4; 2275 }; 2276 2277 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2278 abi_ulong target_addr) 2279 { 2280 struct target_ipc_perm *target_ip; 2281 struct target_semid_ds *target_sd; 2282 2283 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2284 return -TARGET_EFAULT; 2285 target_ip = &(target_sd->sem_perm); 2286 host_ip->__key = tswap32(target_ip->__key); 2287 host_ip->uid = tswap32(target_ip->uid); 2288 host_ip->gid = tswap32(target_ip->gid); 2289 host_ip->cuid = tswap32(target_ip->cuid); 2290 host_ip->cgid = tswap32(target_ip->cgid); 2291 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2292 host_ip->mode = tswap32(target_ip->mode); 2293 #else 2294 host_ip->mode = tswap16(target_ip->mode); 2295 #endif 2296 #if defined(TARGET_PPC) 2297 host_ip->__seq = tswap32(target_ip->__seq); 2298 #else 2299 host_ip->__seq = tswap16(target_ip->__seq); 2300 #endif 2301 unlock_user_struct(target_sd, target_addr, 0); 2302 return 0; 2303 } 2304 2305 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2306 struct ipc_perm *host_ip) 2307 { 2308 struct target_ipc_perm *target_ip; 2309 struct target_semid_ds *target_sd; 2310 2311 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2312 return -TARGET_EFAULT; 2313 target_ip = &(target_sd->sem_perm); 2314 target_ip->__key = tswap32(host_ip->__key); 2315 target_ip->uid = tswap32(host_ip->uid); 2316 target_ip->gid = tswap32(host_ip->gid); 2317 target_ip->cuid = tswap32(host_ip->cuid); 2318 target_ip->cgid = tswap32(host_ip->cgid); 2319 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2320 target_ip->mode = tswap32(host_ip->mode); 2321 #else 2322 target_ip->mode = tswap16(host_ip->mode); 2323 #endif 2324 #if defined(TARGET_PPC) 2325 target_ip->__seq = tswap32(host_ip->__seq); 2326 #else 2327 target_ip->__seq = tswap16(host_ip->__seq); 2328 #endif 2329 unlock_user_struct(target_sd, target_addr, 1); 2330 return 0; 2331 } 2332 2333 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2334 abi_ulong target_addr) 2335 { 2336 struct target_semid_ds *target_sd; 2337 2338 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2339 return -TARGET_EFAULT; 2340 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2341 return -TARGET_EFAULT; 2342 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2343 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2344 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2345 unlock_user_struct(target_sd, target_addr, 0); 2346 return 0; 2347 } 2348 2349 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2350 struct semid_ds *host_sd) 2351 { 2352 struct target_semid_ds *target_sd; 2353 2354 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2355 return -TARGET_EFAULT; 2356 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2357 return -TARGET_EFAULT; 2358 
target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2359 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2360 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2361 unlock_user_struct(target_sd, target_addr, 1); 2362 return 0; 2363 } 2364 2365 struct target_seminfo { 2366 int semmap; 2367 int semmni; 2368 int semmns; 2369 int semmnu; 2370 int semmsl; 2371 int semopm; 2372 int semume; 2373 int semusz; 2374 int semvmx; 2375 int semaem; 2376 }; 2377 2378 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2379 struct seminfo *host_seminfo) 2380 { 2381 struct target_seminfo *target_seminfo; 2382 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2383 return -TARGET_EFAULT; 2384 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2385 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2386 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2387 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2388 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2389 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2390 __put_user(host_seminfo->semume, &target_seminfo->semume); 2391 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2392 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2393 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2394 unlock_user_struct(target_seminfo, target_addr, 1); 2395 return 0; 2396 } 2397 2398 union semun { 2399 int val; 2400 struct semid_ds *buf; 2401 unsigned short *array; 2402 struct seminfo *__buf; 2403 }; 2404 2405 union target_semun { 2406 int val; 2407 abi_ulong buf; 2408 abi_ulong array; 2409 abi_ulong __buf; 2410 }; 2411 2412 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2413 abi_ulong target_addr) 2414 { 2415 int nsems; 2416 unsigned short *array; 2417 union semun semun; 2418 struct semid_ds semid_ds; 2419 int i, ret; 2420 2421 semun.buf = &semid_ds; 2422 2423 ret = semctl(semid, 0, IPC_STAT, semun); 2424 if (ret == -1) 2425 return get_errno(ret); 2426 2427 nsems = semid_ds.sem_nsems; 2428 2429 *host_array = malloc(nsems*sizeof(unsigned short)); 2430 array = lock_user(VERIFY_READ, target_addr, 2431 nsems*sizeof(unsigned short), 1); 2432 if (!array) 2433 return -TARGET_EFAULT; 2434 2435 for(i=0; i<nsems; i++) { 2436 __get_user((*host_array)[i], &array[i]); 2437 } 2438 unlock_user(array, target_addr, 0); 2439 2440 return 0; 2441 } 2442 2443 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2444 unsigned short **host_array) 2445 { 2446 int nsems; 2447 unsigned short *array; 2448 union semun semun; 2449 struct semid_ds semid_ds; 2450 int i, ret; 2451 2452 semun.buf = &semid_ds; 2453 2454 ret = semctl(semid, 0, IPC_STAT, semun); 2455 if (ret == -1) 2456 return get_errno(ret); 2457 2458 nsems = semid_ds.sem_nsems; 2459 2460 array = lock_user(VERIFY_WRITE, target_addr, 2461 nsems*sizeof(unsigned short), 0); 2462 if (!array) 2463 return -TARGET_EFAULT; 2464 2465 for(i=0; i<nsems; i++) { 2466 __put_user((*host_array)[i], &array[i]); 2467 } 2468 free(*host_array); 2469 unlock_user(array, target_addr, 1); 2470 2471 return 0; 2472 } 2473 2474 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2475 union target_semun target_su) 2476 { 2477 union semun arg; 2478 struct semid_ds dsarg; 2479 unsigned short *array = NULL; 2480 struct seminfo seminfo; 2481 abi_long ret = -TARGET_EINVAL; 2482 abi_long err; 2483 cmd &= 0xff; 2484 2485 switch( cmd ) { 2486 case GETVAL: 2487 case 
SETVAL: 2488 arg.val = tswap32(target_su.val); 2489 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2490 target_su.val = tswap32(arg.val); 2491 break; 2492 case GETALL: 2493 case SETALL: 2494 err = target_to_host_semarray(semid, &array, target_su.array); 2495 if (err) 2496 return err; 2497 arg.array = array; 2498 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2499 err = host_to_target_semarray(semid, target_su.array, &array); 2500 if (err) 2501 return err; 2502 break; 2503 case IPC_STAT: 2504 case IPC_SET: 2505 case SEM_STAT: 2506 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2507 if (err) 2508 return err; 2509 arg.buf = &dsarg; 2510 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2511 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2512 if (err) 2513 return err; 2514 break; 2515 case IPC_INFO: 2516 case SEM_INFO: 2517 arg.__buf = &seminfo; 2518 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2519 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2520 if (err) 2521 return err; 2522 break; 2523 case IPC_RMID: 2524 case GETPID: 2525 case GETNCNT: 2526 case GETZCNT: 2527 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2528 break; 2529 } 2530 2531 return ret; 2532 } 2533 2534 struct target_sembuf { 2535 unsigned short sem_num; 2536 short sem_op; 2537 short sem_flg; 2538 }; 2539 2540 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2541 abi_ulong target_addr, 2542 unsigned nsops) 2543 { 2544 struct target_sembuf *target_sembuf; 2545 int i; 2546 2547 target_sembuf = lock_user(VERIFY_READ, target_addr, 2548 nsops*sizeof(struct target_sembuf), 1); 2549 if (!target_sembuf) 2550 return -TARGET_EFAULT; 2551 2552 for(i=0; i<nsops; i++) { 2553 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2554 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2555 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2556 } 2557 2558 unlock_user(target_sembuf, target_addr, 0); 2559 2560 return 0; 2561 } 2562 2563 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2564 { 2565 struct sembuf sops[nsops]; 2566 2567 if (target_to_host_sembuf(sops, ptr, nsops)) 2568 return -TARGET_EFAULT; 2569 2570 return get_errno(semop(semid, sops, nsops)); 2571 } 2572 2573 struct target_msqid_ds 2574 { 2575 struct target_ipc_perm msg_perm; 2576 abi_ulong msg_stime; 2577 #if TARGET_ABI_BITS == 32 2578 abi_ulong __unused1; 2579 #endif 2580 abi_ulong msg_rtime; 2581 #if TARGET_ABI_BITS == 32 2582 abi_ulong __unused2; 2583 #endif 2584 abi_ulong msg_ctime; 2585 #if TARGET_ABI_BITS == 32 2586 abi_ulong __unused3; 2587 #endif 2588 abi_ulong __msg_cbytes; 2589 abi_ulong msg_qnum; 2590 abi_ulong msg_qbytes; 2591 abi_ulong msg_lspid; 2592 abi_ulong msg_lrpid; 2593 abi_ulong __unused4; 2594 abi_ulong __unused5; 2595 }; 2596 2597 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2598 abi_ulong target_addr) 2599 { 2600 struct target_msqid_ds *target_md; 2601 2602 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2603 return -TARGET_EFAULT; 2604 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2605 return -TARGET_EFAULT; 2606 host_md->msg_stime = tswapal(target_md->msg_stime); 2607 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2608 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2609 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2610 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2611 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2612 host_md->msg_lspid = 
tswapal(target_md->msg_lspid); 2613 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2614 unlock_user_struct(target_md, target_addr, 0); 2615 return 0; 2616 } 2617 2618 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2619 struct msqid_ds *host_md) 2620 { 2621 struct target_msqid_ds *target_md; 2622 2623 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2624 return -TARGET_EFAULT; 2625 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2626 return -TARGET_EFAULT; 2627 target_md->msg_stime = tswapal(host_md->msg_stime); 2628 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2629 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2630 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2631 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2632 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2633 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2634 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2635 unlock_user_struct(target_md, target_addr, 1); 2636 return 0; 2637 } 2638 2639 struct target_msginfo { 2640 int msgpool; 2641 int msgmap; 2642 int msgmax; 2643 int msgmnb; 2644 int msgmni; 2645 int msgssz; 2646 int msgtql; 2647 unsigned short int msgseg; 2648 }; 2649 2650 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2651 struct msginfo *host_msginfo) 2652 { 2653 struct target_msginfo *target_msginfo; 2654 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2655 return -TARGET_EFAULT; 2656 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2657 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2658 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2659 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2660 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2661 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2662 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2663 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2664 unlock_user_struct(target_msginfo, target_addr, 1); 2665 return 0; 2666 } 2667 2668 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2669 { 2670 struct msqid_ds dsarg; 2671 struct msginfo msginfo; 2672 abi_long ret = -TARGET_EINVAL; 2673 2674 cmd &= 0xff; 2675 2676 switch (cmd) { 2677 case IPC_STAT: 2678 case IPC_SET: 2679 case MSG_STAT: 2680 if (target_to_host_msqid_ds(&dsarg,ptr)) 2681 return -TARGET_EFAULT; 2682 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2683 if (host_to_target_msqid_ds(ptr,&dsarg)) 2684 return -TARGET_EFAULT; 2685 break; 2686 case IPC_RMID: 2687 ret = get_errno(msgctl(msgid, cmd, NULL)); 2688 break; 2689 case IPC_INFO: 2690 case MSG_INFO: 2691 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2692 if (host_to_target_msginfo(ptr, &msginfo)) 2693 return -TARGET_EFAULT; 2694 break; 2695 } 2696 2697 return ret; 2698 } 2699 2700 struct target_msgbuf { 2701 abi_long mtype; 2702 char mtext[1]; 2703 }; 2704 2705 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2706 unsigned int msgsz, int msgflg) 2707 { 2708 struct target_msgbuf *target_mb; 2709 struct msgbuf *host_mb; 2710 abi_long ret = 0; 2711 2712 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2713 return -TARGET_EFAULT; 2714 host_mb = malloc(msgsz+sizeof(long)); 2715 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2716 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2717 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2718 free(host_mb); 2719 
unlock_user_struct(target_mb, msgp, 0); 2720 2721 return ret; 2722 } 2723 2724 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2725 unsigned int msgsz, abi_long msgtyp, 2726 int msgflg) 2727 { 2728 struct target_msgbuf *target_mb; 2729 char *target_mtext; 2730 struct msgbuf *host_mb; 2731 abi_long ret = 0; 2732 2733 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2734 return -TARGET_EFAULT; 2735 2736 host_mb = g_malloc(msgsz+sizeof(long)); 2737 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2738 2739 if (ret > 0) { 2740 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2741 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2742 if (!target_mtext) { 2743 ret = -TARGET_EFAULT; 2744 goto end; 2745 } 2746 memcpy(target_mb->mtext, host_mb->mtext, ret); 2747 unlock_user(target_mtext, target_mtext_addr, ret); 2748 } 2749 2750 target_mb->mtype = tswapal(host_mb->mtype); 2751 2752 end: 2753 if (target_mb) 2754 unlock_user_struct(target_mb, msgp, 1); 2755 g_free(host_mb); 2756 return ret; 2757 } 2758 2759 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2760 abi_ulong target_addr) 2761 { 2762 struct target_shmid_ds *target_sd; 2763 2764 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2765 return -TARGET_EFAULT; 2766 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2767 return -TARGET_EFAULT; 2768 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2769 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2770 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2771 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2772 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2773 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2774 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2775 unlock_user_struct(target_sd, target_addr, 0); 2776 return 0; 2777 } 2778 2779 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2780 struct shmid_ds *host_sd) 2781 { 2782 struct target_shmid_ds *target_sd; 2783 2784 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2785 return -TARGET_EFAULT; 2786 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2787 return -TARGET_EFAULT; 2788 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2789 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2790 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2791 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2792 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2793 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2794 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2795 unlock_user_struct(target_sd, target_addr, 1); 2796 return 0; 2797 } 2798 2799 struct target_shminfo { 2800 abi_ulong shmmax; 2801 abi_ulong shmmin; 2802 abi_ulong shmmni; 2803 abi_ulong shmseg; 2804 abi_ulong shmall; 2805 }; 2806 2807 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2808 struct shminfo *host_shminfo) 2809 { 2810 struct target_shminfo *target_shminfo; 2811 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 2812 return -TARGET_EFAULT; 2813 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 2814 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 2815 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 2816 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 2817 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 2818 unlock_user_struct(target_shminfo, target_addr, 
1); 2819 return 0; 2820 } 2821 2822 struct target_shm_info { 2823 int used_ids; 2824 abi_ulong shm_tot; 2825 abi_ulong shm_rss; 2826 abi_ulong shm_swp; 2827 abi_ulong swap_attempts; 2828 abi_ulong swap_successes; 2829 }; 2830 2831 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 2832 struct shm_info *host_shm_info) 2833 { 2834 struct target_shm_info *target_shm_info; 2835 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 2836 return -TARGET_EFAULT; 2837 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 2838 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 2839 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 2840 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 2841 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 2842 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 2843 unlock_user_struct(target_shm_info, target_addr, 1); 2844 return 0; 2845 } 2846 2847 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 2848 { 2849 struct shmid_ds dsarg; 2850 struct shminfo shminfo; 2851 struct shm_info shm_info; 2852 abi_long ret = -TARGET_EINVAL; 2853 2854 cmd &= 0xff; 2855 2856 switch(cmd) { 2857 case IPC_STAT: 2858 case IPC_SET: 2859 case SHM_STAT: 2860 if (target_to_host_shmid_ds(&dsarg, buf)) 2861 return -TARGET_EFAULT; 2862 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 2863 if (host_to_target_shmid_ds(buf, &dsarg)) 2864 return -TARGET_EFAULT; 2865 break; 2866 case IPC_INFO: 2867 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 2868 if (host_to_target_shminfo(buf, &shminfo)) 2869 return -TARGET_EFAULT; 2870 break; 2871 case SHM_INFO: 2872 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 2873 if (host_to_target_shm_info(buf, &shm_info)) 2874 return -TARGET_EFAULT; 2875 break; 2876 case IPC_RMID: 2877 case SHM_LOCK: 2878 case SHM_UNLOCK: 2879 ret = get_errno(shmctl(shmid, cmd, NULL)); 2880 break; 2881 } 2882 2883 return ret; 2884 } 2885 2886 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 2887 { 2888 abi_long raddr; 2889 void *host_raddr; 2890 struct shmid_ds shm_info; 2891 int i,ret; 2892 2893 /* find out the length of the shared memory segment */ 2894 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 2895 if (is_error(ret)) { 2896 /* can't get length, bail out */ 2897 return ret; 2898 } 2899 2900 mmap_lock(); 2901 2902 if (shmaddr) 2903 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 2904 else { 2905 abi_ulong mmap_start; 2906 2907 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 2908 2909 if (mmap_start == -1) { 2910 errno = ENOMEM; 2911 host_raddr = (void *)-1; 2912 } else 2913 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 2914 } 2915 2916 if (host_raddr == (void *)-1) { 2917 mmap_unlock(); 2918 return get_errno((long)host_raddr); 2919 } 2920 raddr=h2g((unsigned long)host_raddr); 2921 2922 page_set_flags(raddr, raddr + shm_info.shm_segsz, 2923 PAGE_VALID | PAGE_READ | 2924 ((shmflg & SHM_RDONLY)? 
0 : PAGE_WRITE)); 2925 2926 for (i = 0; i < N_SHM_REGIONS; i++) { 2927 if (shm_regions[i].start == 0) { 2928 shm_regions[i].start = raddr; 2929 shm_regions[i].size = shm_info.shm_segsz; 2930 break; 2931 } 2932 } 2933 2934 mmap_unlock(); 2935 return raddr; 2936 2937 } 2938 2939 static inline abi_long do_shmdt(abi_ulong shmaddr) 2940 { 2941 int i; 2942 2943 for (i = 0; i < N_SHM_REGIONS; ++i) { 2944 if (shm_regions[i].start == shmaddr) { 2945 shm_regions[i].start = 0; 2946 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 2947 break; 2948 } 2949 } 2950 2951 return get_errno(shmdt(g2h(shmaddr))); 2952 } 2953 2954 #ifdef TARGET_NR_ipc 2955 /* ??? This only works with linear mappings. */ 2956 /* do_ipc() must return target values and target errnos. */ 2957 static abi_long do_ipc(unsigned int call, int first, 2958 int second, int third, 2959 abi_long ptr, abi_long fifth) 2960 { 2961 int version; 2962 abi_long ret = 0; 2963 2964 version = call >> 16; 2965 call &= 0xffff; 2966 2967 switch (call) { 2968 case IPCOP_semop: 2969 ret = do_semop(first, ptr, second); 2970 break; 2971 2972 case IPCOP_semget: 2973 ret = get_errno(semget(first, second, third)); 2974 break; 2975 2976 case IPCOP_semctl: 2977 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 2978 break; 2979 2980 case IPCOP_msgget: 2981 ret = get_errno(msgget(first, second)); 2982 break; 2983 2984 case IPCOP_msgsnd: 2985 ret = do_msgsnd(first, ptr, second, third); 2986 break; 2987 2988 case IPCOP_msgctl: 2989 ret = do_msgctl(first, second, ptr); 2990 break; 2991 2992 case IPCOP_msgrcv: 2993 switch (version) { 2994 case 0: 2995 { 2996 struct target_ipc_kludge { 2997 abi_long msgp; 2998 abi_long msgtyp; 2999 } *tmp; 3000 3001 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3002 ret = -TARGET_EFAULT; 3003 break; 3004 } 3005 3006 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3007 3008 unlock_user_struct(tmp, ptr, 0); 3009 break; 3010 } 3011 default: 3012 ret = do_msgrcv(first, ptr, second, fifth, third); 3013 } 3014 break; 3015 3016 case IPCOP_shmat: 3017 switch (version) { 3018 default: 3019 { 3020 abi_ulong raddr; 3021 raddr = do_shmat(first, ptr, second); 3022 if (is_error(raddr)) 3023 return get_errno(raddr); 3024 if (put_user_ual(raddr, third)) 3025 return -TARGET_EFAULT; 3026 break; 3027 } 3028 case 1: 3029 ret = -TARGET_EINVAL; 3030 break; 3031 } 3032 break; 3033 case IPCOP_shmdt: 3034 ret = do_shmdt(ptr); 3035 break; 3036 3037 case IPCOP_shmget: 3038 /* IPC_* flag values are the same on all linux platforms */ 3039 ret = get_errno(shmget(first, second, third)); 3040 break; 3041 3042 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3043 case IPCOP_shmctl: 3044 ret = do_shmctl(first, second, ptr); 3045 break; 3046 default: 3047 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3048 ret = -TARGET_ENOSYS; 3049 break; 3050 } 3051 return ret; 3052 } 3053 #endif 3054 3055 /* kernel structure types definitions */ 3056 3057 #define STRUCT(name, ...) STRUCT_ ## name, 3058 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3059 enum { 3060 #include "syscall_types.h" 3061 }; 3062 #undef STRUCT 3063 #undef STRUCT_SPECIAL 3064 3065 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3066 #define STRUCT_SPECIAL(name) 3067 #include "syscall_types.h" 3068 #undef STRUCT 3069 #undef STRUCT_SPECIAL 3070 3071 typedef struct IOCTLEntry IOCTLEntry; 3072 3073 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3074 int fd, abi_long cmd, abi_long arg); 3075 3076 struct IOCTLEntry { 3077 unsigned int target_cmd; 3078 unsigned int host_cmd; 3079 const char *name; 3080 int access; 3081 do_ioctl_fn *do_ioctl; 3082 const argtype arg_type[5]; 3083 }; 3084 3085 #define IOC_R 0x0001 3086 #define IOC_W 0x0002 3087 #define IOC_RW (IOC_R | IOC_W) 3088 3089 #define MAX_STRUCT_SIZE 4096 3090 3091 #ifdef CONFIG_FIEMAP 3092 /* So fiemap access checks don't overflow on 32 bit systems. 3093 * This is very slightly smaller than the limit imposed by 3094 * the underlying kernel. 3095 */ 3096 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3097 / sizeof(struct fiemap_extent)) 3098 3099 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3100 int fd, abi_long cmd, abi_long arg) 3101 { 3102 /* The parameter for this ioctl is a struct fiemap followed 3103 * by an array of struct fiemap_extent whose size is set 3104 * in fiemap->fm_extent_count. The array is filled in by the 3105 * ioctl. 3106 */ 3107 int target_size_in, target_size_out; 3108 struct fiemap *fm; 3109 const argtype *arg_type = ie->arg_type; 3110 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3111 void *argptr, *p; 3112 abi_long ret; 3113 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3114 uint32_t outbufsz; 3115 int free_fm = 0; 3116 3117 assert(arg_type[0] == TYPE_PTR); 3118 assert(ie->access == IOC_RW); 3119 arg_type++; 3120 target_size_in = thunk_type_size(arg_type, 0); 3121 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3122 if (!argptr) { 3123 return -TARGET_EFAULT; 3124 } 3125 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3126 unlock_user(argptr, arg, 0); 3127 fm = (struct fiemap *)buf_temp; 3128 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3129 return -TARGET_EINVAL; 3130 } 3131 3132 outbufsz = sizeof (*fm) + 3133 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3134 3135 if (outbufsz > MAX_STRUCT_SIZE) { 3136 /* We can't fit all the extents into the fixed size buffer. 3137 * Allocate one that is large enough and use it instead. 
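 * For instance, with a typical 56-byte struct fiemap_extent on the host
 * (the size is host dependent, the figure is for illustration only), a
 * guest request with fm_extent_count == 200 needs
 * sizeof(struct fiemap) + 200 * 56 bytes, i.e. well over the 4096-byte
 * MAX_STRUCT_SIZE scratch buffer, so a heap buffer of outbufsz bytes is
 * allocated below and the already-converted fiemap header is copied into it.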
3138 */ 3139 fm = malloc(outbufsz); 3140 if (!fm) { 3141 return -TARGET_ENOMEM; 3142 } 3143 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3144 free_fm = 1; 3145 } 3146 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3147 if (!is_error(ret)) { 3148 target_size_out = target_size_in; 3149 /* An extent_count of 0 means we were only counting the extents 3150 * so there are no structs to copy 3151 */ 3152 if (fm->fm_extent_count != 0) { 3153 target_size_out += fm->fm_mapped_extents * extent_size; 3154 } 3155 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3156 if (!argptr) { 3157 ret = -TARGET_EFAULT; 3158 } else { 3159 /* Convert the struct fiemap */ 3160 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3161 if (fm->fm_extent_count != 0) { 3162 p = argptr + target_size_in; 3163 /* ...and then all the struct fiemap_extents */ 3164 for (i = 0; i < fm->fm_mapped_extents; i++) { 3165 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3166 THUNK_TARGET); 3167 p += extent_size; 3168 } 3169 } 3170 unlock_user(argptr, arg, target_size_out); 3171 } 3172 } 3173 if (free_fm) { 3174 free(fm); 3175 } 3176 return ret; 3177 } 3178 #endif 3179 3180 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3181 int fd, abi_long cmd, abi_long arg) 3182 { 3183 const argtype *arg_type = ie->arg_type; 3184 int target_size; 3185 void *argptr; 3186 int ret; 3187 struct ifconf *host_ifconf; 3188 uint32_t outbufsz; 3189 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3190 int target_ifreq_size; 3191 int nb_ifreq; 3192 int free_buf = 0; 3193 int i; 3194 int target_ifc_len; 3195 abi_long target_ifc_buf; 3196 int host_ifc_len; 3197 char *host_ifc_buf; 3198 3199 assert(arg_type[0] == TYPE_PTR); 3200 assert(ie->access == IOC_RW); 3201 3202 arg_type++; 3203 target_size = thunk_type_size(arg_type, 0); 3204 3205 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3206 if (!argptr) 3207 return -TARGET_EFAULT; 3208 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3209 unlock_user(argptr, arg, 0); 3210 3211 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3212 target_ifc_len = host_ifconf->ifc_len; 3213 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3214 3215 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3216 nb_ifreq = target_ifc_len / target_ifreq_size; 3217 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3218 3219 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3220 if (outbufsz > MAX_STRUCT_SIZE) { 3221 /* We can't fit all the extents into the fixed size buffer. 3222 * Allocate one that is large enough and use it instead. 
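 * (Here the oversized data are struct ifreq records rather than extents.)
 * Note that ifc_len is converted via a record count because the target
 * and host struct ifreq sizes may differ: with illustrative numbers only,
 * a guest ifc_len of 320 and a 32-byte target ifreq means 10 records, so
 * 10 * sizeof(struct ifreq) bytes are requested from the host, and the
 * host's ifc_len is mapped back to target units the same way after the
 * ioctl.  The guest side is the usual SIOCGIFCONF idiom (conceptual;
 * buf and sock_fd are illustrative names):
 *
 *     struct ifconf ifc = { .ifc_len = sizeof(buf), .ifc_buf = buf };
 *     ioctl(sock_fd, SIOCGIFCONF, &ifc);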
3223 */ 3224 host_ifconf = malloc(outbufsz); 3225 if (!host_ifconf) { 3226 return -TARGET_ENOMEM; 3227 } 3228 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3229 free_buf = 1; 3230 } 3231 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3232 3233 host_ifconf->ifc_len = host_ifc_len; 3234 host_ifconf->ifc_buf = host_ifc_buf; 3235 3236 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3237 if (!is_error(ret)) { 3238 /* convert host ifc_len to target ifc_len */ 3239 3240 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3241 target_ifc_len = nb_ifreq * target_ifreq_size; 3242 host_ifconf->ifc_len = target_ifc_len; 3243 3244 /* restore target ifc_buf */ 3245 3246 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3247 3248 /* copy struct ifconf to target user */ 3249 3250 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3251 if (!argptr) 3252 return -TARGET_EFAULT; 3253 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3254 unlock_user(argptr, arg, target_size); 3255 3256 /* copy ifreq[] to target user */ 3257 3258 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3259 for (i = 0; i < nb_ifreq ; i++) { 3260 thunk_convert(argptr + i * target_ifreq_size, 3261 host_ifc_buf + i * sizeof(struct ifreq), 3262 ifreq_arg_type, THUNK_TARGET); 3263 } 3264 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3265 } 3266 3267 if (free_buf) { 3268 free(host_ifconf); 3269 } 3270 3271 return ret; 3272 } 3273 3274 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3275 abi_long cmd, abi_long arg) 3276 { 3277 void *argptr; 3278 struct dm_ioctl *host_dm; 3279 abi_long guest_data; 3280 uint32_t guest_data_size; 3281 int target_size; 3282 const argtype *arg_type = ie->arg_type; 3283 abi_long ret; 3284 void *big_buf = NULL; 3285 char *host_data; 3286 3287 arg_type++; 3288 target_size = thunk_type_size(arg_type, 0); 3289 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3290 if (!argptr) { 3291 ret = -TARGET_EFAULT; 3292 goto out; 3293 } 3294 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3295 unlock_user(argptr, arg, 0); 3296 3297 /* buf_temp is too small, so fetch things into a bigger buffer */ 3298 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3299 memcpy(big_buf, buf_temp, target_size); 3300 buf_temp = big_buf; 3301 host_dm = big_buf; 3302 3303 guest_data = arg + host_dm->data_start; 3304 if ((guest_data - arg) < 0) { 3305 ret = -EINVAL; 3306 goto out; 3307 } 3308 guest_data_size = host_dm->data_size - host_dm->data_start; 3309 host_data = (char*)host_dm + host_dm->data_start; 3310 3311 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3312 switch (ie->host_cmd) { 3313 case DM_REMOVE_ALL: 3314 case DM_LIST_DEVICES: 3315 case DM_DEV_CREATE: 3316 case DM_DEV_REMOVE: 3317 case DM_DEV_SUSPEND: 3318 case DM_DEV_STATUS: 3319 case DM_DEV_WAIT: 3320 case DM_TABLE_STATUS: 3321 case DM_TABLE_CLEAR: 3322 case DM_TABLE_DEPS: 3323 case DM_LIST_VERSIONS: 3324 /* no input data */ 3325 break; 3326 case DM_DEV_RENAME: 3327 case DM_DEV_SET_GEOMETRY: 3328 /* data contains only strings */ 3329 memcpy(host_data, argptr, guest_data_size); 3330 break; 3331 case DM_TARGET_MSG: 3332 memcpy(host_data, argptr, guest_data_size); 3333 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3334 break; 3335 case DM_TABLE_LOAD: 3336 { 3337 void *gspec = argptr; 3338 void *cur_data = host_data; 3339 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3340 int spec_size = 
thunk_type_size(arg_type, 0); 3341 int i; 3342 3343 for (i = 0; i < host_dm->target_count; i++) { 3344 struct dm_target_spec *spec = cur_data; 3345 uint32_t next; 3346 int slen; 3347 3348 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3349 slen = strlen((char*)gspec + spec_size) + 1; 3350 next = spec->next; 3351 spec->next = sizeof(*spec) + slen; 3352 strcpy((char*)&spec[1], gspec + spec_size); 3353 gspec += next; 3354 cur_data += spec->next; 3355 } 3356 break; 3357 } 3358 default: 3359 ret = -TARGET_EINVAL; 3360 goto out; 3361 } 3362 unlock_user(argptr, guest_data, 0); 3363 3364 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3365 if (!is_error(ret)) { 3366 guest_data = arg + host_dm->data_start; 3367 guest_data_size = host_dm->data_size - host_dm->data_start; 3368 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3369 switch (ie->host_cmd) { 3370 case DM_REMOVE_ALL: 3371 case DM_DEV_CREATE: 3372 case DM_DEV_REMOVE: 3373 case DM_DEV_RENAME: 3374 case DM_DEV_SUSPEND: 3375 case DM_DEV_STATUS: 3376 case DM_TABLE_LOAD: 3377 case DM_TABLE_CLEAR: 3378 case DM_TARGET_MSG: 3379 case DM_DEV_SET_GEOMETRY: 3380 /* no return data */ 3381 break; 3382 case DM_LIST_DEVICES: 3383 { 3384 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3385 uint32_t remaining_data = guest_data_size; 3386 void *cur_data = argptr; 3387 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3388 int nl_size = 12; /* can't use thunk_size due to alignment */ 3389 3390 while (1) { 3391 uint32_t next = nl->next; 3392 if (next) { 3393 nl->next = nl_size + (strlen(nl->name) + 1); 3394 } 3395 if (remaining_data < nl->next) { 3396 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3397 break; 3398 } 3399 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3400 strcpy(cur_data + nl_size, nl->name); 3401 cur_data += nl->next; 3402 remaining_data -= nl->next; 3403 if (!next) { 3404 break; 3405 } 3406 nl = (void*)nl + next; 3407 } 3408 break; 3409 } 3410 case DM_DEV_WAIT: 3411 case DM_TABLE_STATUS: 3412 { 3413 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3414 void *cur_data = argptr; 3415 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3416 int spec_size = thunk_type_size(arg_type, 0); 3417 int i; 3418 3419 for (i = 0; i < host_dm->target_count; i++) { 3420 uint32_t next = spec->next; 3421 int slen = strlen((char*)&spec[1]) + 1; 3422 spec->next = (cur_data - argptr) + spec_size + slen; 3423 if (guest_data_size < spec->next) { 3424 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3425 break; 3426 } 3427 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3428 strcpy(cur_data + spec_size, (char*)&spec[1]); 3429 cur_data = argptr + spec->next; 3430 spec = (void*)host_dm + host_dm->data_start + next; 3431 } 3432 break; 3433 } 3434 case DM_TABLE_DEPS: 3435 { 3436 void *hdata = (void*)host_dm + host_dm->data_start; 3437 int count = *(uint32_t*)hdata; 3438 uint64_t *hdev = hdata + 8; 3439 uint64_t *gdev = argptr + 8; 3440 int i; 3441 3442 *(uint32_t*)argptr = tswap32(count); 3443 for (i = 0; i < count; i++) { 3444 *gdev = tswap64(*hdev); 3445 gdev++; 3446 hdev++; 3447 } 3448 break; 3449 } 3450 case DM_LIST_VERSIONS: 3451 { 3452 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3453 uint32_t remaining_data = guest_data_size; 3454 void *cur_data = argptr; 3455 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3456 int vers_size = thunk_type_size(arg_type, 0); 3457 3458 while (1) { 3459 uint32_t next = vers->next; 3460 if (next) { 3461 
vers->next = vers_size + (strlen(vers->name) + 1); 3462 } 3463 if (remaining_data < vers->next) { 3464 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3465 break; 3466 } 3467 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3468 strcpy(cur_data + vers_size, vers->name); 3469 cur_data += vers->next; 3470 remaining_data -= vers->next; 3471 if (!next) { 3472 break; 3473 } 3474 vers = (void*)vers + next; 3475 } 3476 break; 3477 } 3478 default: 3479 ret = -TARGET_EINVAL; 3480 goto out; 3481 } 3482 unlock_user(argptr, guest_data, guest_data_size); 3483 3484 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3485 if (!argptr) { 3486 ret = -TARGET_EFAULT; 3487 goto out; 3488 } 3489 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3490 unlock_user(argptr, arg, target_size); 3491 } 3492 out: 3493 g_free(big_buf); 3494 return ret; 3495 } 3496 3497 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3498 int fd, abi_long cmd, abi_long arg) 3499 { 3500 const argtype *arg_type = ie->arg_type; 3501 const StructEntry *se; 3502 const argtype *field_types; 3503 const int *dst_offsets, *src_offsets; 3504 int target_size; 3505 void *argptr; 3506 abi_ulong *target_rt_dev_ptr; 3507 unsigned long *host_rt_dev_ptr; 3508 abi_long ret; 3509 int i; 3510 3511 assert(ie->access == IOC_W); 3512 assert(*arg_type == TYPE_PTR); 3513 arg_type++; 3514 assert(*arg_type == TYPE_STRUCT); 3515 target_size = thunk_type_size(arg_type, 0); 3516 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3517 if (!argptr) { 3518 return -TARGET_EFAULT; 3519 } 3520 arg_type++; 3521 assert(*arg_type == (int)STRUCT_rtentry); 3522 se = struct_entries + *arg_type++; 3523 assert(se->convert[0] == NULL); 3524 /* convert struct here to be able to catch rt_dev string */ 3525 field_types = se->field_types; 3526 dst_offsets = se->field_offsets[THUNK_HOST]; 3527 src_offsets = se->field_offsets[THUNK_TARGET]; 3528 for (i = 0; i < se->nb_fields; i++) { 3529 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3530 assert(*field_types == TYPE_PTRVOID); 3531 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3532 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3533 if (*target_rt_dev_ptr != 0) { 3534 *host_rt_dev_ptr = (unsigned long)lock_user_string( 3535 tswapal(*target_rt_dev_ptr)); 3536 if (!*host_rt_dev_ptr) { 3537 unlock_user(argptr, arg, 0); 3538 return -TARGET_EFAULT; 3539 } 3540 } else { 3541 *host_rt_dev_ptr = 0; 3542 } 3543 field_types++; 3544 continue; 3545 } 3546 field_types = thunk_convert(buf_temp + dst_offsets[i], 3547 argptr + src_offsets[i], 3548 field_types, THUNK_HOST); 3549 } 3550 unlock_user(argptr, arg, 0); 3551 3552 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3553 if (*host_rt_dev_ptr != 0) { 3554 unlock_user((void *)*host_rt_dev_ptr, 3555 *target_rt_dev_ptr, 0); 3556 } 3557 return ret; 3558 } 3559 3560 static IOCTLEntry ioctl_entries[] = { 3561 #define IOCTL(cmd, access, ...) \ 3562 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3563 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3564 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3565 #include "ioctls.h" 3566 { 0, 0, }, 3567 }; 3568 3569 /* ??? Implement proper locking for ioctls. */ 3570 /* do_ioctl() Must return target values and target errnos. 
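 * Dispatch is table driven: ioctl_entries[] above is generated from
 * ioctls.h by the IOCTL()/IOCTL_SPECIAL() macros, and each entry carries
 * the target command number, host command number, an IOC_R / IOC_W /
 * IOC_RW access flag and an arg_type[] description that drives the
 * generic thunk conversion of the argument struct.  A plain read-only
 * struct command would be described along these lines (hypothetical
 * entry: SOME_CMD and STRUCT_some are placeholders, and MK_PTR is assumed
 * to be the usual thunk pointer wrapper):
 *
 *     IOCTL(SOME_CMD, IOC_R, MK_PTR(MK_STRUCT(STRUCT_some)))
 *
 * For such an entry the host ioctl is issued into buf_temp and, because
 * access == IOC_R, the result is thunk-converted back into the guest's
 * buffer afterwards.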
*/ 3571 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3572 { 3573 const IOCTLEntry *ie; 3574 const argtype *arg_type; 3575 abi_long ret; 3576 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3577 int target_size; 3578 void *argptr; 3579 3580 ie = ioctl_entries; 3581 for(;;) { 3582 if (ie->target_cmd == 0) { 3583 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3584 return -TARGET_ENOSYS; 3585 } 3586 if (ie->target_cmd == cmd) 3587 break; 3588 ie++; 3589 } 3590 arg_type = ie->arg_type; 3591 #if defined(DEBUG) 3592 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3593 #endif 3594 if (ie->do_ioctl) { 3595 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3596 } 3597 3598 switch(arg_type[0]) { 3599 case TYPE_NULL: 3600 /* no argument */ 3601 ret = get_errno(ioctl(fd, ie->host_cmd)); 3602 break; 3603 case TYPE_PTRVOID: 3604 case TYPE_INT: 3605 /* int argment */ 3606 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3607 break; 3608 case TYPE_PTR: 3609 arg_type++; 3610 target_size = thunk_type_size(arg_type, 0); 3611 switch(ie->access) { 3612 case IOC_R: 3613 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3614 if (!is_error(ret)) { 3615 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3616 if (!argptr) 3617 return -TARGET_EFAULT; 3618 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3619 unlock_user(argptr, arg, target_size); 3620 } 3621 break; 3622 case IOC_W: 3623 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3624 if (!argptr) 3625 return -TARGET_EFAULT; 3626 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3627 unlock_user(argptr, arg, 0); 3628 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3629 break; 3630 default: 3631 case IOC_RW: 3632 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3633 if (!argptr) 3634 return -TARGET_EFAULT; 3635 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3636 unlock_user(argptr, arg, 0); 3637 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3638 if (!is_error(ret)) { 3639 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3640 if (!argptr) 3641 return -TARGET_EFAULT; 3642 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3643 unlock_user(argptr, arg, target_size); 3644 } 3645 break; 3646 } 3647 break; 3648 default: 3649 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3650 (long)cmd, arg_type[0]); 3651 ret = -TARGET_ENOSYS; 3652 break; 3653 } 3654 return ret; 3655 } 3656 3657 static const bitmask_transtbl iflag_tbl[] = { 3658 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3659 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3660 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3661 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3662 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3663 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3664 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3665 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3666 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3667 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3668 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3669 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3670 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3671 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3672 { 0, 0, 0, 0 } 3673 }; 3674 3675 static const bitmask_transtbl oflag_tbl[] = { 3676 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3677 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3678 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3679 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3680 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR 
}, 3681 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3682 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3683 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3684 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3685 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3686 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3687 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3688 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3689 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3690 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3691 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3692 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3693 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3694 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3695 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3696 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3697 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3698 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3699 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3700 { 0, 0, 0, 0 } 3701 }; 3702 3703 static const bitmask_transtbl cflag_tbl[] = { 3704 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3705 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3706 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3707 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3708 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3709 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3710 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3711 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3712 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3713 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3714 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3715 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3716 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3717 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3718 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3719 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3720 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3721 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3722 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3723 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3724 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3725 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3726 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3727 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3728 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3729 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3730 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3731 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3732 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3733 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3734 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3735 { 0, 0, 0, 0 } 3736 }; 3737 3738 static const bitmask_transtbl lflag_tbl[] = { 3739 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3740 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3741 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3742 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3743 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3744 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3745 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3746 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3747 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3748 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3749 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3750 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3751 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3752 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3753 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3754 { 0, 0, 0, 0 } 3755 }; 3756 3757 static void target_to_host_termios 
(void *dst, const void *src) 3758 { 3759 struct host_termios *host = dst; 3760 const struct target_termios *target = src; 3761 3762 host->c_iflag = 3763 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3764 host->c_oflag = 3765 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3766 host->c_cflag = 3767 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3768 host->c_lflag = 3769 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3770 host->c_line = target->c_line; 3771 3772 memset(host->c_cc, 0, sizeof(host->c_cc)); 3773 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3774 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3775 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3776 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3777 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3778 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3779 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3780 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3781 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3782 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3783 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3784 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3785 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3786 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3787 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3788 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3789 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3790 } 3791 3792 static void host_to_target_termios (void *dst, const void *src) 3793 { 3794 struct target_termios *target = dst; 3795 const struct host_termios *host = src; 3796 3797 target->c_iflag = 3798 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3799 target->c_oflag = 3800 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3801 target->c_cflag = 3802 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3803 target->c_lflag = 3804 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3805 target->c_line = host->c_line; 3806 3807 memset(target->c_cc, 0, sizeof(target->c_cc)); 3808 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3809 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3810 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3811 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3812 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3813 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3814 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3815 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3816 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3817 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3818 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3819 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3820 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3821 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3822 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3823 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3824 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3825 } 3826 3827 static const StructEntry struct_termios_def = { 3828 .convert = { host_to_target_termios, target_to_host_termios }, 3829 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3830 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3831 }; 3832 3833 static bitmask_transtbl mmap_flags_tbl[] = { 3834 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3835 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3836 { TARGET_MAP_FIXED, 
TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 3837 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 3838 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 3839 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 3840 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 3841 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 3842 { 0, 0, 0, 0 } 3843 }; 3844 3845 #if defined(TARGET_I386) 3846 3847 /* NOTE: there is really one LDT for all the threads */ 3848 static uint8_t *ldt_table; 3849 3850 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 3851 { 3852 int size; 3853 void *p; 3854 3855 if (!ldt_table) 3856 return 0; 3857 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 3858 if (size > bytecount) 3859 size = bytecount; 3860 p = lock_user(VERIFY_WRITE, ptr, size, 0); 3861 if (!p) 3862 return -TARGET_EFAULT; 3863 /* ??? Should this by byteswapped? */ 3864 memcpy(p, ldt_table, size); 3865 unlock_user(p, ptr, size); 3866 return size; 3867 } 3868 3869 /* XXX: add locking support */ 3870 static abi_long write_ldt(CPUX86State *env, 3871 abi_ulong ptr, unsigned long bytecount, int oldmode) 3872 { 3873 struct target_modify_ldt_ldt_s ldt_info; 3874 struct target_modify_ldt_ldt_s *target_ldt_info; 3875 int seg_32bit, contents, read_exec_only, limit_in_pages; 3876 int seg_not_present, useable, lm; 3877 uint32_t *lp, entry_1, entry_2; 3878 3879 if (bytecount != sizeof(ldt_info)) 3880 return -TARGET_EINVAL; 3881 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 3882 return -TARGET_EFAULT; 3883 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3884 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 3885 ldt_info.limit = tswap32(target_ldt_info->limit); 3886 ldt_info.flags = tswap32(target_ldt_info->flags); 3887 unlock_user_struct(target_ldt_info, ptr, 0); 3888 3889 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 3890 return -TARGET_EINVAL; 3891 seg_32bit = ldt_info.flags & 1; 3892 contents = (ldt_info.flags >> 1) & 3; 3893 read_exec_only = (ldt_info.flags >> 3) & 1; 3894 limit_in_pages = (ldt_info.flags >> 4) & 1; 3895 seg_not_present = (ldt_info.flags >> 5) & 1; 3896 useable = (ldt_info.flags >> 6) & 1; 3897 #ifdef TARGET_ABI32 3898 lm = 0; 3899 #else 3900 lm = (ldt_info.flags >> 7) & 1; 3901 #endif 3902 if (contents == 3) { 3903 if (oldmode) 3904 return -TARGET_EINVAL; 3905 if (seg_not_present == 0) 3906 return -TARGET_EINVAL; 3907 } 3908 /* allocate the LDT */ 3909 if (!ldt_table) { 3910 env->ldt.base = target_mmap(0, 3911 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 3912 PROT_READ|PROT_WRITE, 3913 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 3914 if (env->ldt.base == -1) 3915 return -TARGET_ENOMEM; 3916 memset(g2h(env->ldt.base), 0, 3917 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 3918 env->ldt.limit = 0xffff; 3919 ldt_table = g2h(env->ldt.base); 3920 } 3921 3922 /* NOTE: same code as Linux kernel */ 3923 /* Allow LDTs to be cleared by the user. 
*/ 3924 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3925 if (oldmode || 3926 (contents == 0 && 3927 read_exec_only == 1 && 3928 seg_32bit == 0 && 3929 limit_in_pages == 0 && 3930 seg_not_present == 1 && 3931 useable == 0 )) { 3932 entry_1 = 0; 3933 entry_2 = 0; 3934 goto install; 3935 } 3936 } 3937 3938 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3939 (ldt_info.limit & 0x0ffff); 3940 entry_2 = (ldt_info.base_addr & 0xff000000) | 3941 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3942 (ldt_info.limit & 0xf0000) | 3943 ((read_exec_only ^ 1) << 9) | 3944 (contents << 10) | 3945 ((seg_not_present ^ 1) << 15) | 3946 (seg_32bit << 22) | 3947 (limit_in_pages << 23) | 3948 (lm << 21) | 3949 0x7000; 3950 if (!oldmode) 3951 entry_2 |= (useable << 20); 3952 3953 /* Install the new entry ... */ 3954 install: 3955 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 3956 lp[0] = tswap32(entry_1); 3957 lp[1] = tswap32(entry_2); 3958 return 0; 3959 } 3960 3961 /* specific and weird i386 syscalls */ 3962 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 3963 unsigned long bytecount) 3964 { 3965 abi_long ret; 3966 3967 switch (func) { 3968 case 0: 3969 ret = read_ldt(ptr, bytecount); 3970 break; 3971 case 1: 3972 ret = write_ldt(env, ptr, bytecount, 1); 3973 break; 3974 case 0x11: 3975 ret = write_ldt(env, ptr, bytecount, 0); 3976 break; 3977 default: 3978 ret = -TARGET_ENOSYS; 3979 break; 3980 } 3981 return ret; 3982 } 3983 3984 #if defined(TARGET_I386) && defined(TARGET_ABI32) 3985 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 3986 { 3987 uint64_t *gdt_table = g2h(env->gdt.base); 3988 struct target_modify_ldt_ldt_s ldt_info; 3989 struct target_modify_ldt_ldt_s *target_ldt_info; 3990 int seg_32bit, contents, read_exec_only, limit_in_pages; 3991 int seg_not_present, useable, lm; 3992 uint32_t *lp, entry_1, entry_2; 3993 int i; 3994 3995 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 3996 if (!target_ldt_info) 3997 return -TARGET_EFAULT; 3998 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3999 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4000 ldt_info.limit = tswap32(target_ldt_info->limit); 4001 ldt_info.flags = tswap32(target_ldt_info->flags); 4002 if (ldt_info.entry_number == -1) { 4003 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4004 if (gdt_table[i] == 0) { 4005 ldt_info.entry_number = i; 4006 target_ldt_info->entry_number = tswap32(i); 4007 break; 4008 } 4009 } 4010 } 4011 unlock_user_struct(target_ldt_info, ptr, 1); 4012 4013 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4014 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4015 return -TARGET_EINVAL; 4016 seg_32bit = ldt_info.flags & 1; 4017 contents = (ldt_info.flags >> 1) & 3; 4018 read_exec_only = (ldt_info.flags >> 3) & 1; 4019 limit_in_pages = (ldt_info.flags >> 4) & 1; 4020 seg_not_present = (ldt_info.flags >> 5) & 1; 4021 useable = (ldt_info.flags >> 6) & 1; 4022 #ifdef TARGET_ABI32 4023 lm = 0; 4024 #else 4025 lm = (ldt_info.flags >> 7) & 1; 4026 #endif 4027 4028 if (contents == 3) { 4029 if (seg_not_present == 0) 4030 return -TARGET_EINVAL; 4031 } 4032 4033 /* NOTE: same code as Linux kernel */ 4034 /* Allow LDTs to be cleared by the user. 
*/ 4035 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4036 if ((contents == 0 && 4037 read_exec_only == 1 && 4038 seg_32bit == 0 && 4039 limit_in_pages == 0 && 4040 seg_not_present == 1 && 4041 useable == 0 )) { 4042 entry_1 = 0; 4043 entry_2 = 0; 4044 goto install; 4045 } 4046 } 4047 4048 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4049 (ldt_info.limit & 0x0ffff); 4050 entry_2 = (ldt_info.base_addr & 0xff000000) | 4051 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4052 (ldt_info.limit & 0xf0000) | 4053 ((read_exec_only ^ 1) << 9) | 4054 (contents << 10) | 4055 ((seg_not_present ^ 1) << 15) | 4056 (seg_32bit << 22) | 4057 (limit_in_pages << 23) | 4058 (useable << 20) | 4059 (lm << 21) | 4060 0x7000; 4061 4062 /* Install the new entry ... */ 4063 install: 4064 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4065 lp[0] = tswap32(entry_1); 4066 lp[1] = tswap32(entry_2); 4067 return 0; 4068 } 4069 4070 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4071 { 4072 struct target_modify_ldt_ldt_s *target_ldt_info; 4073 uint64_t *gdt_table = g2h(env->gdt.base); 4074 uint32_t base_addr, limit, flags; 4075 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4076 int seg_not_present, useable, lm; 4077 uint32_t *lp, entry_1, entry_2; 4078 4079 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4080 if (!target_ldt_info) 4081 return -TARGET_EFAULT; 4082 idx = tswap32(target_ldt_info->entry_number); 4083 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4084 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4085 unlock_user_struct(target_ldt_info, ptr, 1); 4086 return -TARGET_EINVAL; 4087 } 4088 lp = (uint32_t *)(gdt_table + idx); 4089 entry_1 = tswap32(lp[0]); 4090 entry_2 = tswap32(lp[1]); 4091 4092 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4093 contents = (entry_2 >> 10) & 3; 4094 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4095 seg_32bit = (entry_2 >> 22) & 1; 4096 limit_in_pages = (entry_2 >> 23) & 1; 4097 useable = (entry_2 >> 20) & 1; 4098 #ifdef TARGET_ABI32 4099 lm = 0; 4100 #else 4101 lm = (entry_2 >> 21) & 1; 4102 #endif 4103 flags = (seg_32bit << 0) | (contents << 1) | 4104 (read_exec_only << 3) | (limit_in_pages << 4) | 4105 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4106 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4107 base_addr = (entry_1 >> 16) | 4108 (entry_2 & 0xff000000) | 4109 ((entry_2 & 0xff) << 16); 4110 target_ldt_info->base_addr = tswapal(base_addr); 4111 target_ldt_info->limit = tswap32(limit); 4112 target_ldt_info->flags = tswap32(flags); 4113 unlock_user_struct(target_ldt_info, ptr, 1); 4114 return 0; 4115 } 4116 #endif /* TARGET_I386 && TARGET_ABI32 */ 4117 4118 #ifndef TARGET_ABI32 4119 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4120 { 4121 abi_long ret = 0; 4122 abi_ulong val; 4123 int idx; 4124 4125 switch(code) { 4126 case TARGET_ARCH_SET_GS: 4127 case TARGET_ARCH_SET_FS: 4128 if (code == TARGET_ARCH_SET_GS) 4129 idx = R_GS; 4130 else 4131 idx = R_FS; 4132 cpu_x86_load_seg(env, idx, 0); 4133 env->segs[idx].base = addr; 4134 break; 4135 case TARGET_ARCH_GET_GS: 4136 case TARGET_ARCH_GET_FS: 4137 if (code == TARGET_ARCH_GET_GS) 4138 idx = R_GS; 4139 else 4140 idx = R_FS; 4141 val = env->segs[idx].base; 4142 if (put_user(val, addr, abi_ulong)) 4143 ret = -TARGET_EFAULT; 4144 break; 4145 default: 4146 ret = -TARGET_EINVAL; 4147 break; 4148 } 4149 return ret; 4150 } 4151 #endif 4152 4153 #endif /* defined(TARGET_I386) */ 4154 4155 #define NEW_STACK_SIZE 0x40000 4156 4157 4158 static pthread_mutex_t 
clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)env->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    int ret;
    TaskState *ts;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.
*/ 4265 4266 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4267 pthread_attr_destroy(&attr); 4268 if (ret == 0) { 4269 /* Wait for the child to initialize. */ 4270 pthread_cond_wait(&info.cond, &info.mutex); 4271 ret = info.tid; 4272 if (flags & CLONE_PARENT_SETTID) 4273 put_user_u32(ret, parent_tidptr); 4274 } else { 4275 ret = -1; 4276 } 4277 pthread_mutex_unlock(&info.mutex); 4278 pthread_cond_destroy(&info.cond); 4279 pthread_mutex_destroy(&info.mutex); 4280 pthread_mutex_unlock(&clone_lock); 4281 } else { 4282 /* if no CLONE_VM, we consider it is a fork */ 4283 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4284 return -EINVAL; 4285 fork_start(); 4286 ret = fork(); 4287 if (ret == 0) { 4288 /* Child Process. */ 4289 cpu_clone_regs(env, newsp); 4290 fork_end(1); 4291 /* There is a race condition here. The parent process could 4292 theoretically read the TID in the child process before the child 4293 tid is set. This would require using either ptrace 4294 (not implemented) or having *_tidptr to point at a shared memory 4295 mapping. We can't repeat the spinlock hack used above because 4296 the child process gets its own copy of the lock. */ 4297 if (flags & CLONE_CHILD_SETTID) 4298 put_user_u32(gettid(), child_tidptr); 4299 if (flags & CLONE_PARENT_SETTID) 4300 put_user_u32(gettid(), parent_tidptr); 4301 ts = (TaskState *)env->opaque; 4302 if (flags & CLONE_SETTLS) 4303 cpu_set_tls (env, newtls); 4304 if (flags & CLONE_CHILD_CLEARTID) 4305 ts->child_tidptr = child_tidptr; 4306 } else { 4307 fork_end(0); 4308 } 4309 } 4310 return ret; 4311 } 4312 4313 /* warning : doesn't handle linux specific flags... */ 4314 static int target_to_host_fcntl_cmd(int cmd) 4315 { 4316 switch(cmd) { 4317 case TARGET_F_DUPFD: 4318 case TARGET_F_GETFD: 4319 case TARGET_F_SETFD: 4320 case TARGET_F_GETFL: 4321 case TARGET_F_SETFL: 4322 return cmd; 4323 case TARGET_F_GETLK: 4324 return F_GETLK; 4325 case TARGET_F_SETLK: 4326 return F_SETLK; 4327 case TARGET_F_SETLKW: 4328 return F_SETLKW; 4329 case TARGET_F_GETOWN: 4330 return F_GETOWN; 4331 case TARGET_F_SETOWN: 4332 return F_SETOWN; 4333 case TARGET_F_GETSIG: 4334 return F_GETSIG; 4335 case TARGET_F_SETSIG: 4336 return F_SETSIG; 4337 #if TARGET_ABI_BITS == 32 4338 case TARGET_F_GETLK64: 4339 return F_GETLK64; 4340 case TARGET_F_SETLK64: 4341 return F_SETLK64; 4342 case TARGET_F_SETLKW64: 4343 return F_SETLKW64; 4344 #endif 4345 case TARGET_F_SETLEASE: 4346 return F_SETLEASE; 4347 case TARGET_F_GETLEASE: 4348 return F_GETLEASE; 4349 #ifdef F_DUPFD_CLOEXEC 4350 case TARGET_F_DUPFD_CLOEXEC: 4351 return F_DUPFD_CLOEXEC; 4352 #endif 4353 case TARGET_F_NOTIFY: 4354 return F_NOTIFY; 4355 default: 4356 return -TARGET_EINVAL; 4357 } 4358 return -TARGET_EINVAL; 4359 } 4360 4361 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4362 static const bitmask_transtbl flock_tbl[] = { 4363 TRANSTBL_CONVERT(F_RDLCK), 4364 TRANSTBL_CONVERT(F_WRLCK), 4365 TRANSTBL_CONVERT(F_UNLCK), 4366 TRANSTBL_CONVERT(F_EXLCK), 4367 TRANSTBL_CONVERT(F_SHLCK), 4368 { 0, 0, 0, 0 } 4369 }; 4370 4371 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4372 { 4373 struct flock fl; 4374 struct target_flock *target_fl; 4375 struct flock64 fl64; 4376 struct target_flock64 *target_fl64; 4377 abi_long ret; 4378 int host_cmd = target_to_host_fcntl_cmd(cmd); 4379 4380 if (host_cmd == -TARGET_EINVAL) 4381 return host_cmd; 4382 4383 switch(cmd) { 4384 case TARGET_F_GETLK: 4385 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4386 return -TARGET_EFAULT; 4387 fl.l_type = 4388 
target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4389 fl.l_whence = tswap16(target_fl->l_whence); 4390 fl.l_start = tswapal(target_fl->l_start); 4391 fl.l_len = tswapal(target_fl->l_len); 4392 fl.l_pid = tswap32(target_fl->l_pid); 4393 unlock_user_struct(target_fl, arg, 0); 4394 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4395 if (ret == 0) { 4396 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4397 return -TARGET_EFAULT; 4398 target_fl->l_type = 4399 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4400 target_fl->l_whence = tswap16(fl.l_whence); 4401 target_fl->l_start = tswapal(fl.l_start); 4402 target_fl->l_len = tswapal(fl.l_len); 4403 target_fl->l_pid = tswap32(fl.l_pid); 4404 unlock_user_struct(target_fl, arg, 1); 4405 } 4406 break; 4407 4408 case TARGET_F_SETLK: 4409 case TARGET_F_SETLKW: 4410 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4411 return -TARGET_EFAULT; 4412 fl.l_type = 4413 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4414 fl.l_whence = tswap16(target_fl->l_whence); 4415 fl.l_start = tswapal(target_fl->l_start); 4416 fl.l_len = tswapal(target_fl->l_len); 4417 fl.l_pid = tswap32(target_fl->l_pid); 4418 unlock_user_struct(target_fl, arg, 0); 4419 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4420 break; 4421 4422 case TARGET_F_GETLK64: 4423 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4424 return -TARGET_EFAULT; 4425 fl64.l_type = 4426 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4427 fl64.l_whence = tswap16(target_fl64->l_whence); 4428 fl64.l_start = tswap64(target_fl64->l_start); 4429 fl64.l_len = tswap64(target_fl64->l_len); 4430 fl64.l_pid = tswap32(target_fl64->l_pid); 4431 unlock_user_struct(target_fl64, arg, 0); 4432 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4433 if (ret == 0) { 4434 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4435 return -TARGET_EFAULT; 4436 target_fl64->l_type = 4437 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4438 target_fl64->l_whence = tswap16(fl64.l_whence); 4439 target_fl64->l_start = tswap64(fl64.l_start); 4440 target_fl64->l_len = tswap64(fl64.l_len); 4441 target_fl64->l_pid = tswap32(fl64.l_pid); 4442 unlock_user_struct(target_fl64, arg, 1); 4443 } 4444 break; 4445 case TARGET_F_SETLK64: 4446 case TARGET_F_SETLKW64: 4447 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4448 return -TARGET_EFAULT; 4449 fl64.l_type = 4450 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4451 fl64.l_whence = tswap16(target_fl64->l_whence); 4452 fl64.l_start = tswap64(target_fl64->l_start); 4453 fl64.l_len = tswap64(target_fl64->l_len); 4454 fl64.l_pid = tswap32(target_fl64->l_pid); 4455 unlock_user_struct(target_fl64, arg, 0); 4456 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4457 break; 4458 4459 case TARGET_F_GETFL: 4460 ret = get_errno(fcntl(fd, host_cmd, arg)); 4461 if (ret >= 0) { 4462 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4463 } 4464 break; 4465 4466 case TARGET_F_SETFL: 4467 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4468 break; 4469 4470 case TARGET_F_SETOWN: 4471 case TARGET_F_GETOWN: 4472 case TARGET_F_SETSIG: 4473 case TARGET_F_GETSIG: 4474 case TARGET_F_SETLEASE: 4475 case TARGET_F_GETLEASE: 4476 ret = get_errno(fcntl(fd, host_cmd, arg)); 4477 break; 4478 4479 default: 4480 ret = get_errno(fcntl(fd, cmd, arg)); 4481 break; 4482 } 4483 return ret; 4484 } 4485 4486 #ifdef USE_UID16 4487 4488 static inline int high2lowuid(int uid) 4489 { 4490 
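    /* Note (descriptive): with USE_UID16 the guest ABI only exposes 16-bit
     * uid_t/gid_t syscalls, so host IDs that do not fit in 16 bits are
     * clamped to 65534, the conventional "overflow" uid/gid, rather than
     * being silently truncated. */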
if (uid > 65535) 4491 return 65534; 4492 else 4493 return uid; 4494 } 4495 4496 static inline int high2lowgid(int gid) 4497 { 4498 if (gid > 65535) 4499 return 65534; 4500 else 4501 return gid; 4502 } 4503 4504 static inline int low2highuid(int uid) 4505 { 4506 if ((int16_t)uid == -1) 4507 return -1; 4508 else 4509 return uid; 4510 } 4511 4512 static inline int low2highgid(int gid) 4513 { 4514 if ((int16_t)gid == -1) 4515 return -1; 4516 else 4517 return gid; 4518 } 4519 static inline int tswapid(int id) 4520 { 4521 return tswap16(id); 4522 } 4523 #else /* !USE_UID16 */ 4524 static inline int high2lowuid(int uid) 4525 { 4526 return uid; 4527 } 4528 static inline int high2lowgid(int gid) 4529 { 4530 return gid; 4531 } 4532 static inline int low2highuid(int uid) 4533 { 4534 return uid; 4535 } 4536 static inline int low2highgid(int gid) 4537 { 4538 return gid; 4539 } 4540 static inline int tswapid(int id) 4541 { 4542 return tswap32(id); 4543 } 4544 #endif /* USE_UID16 */ 4545 4546 void syscall_init(void) 4547 { 4548 IOCTLEntry *ie; 4549 const argtype *arg_type; 4550 int size; 4551 int i; 4552 4553 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4554 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4555 #include "syscall_types.h" 4556 #undef STRUCT 4557 #undef STRUCT_SPECIAL 4558 4559 /* Build target_to_host_errno_table[] table from 4560 * host_to_target_errno_table[]. */ 4561 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4562 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4563 } 4564 4565 /* we patch the ioctl size if necessary. We rely on the fact that 4566 no ioctl has all the bits at '1' in the size field */ 4567 ie = ioctl_entries; 4568 while (ie->target_cmd != 0) { 4569 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4570 TARGET_IOC_SIZEMASK) { 4571 arg_type = ie->arg_type; 4572 if (arg_type[0] != TYPE_PTR) { 4573 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4574 ie->target_cmd); 4575 exit(1); 4576 } 4577 arg_type++; 4578 size = thunk_type_size(arg_type, 0); 4579 ie->target_cmd = (ie->target_cmd & 4580 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4581 (size << TARGET_IOC_SIZESHIFT); 4582 } 4583 4584 /* automatic consistency check if same arch */ 4585 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4586 (defined(__x86_64__) && defined(TARGET_X86_64)) 4587 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4588 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4589 ie->name, ie->target_cmd, ie->host_cmd); 4590 } 4591 #endif 4592 ie++; 4593 } 4594 } 4595 4596 #if TARGET_ABI_BITS == 32 4597 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4598 { 4599 #ifdef TARGET_WORDS_BIGENDIAN 4600 return ((uint64_t)word0 << 32) | word1; 4601 #else 4602 return ((uint64_t)word1 << 32) | word0; 4603 #endif 4604 } 4605 #else /* TARGET_ABI_BITS == 32 */ 4606 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4607 { 4608 return word0; 4609 } 4610 #endif /* TARGET_ABI_BITS != 32 */ 4611 4612 #ifdef TARGET_NR_truncate64 4613 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4614 abi_long arg2, 4615 abi_long arg3, 4616 abi_long arg4) 4617 { 4618 if (regpairs_aligned(cpu_env)) { 4619 arg2 = arg3; 4620 arg3 = arg4; 4621 } 4622 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4623 } 4624 #endif 4625 4626 #ifdef TARGET_NR_ftruncate64 4627 
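/*
 * Note (descriptive): on 32-bit ABIs a 64-bit file offset is passed in two
 * consecutive argument registers, and some ABIs (see regpairs_aligned())
 * insert a pad register first so the pair is aligned -- hence the
 * arg2/arg3/arg4 shuffling in target_truncate64() above and in
 * target_ftruncate64() below.  target_offset64() then reassembles the
 * offset according to the target's endianness; e.g. for a little-endian
 * 32-bit guest an offset of 0x100000000 arrives as word0 = 0 (low half)
 * and word1 = 1 (high half).
 */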
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4628 abi_long arg2, 4629 abi_long arg3, 4630 abi_long arg4) 4631 { 4632 if (regpairs_aligned(cpu_env)) { 4633 arg2 = arg3; 4634 arg3 = arg4; 4635 } 4636 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4637 } 4638 #endif 4639 4640 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4641 abi_ulong target_addr) 4642 { 4643 struct target_timespec *target_ts; 4644 4645 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4646 return -TARGET_EFAULT; 4647 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4648 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4649 unlock_user_struct(target_ts, target_addr, 0); 4650 return 0; 4651 } 4652 4653 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4654 struct timespec *host_ts) 4655 { 4656 struct target_timespec *target_ts; 4657 4658 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4659 return -TARGET_EFAULT; 4660 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4661 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4662 unlock_user_struct(target_ts, target_addr, 1); 4663 return 0; 4664 } 4665 4666 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 4667 abi_ulong target_addr) 4668 { 4669 struct target_itimerspec *target_itspec; 4670 4671 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 4672 return -TARGET_EFAULT; 4673 } 4674 4675 host_itspec->it_interval.tv_sec = 4676 tswapal(target_itspec->it_interval.tv_sec); 4677 host_itspec->it_interval.tv_nsec = 4678 tswapal(target_itspec->it_interval.tv_nsec); 4679 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 4680 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 4681 4682 unlock_user_struct(target_itspec, target_addr, 1); 4683 return 0; 4684 } 4685 4686 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 4687 struct itimerspec *host_its) 4688 { 4689 struct target_itimerspec *target_itspec; 4690 4691 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 4692 return -TARGET_EFAULT; 4693 } 4694 4695 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 4696 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 4697 4698 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 4699 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 4700 4701 unlock_user_struct(target_itspec, target_addr, 0); 4702 return 0; 4703 } 4704 4705 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4706 static inline abi_long host_to_target_stat64(void *cpu_env, 4707 abi_ulong target_addr, 4708 struct stat *host_st) 4709 { 4710 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 4711 if (((CPUARMState *)cpu_env)->eabi) { 4712 struct target_eabi_stat64 *target_st; 4713 4714 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4715 return -TARGET_EFAULT; 4716 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4717 __put_user(host_st->st_dev, &target_st->st_dev); 4718 __put_user(host_st->st_ino, &target_st->st_ino); 4719 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4720 __put_user(host_st->st_ino, &target_st->__st_ino); 4721 #endif 4722 __put_user(host_st->st_mode, &target_st->st_mode); 4723 __put_user(host_st->st_nlink, &target_st->st_nlink); 4724 __put_user(host_st->st_uid, &target_st->st_uid); 4725 __put_user(host_st->st_gid, &target_st->st_gid); 4726 
__put_user(host_st->st_rdev, &target_st->st_rdev); 4727 __put_user(host_st->st_size, &target_st->st_size); 4728 __put_user(host_st->st_blksize, &target_st->st_blksize); 4729 __put_user(host_st->st_blocks, &target_st->st_blocks); 4730 __put_user(host_st->st_atime, &target_st->target_st_atime); 4731 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4732 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4733 unlock_user_struct(target_st, target_addr, 1); 4734 } else 4735 #endif 4736 { 4737 #if defined(TARGET_HAS_STRUCT_STAT64) 4738 struct target_stat64 *target_st; 4739 #else 4740 struct target_stat *target_st; 4741 #endif 4742 4743 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4744 return -TARGET_EFAULT; 4745 memset(target_st, 0, sizeof(*target_st)); 4746 __put_user(host_st->st_dev, &target_st->st_dev); 4747 __put_user(host_st->st_ino, &target_st->st_ino); 4748 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4749 __put_user(host_st->st_ino, &target_st->__st_ino); 4750 #endif 4751 __put_user(host_st->st_mode, &target_st->st_mode); 4752 __put_user(host_st->st_nlink, &target_st->st_nlink); 4753 __put_user(host_st->st_uid, &target_st->st_uid); 4754 __put_user(host_st->st_gid, &target_st->st_gid); 4755 __put_user(host_st->st_rdev, &target_st->st_rdev); 4756 /* XXX: better use of kernel struct */ 4757 __put_user(host_st->st_size, &target_st->st_size); 4758 __put_user(host_st->st_blksize, &target_st->st_blksize); 4759 __put_user(host_st->st_blocks, &target_st->st_blocks); 4760 __put_user(host_st->st_atime, &target_st->target_st_atime); 4761 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4762 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4763 unlock_user_struct(target_st, target_addr, 1); 4764 } 4765 4766 return 0; 4767 } 4768 #endif 4769 4770 /* ??? Using host futex calls even when target atomic operations 4771 are not really atomic probably breaks things. However implementing 4772 futexes locally would make futexes shared between multiple processes 4773 tricky. However they're probably useless because guest atomic 4774 operations won't work either. */ 4775 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4776 target_ulong uaddr2, int val3) 4777 { 4778 struct timespec ts, *pts; 4779 int base_op; 4780 4781 /* ??? We assume FUTEX_* constants are the same on both host 4782 and target. */ 4783 #ifdef FUTEX_CMD_MASK 4784 base_op = op & FUTEX_CMD_MASK; 4785 #else 4786 base_op = op; 4787 #endif 4788 switch (base_op) { 4789 case FUTEX_WAIT: 4790 case FUTEX_WAIT_BITSET: 4791 if (timeout) { 4792 pts = &ts; 4793 target_to_host_timespec(pts, timeout); 4794 } else { 4795 pts = NULL; 4796 } 4797 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4798 pts, NULL, val3)); 4799 case FUTEX_WAKE: 4800 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4801 case FUTEX_FD: 4802 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4803 case FUTEX_REQUEUE: 4804 case FUTEX_CMP_REQUEUE: 4805 case FUTEX_WAKE_OP: 4806 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4807 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4808 But the prototype takes a `struct timespec *'; insert casts 4809 to satisfy the compiler. We do not need to tswap TIMEOUT 4810 since it's not compared to guest memory. */ 4811 pts = (struct timespec *)(uintptr_t) timeout; 4812 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4813 g2h(uaddr2), 4814 (base_op == FUTEX_CMP_REQUEUE 4815 ? 
tswap32(val3) 4816 : val3))); 4817 default: 4818 return -TARGET_ENOSYS; 4819 } 4820 } 4821 4822 /* Map host to target signal numbers for the wait family of syscalls. 4823 Assume all other status bits are the same. */ 4824 int host_to_target_waitstatus(int status) 4825 { 4826 if (WIFSIGNALED(status)) { 4827 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4828 } 4829 if (WIFSTOPPED(status)) { 4830 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4831 | (status & 0xff); 4832 } 4833 return status; 4834 } 4835 4836 static int relstr_to_int(const char *s) 4837 { 4838 /* Convert a uname release string like "2.6.18" to an integer 4839 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.) 4840 */ 4841 int i, n, tmp; 4842 4843 tmp = 0; 4844 for (i = 0; i < 3; i++) { 4845 n = 0; 4846 while (*s >= '0' && *s <= '9') { 4847 n *= 10; 4848 n += *s - '0'; 4849 s++; 4850 } 4851 tmp = (tmp << 8) + n; 4852 if (*s == '.') { 4853 s++; 4854 } 4855 } 4856 return tmp; 4857 } 4858 4859 int get_osversion(void) 4860 { 4861 static int osversion; 4862 struct new_utsname buf; 4863 const char *s; 4864 4865 if (osversion) 4866 return osversion; 4867 if (qemu_uname_release && *qemu_uname_release) { 4868 s = qemu_uname_release; 4869 } else { 4870 if (sys_uname(&buf)) 4871 return 0; 4872 s = buf.release; 4873 } 4874 osversion = relstr_to_int(s); 4875 return osversion; 4876 } 4877 4878 void init_qemu_uname_release(void) 4879 { 4880 /* Initialize qemu_uname_release for later use. 4881 * If the host kernel is too old and the user hasn't asked for 4882 * a specific fake version number, we might want to fake a minimum 4883 * target kernel version. 4884 */ 4885 #ifdef UNAME_MINIMUM_RELEASE 4886 struct new_utsname buf; 4887 4888 if (qemu_uname_release && *qemu_uname_release) { 4889 return; 4890 } 4891 4892 if (sys_uname(&buf)) { 4893 return; 4894 } 4895 4896 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) { 4897 qemu_uname_release = UNAME_MINIMUM_RELEASE; 4898 } 4899 #endif 4900 } 4901 4902 static int open_self_maps(void *cpu_env, int fd) 4903 { 4904 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4905 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4906 #endif 4907 FILE *fp; 4908 char *line = NULL; 4909 size_t len = 0; 4910 ssize_t read; 4911 4912 fp = fopen("/proc/self/maps", "r"); 4913 if (fp == NULL) { 4914 return -EACCES; 4915 } 4916 4917 while ((read = getline(&line, &len, fp)) != -1) { 4918 int fields, dev_maj, dev_min, inode; 4919 uint64_t min, max, offset; 4920 char flag_r, flag_w, flag_x, flag_p; 4921 char path[512] = ""; 4922 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 4923 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 4924 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 4925 4926 if ((fields < 10) || (fields > 11)) { 4927 continue; 4928 } 4929 if (!strncmp(path, "[stack]", 7)) { 4930 continue; 4931 } 4932 if (h2g_valid(min) && h2g_valid(max)) { 4933 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 4934 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 4935 h2g(min), h2g(max), flag_r, flag_w, 4936 flag_x, flag_p, offset, dev_maj, dev_min, inode, 4937 path[0] ? 
" " : "", path); 4938 } 4939 } 4940 4941 free(line); 4942 fclose(fp); 4943 4944 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4945 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 4946 (unsigned long long)ts->info->stack_limit, 4947 (unsigned long long)(ts->info->start_stack + 4948 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 4949 (unsigned long long)0); 4950 #endif 4951 4952 return 0; 4953 } 4954 4955 static int open_self_stat(void *cpu_env, int fd) 4956 { 4957 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4958 abi_ulong start_stack = ts->info->start_stack; 4959 int i; 4960 4961 for (i = 0; i < 44; i++) { 4962 char buf[128]; 4963 int len; 4964 uint64_t val = 0; 4965 4966 if (i == 0) { 4967 /* pid */ 4968 val = getpid(); 4969 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4970 } else if (i == 1) { 4971 /* app name */ 4972 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 4973 } else if (i == 27) { 4974 /* stack bottom */ 4975 val = start_stack; 4976 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4977 } else { 4978 /* for the rest, there is MasterCard */ 4979 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 4980 } 4981 4982 len = strlen(buf); 4983 if (write(fd, buf, len) != len) { 4984 return -1; 4985 } 4986 } 4987 4988 return 0; 4989 } 4990 4991 static int open_self_auxv(void *cpu_env, int fd) 4992 { 4993 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4994 abi_ulong auxv = ts->info->saved_auxv; 4995 abi_ulong len = ts->info->auxv_len; 4996 char *ptr; 4997 4998 /* 4999 * Auxiliary vector is stored in target process stack. 5000 * read in whole auxv vector and copy it to file 5001 */ 5002 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5003 if (ptr != NULL) { 5004 while (len > 0) { 5005 ssize_t r; 5006 r = write(fd, ptr, len); 5007 if (r <= 0) { 5008 break; 5009 } 5010 len -= r; 5011 ptr += r; 5012 } 5013 lseek(fd, 0, SEEK_SET); 5014 unlock_user(ptr, auxv, len); 5015 } 5016 5017 return 0; 5018 } 5019 5020 static int is_proc_myself(const char *filename, const char *entry) 5021 { 5022 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 5023 filename += strlen("/proc/"); 5024 if (!strncmp(filename, "self/", strlen("self/"))) { 5025 filename += strlen("self/"); 5026 } else if (*filename >= '1' && *filename <= '9') { 5027 char myself[80]; 5028 snprintf(myself, sizeof(myself), "%d/", getpid()); 5029 if (!strncmp(filename, myself, strlen(myself))) { 5030 filename += strlen(myself); 5031 } else { 5032 return 0; 5033 } 5034 } else { 5035 return 0; 5036 } 5037 if (!strcmp(filename, entry)) { 5038 return 1; 5039 } 5040 } 5041 return 0; 5042 } 5043 5044 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5045 static int is_proc(const char *filename, const char *entry) 5046 { 5047 return strcmp(filename, entry) == 0; 5048 } 5049 5050 static int open_net_route(void *cpu_env, int fd) 5051 { 5052 FILE *fp; 5053 char *line = NULL; 5054 size_t len = 0; 5055 ssize_t read; 5056 5057 fp = fopen("/proc/net/route", "r"); 5058 if (fp == NULL) { 5059 return -EACCES; 5060 } 5061 5062 /* read header */ 5063 5064 read = getline(&line, &len, fp); 5065 dprintf(fd, "%s", line); 5066 5067 /* read routes */ 5068 5069 while ((read = getline(&line, &len, fp)) != -1) { 5070 char iface[16]; 5071 uint32_t dest, gw, mask; 5072 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 5073 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5074 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 5075 &mask, &mtu, &window, &irtt); 5076 
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            close(fd);
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return get_errno(open(path(pathname), flags, mode));
}

/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_long ret;
    struct stat st;
    struct statfs stfs;
    void *p;

#ifdef DEBUG
    gemu_log("syscall %d", num);
#endif
    if(do_strace)
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread. */
        /* FIXME: This probably breaks if a signal arrives. We should probably
           be disabling signals. */
        if (CPU_NEXT(first_cpu)) {
            TaskState *ts;

            cpu_list_lock();
            /* Remove the CPU from the list.
*/ 5173 QTAILQ_REMOVE(&cpus, cpu, node); 5174 cpu_list_unlock(); 5175 ts = ((CPUArchState *)cpu_env)->opaque; 5176 if (ts->child_tidptr) { 5177 put_user_u32(0, ts->child_tidptr); 5178 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5179 NULL, NULL, 0); 5180 } 5181 thread_cpu = NULL; 5182 object_unref(OBJECT(ENV_GET_CPU(cpu_env))); 5183 g_free(ts); 5184 pthread_exit(NULL); 5185 } 5186 #ifdef TARGET_GPROF 5187 _mcleanup(); 5188 #endif 5189 gdb_exit(cpu_env, arg1); 5190 _exit(arg1); 5191 ret = 0; /* avoid warning */ 5192 break; 5193 case TARGET_NR_read: 5194 if (arg3 == 0) 5195 ret = 0; 5196 else { 5197 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5198 goto efault; 5199 ret = get_errno(read(arg1, p, arg3)); 5200 unlock_user(p, arg2, ret); 5201 } 5202 break; 5203 case TARGET_NR_write: 5204 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5205 goto efault; 5206 ret = get_errno(write(arg1, p, arg3)); 5207 unlock_user(p, arg2, 0); 5208 break; 5209 case TARGET_NR_open: 5210 if (!(p = lock_user_string(arg1))) 5211 goto efault; 5212 ret = get_errno(do_open(cpu_env, p, 5213 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5214 arg3)); 5215 unlock_user(p, arg1, 0); 5216 break; 5217 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5218 case TARGET_NR_openat: 5219 if (!(p = lock_user_string(arg2))) 5220 goto efault; 5221 ret = get_errno(sys_openat(arg1, 5222 path(p), 5223 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5224 arg4)); 5225 unlock_user(p, arg2, 0); 5226 break; 5227 #endif 5228 case TARGET_NR_close: 5229 ret = get_errno(close(arg1)); 5230 break; 5231 case TARGET_NR_brk: 5232 ret = do_brk(arg1); 5233 break; 5234 case TARGET_NR_fork: 5235 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5236 break; 5237 #ifdef TARGET_NR_waitpid 5238 case TARGET_NR_waitpid: 5239 { 5240 int status; 5241 ret = get_errno(waitpid(arg1, &status, arg3)); 5242 if (!is_error(ret) && arg2 && ret 5243 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5244 goto efault; 5245 } 5246 break; 5247 #endif 5248 #ifdef TARGET_NR_waitid 5249 case TARGET_NR_waitid: 5250 { 5251 siginfo_t info; 5252 info.si_pid = 0; 5253 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5254 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5255 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5256 goto efault; 5257 host_to_target_siginfo(p, &info); 5258 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5259 } 5260 } 5261 break; 5262 #endif 5263 #ifdef TARGET_NR_creat /* not on alpha */ 5264 case TARGET_NR_creat: 5265 if (!(p = lock_user_string(arg1))) 5266 goto efault; 5267 ret = get_errno(creat(p, arg2)); 5268 unlock_user(p, arg1, 0); 5269 break; 5270 #endif 5271 case TARGET_NR_link: 5272 { 5273 void * p2; 5274 p = lock_user_string(arg1); 5275 p2 = lock_user_string(arg2); 5276 if (!p || !p2) 5277 ret = -TARGET_EFAULT; 5278 else 5279 ret = get_errno(link(p, p2)); 5280 unlock_user(p2, arg2, 0); 5281 unlock_user(p, arg1, 0); 5282 } 5283 break; 5284 #if defined(TARGET_NR_linkat) 5285 case TARGET_NR_linkat: 5286 { 5287 void * p2 = NULL; 5288 if (!arg2 || !arg4) 5289 goto efault; 5290 p = lock_user_string(arg2); 5291 p2 = lock_user_string(arg4); 5292 if (!p || !p2) 5293 ret = -TARGET_EFAULT; 5294 else 5295 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5296 unlock_user(p, arg2, 0); 5297 unlock_user(p2, arg4, 0); 5298 } 5299 break; 5300 #endif 5301 case TARGET_NR_unlink: 5302 if (!(p = lock_user_string(arg1))) 5303 goto efault; 5304 ret = get_errno(unlink(p)); 5305 unlock_user(p, arg1, 0); 5306 break; 
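        /* Note (descriptive): the path-based cases above and below all follow
         * the same pattern: lock_user_string() pins and copies the guest path,
         * the host syscall result is converted with get_errno(), and
         * unlock_user() releases the guest buffer (writing back 0 bytes,
         * since the path is only read). */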
5307 #if defined(TARGET_NR_unlinkat) 5308 case TARGET_NR_unlinkat: 5309 if (!(p = lock_user_string(arg2))) 5310 goto efault; 5311 ret = get_errno(unlinkat(arg1, p, arg3)); 5312 unlock_user(p, arg2, 0); 5313 break; 5314 #endif 5315 case TARGET_NR_execve: 5316 { 5317 char **argp, **envp; 5318 int argc, envc; 5319 abi_ulong gp; 5320 abi_ulong guest_argp; 5321 abi_ulong guest_envp; 5322 abi_ulong addr; 5323 char **q; 5324 int total_size = 0; 5325 5326 argc = 0; 5327 guest_argp = arg2; 5328 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5329 if (get_user_ual(addr, gp)) 5330 goto efault; 5331 if (!addr) 5332 break; 5333 argc++; 5334 } 5335 envc = 0; 5336 guest_envp = arg3; 5337 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5338 if (get_user_ual(addr, gp)) 5339 goto efault; 5340 if (!addr) 5341 break; 5342 envc++; 5343 } 5344 5345 argp = alloca((argc + 1) * sizeof(void *)); 5346 envp = alloca((envc + 1) * sizeof(void *)); 5347 5348 for (gp = guest_argp, q = argp; gp; 5349 gp += sizeof(abi_ulong), q++) { 5350 if (get_user_ual(addr, gp)) 5351 goto execve_efault; 5352 if (!addr) 5353 break; 5354 if (!(*q = lock_user_string(addr))) 5355 goto execve_efault; 5356 total_size += strlen(*q) + 1; 5357 } 5358 *q = NULL; 5359 5360 for (gp = guest_envp, q = envp; gp; 5361 gp += sizeof(abi_ulong), q++) { 5362 if (get_user_ual(addr, gp)) 5363 goto execve_efault; 5364 if (!addr) 5365 break; 5366 if (!(*q = lock_user_string(addr))) 5367 goto execve_efault; 5368 total_size += strlen(*q) + 1; 5369 } 5370 *q = NULL; 5371 5372 /* This case will not be caught by the host's execve() if its 5373 page size is bigger than the target's. */ 5374 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5375 ret = -TARGET_E2BIG; 5376 goto execve_end; 5377 } 5378 if (!(p = lock_user_string(arg1))) 5379 goto execve_efault; 5380 ret = get_errno(execve(p, argp, envp)); 5381 unlock_user(p, arg1, 0); 5382 5383 goto execve_end; 5384 5385 execve_efault: 5386 ret = -TARGET_EFAULT; 5387 5388 execve_end: 5389 for (gp = guest_argp, q = argp; *q; 5390 gp += sizeof(abi_ulong), q++) { 5391 if (get_user_ual(addr, gp) 5392 || !addr) 5393 break; 5394 unlock_user(*q, addr, 0); 5395 } 5396 for (gp = guest_envp, q = envp; *q; 5397 gp += sizeof(abi_ulong), q++) { 5398 if (get_user_ual(addr, gp) 5399 || !addr) 5400 break; 5401 unlock_user(*q, addr, 0); 5402 } 5403 } 5404 break; 5405 case TARGET_NR_chdir: 5406 if (!(p = lock_user_string(arg1))) 5407 goto efault; 5408 ret = get_errno(chdir(p)); 5409 unlock_user(p, arg1, 0); 5410 break; 5411 #ifdef TARGET_NR_time 5412 case TARGET_NR_time: 5413 { 5414 time_t host_time; 5415 ret = get_errno(time(&host_time)); 5416 if (!is_error(ret) 5417 && arg1 5418 && put_user_sal(host_time, arg1)) 5419 goto efault; 5420 } 5421 break; 5422 #endif 5423 case TARGET_NR_mknod: 5424 if (!(p = lock_user_string(arg1))) 5425 goto efault; 5426 ret = get_errno(mknod(p, arg2, arg3)); 5427 unlock_user(p, arg1, 0); 5428 break; 5429 #if defined(TARGET_NR_mknodat) 5430 case TARGET_NR_mknodat: 5431 if (!(p = lock_user_string(arg2))) 5432 goto efault; 5433 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5434 unlock_user(p, arg2, 0); 5435 break; 5436 #endif 5437 case TARGET_NR_chmod: 5438 if (!(p = lock_user_string(arg1))) 5439 goto efault; 5440 ret = get_errno(chmod(p, arg2)); 5441 unlock_user(p, arg1, 0); 5442 break; 5443 #ifdef TARGET_NR_break 5444 case TARGET_NR_break: 5445 goto unimplemented; 5446 #endif 5447 #ifdef TARGET_NR_oldstat 5448 case TARGET_NR_oldstat: 5449 goto unimplemented; 5450 #endif 5451 case 
TARGET_NR_lseek: 5452 ret = get_errno(lseek(arg1, arg2, arg3)); 5453 break; 5454 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5455 /* Alpha specific */ 5456 case TARGET_NR_getxpid: 5457 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5458 ret = get_errno(getpid()); 5459 break; 5460 #endif 5461 #ifdef TARGET_NR_getpid 5462 case TARGET_NR_getpid: 5463 ret = get_errno(getpid()); 5464 break; 5465 #endif 5466 case TARGET_NR_mount: 5467 { 5468 /* need to look at the data field */ 5469 void *p2, *p3; 5470 p = lock_user_string(arg1); 5471 p2 = lock_user_string(arg2); 5472 p3 = lock_user_string(arg3); 5473 if (!p || !p2 || !p3) 5474 ret = -TARGET_EFAULT; 5475 else { 5476 /* FIXME - arg5 should be locked, but it isn't clear how to 5477 * do that since it's not guaranteed to be a NULL-terminated 5478 * string. 5479 */ 5480 if ( ! arg5 ) 5481 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5482 else 5483 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5484 } 5485 unlock_user(p, arg1, 0); 5486 unlock_user(p2, arg2, 0); 5487 unlock_user(p3, arg3, 0); 5488 break; 5489 } 5490 #ifdef TARGET_NR_umount 5491 case TARGET_NR_umount: 5492 if (!(p = lock_user_string(arg1))) 5493 goto efault; 5494 ret = get_errno(umount(p)); 5495 unlock_user(p, arg1, 0); 5496 break; 5497 #endif 5498 #ifdef TARGET_NR_stime /* not on alpha */ 5499 case TARGET_NR_stime: 5500 { 5501 time_t host_time; 5502 if (get_user_sal(host_time, arg1)) 5503 goto efault; 5504 ret = get_errno(stime(&host_time)); 5505 } 5506 break; 5507 #endif 5508 case TARGET_NR_ptrace: 5509 goto unimplemented; 5510 #ifdef TARGET_NR_alarm /* not on alpha */ 5511 case TARGET_NR_alarm: 5512 ret = alarm(arg1); 5513 break; 5514 #endif 5515 #ifdef TARGET_NR_oldfstat 5516 case TARGET_NR_oldfstat: 5517 goto unimplemented; 5518 #endif 5519 #ifdef TARGET_NR_pause /* not on alpha */ 5520 case TARGET_NR_pause: 5521 ret = get_errno(pause()); 5522 break; 5523 #endif 5524 #ifdef TARGET_NR_utime 5525 case TARGET_NR_utime: 5526 { 5527 struct utimbuf tbuf, *host_tbuf; 5528 struct target_utimbuf *target_tbuf; 5529 if (arg2) { 5530 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5531 goto efault; 5532 tbuf.actime = tswapal(target_tbuf->actime); 5533 tbuf.modtime = tswapal(target_tbuf->modtime); 5534 unlock_user_struct(target_tbuf, arg2, 0); 5535 host_tbuf = &tbuf; 5536 } else { 5537 host_tbuf = NULL; 5538 } 5539 if (!(p = lock_user_string(arg1))) 5540 goto efault; 5541 ret = get_errno(utime(p, host_tbuf)); 5542 unlock_user(p, arg1, 0); 5543 } 5544 break; 5545 #endif 5546 case TARGET_NR_utimes: 5547 { 5548 struct timeval *tvp, tv[2]; 5549 if (arg2) { 5550 if (copy_from_user_timeval(&tv[0], arg2) 5551 || copy_from_user_timeval(&tv[1], 5552 arg2 + sizeof(struct target_timeval))) 5553 goto efault; 5554 tvp = tv; 5555 } else { 5556 tvp = NULL; 5557 } 5558 if (!(p = lock_user_string(arg1))) 5559 goto efault; 5560 ret = get_errno(utimes(p, tvp)); 5561 unlock_user(p, arg1, 0); 5562 } 5563 break; 5564 #if defined(TARGET_NR_futimesat) 5565 case TARGET_NR_futimesat: 5566 { 5567 struct timeval *tvp, tv[2]; 5568 if (arg3) { 5569 if (copy_from_user_timeval(&tv[0], arg3) 5570 || copy_from_user_timeval(&tv[1], 5571 arg3 + sizeof(struct target_timeval))) 5572 goto efault; 5573 tvp = tv; 5574 } else { 5575 tvp = NULL; 5576 } 5577 if (!(p = lock_user_string(arg2))) 5578 goto efault; 5579 ret = get_errno(futimesat(arg1, path(p), tvp)); 5580 unlock_user(p, arg2, 0); 5581 } 5582 break; 5583 #endif 5584 #ifdef TARGET_NR_stty 5585 case TARGET_NR_stty: 
5586 goto unimplemented; 5587 #endif 5588 #ifdef TARGET_NR_gtty 5589 case TARGET_NR_gtty: 5590 goto unimplemented; 5591 #endif 5592 case TARGET_NR_access: 5593 if (!(p = lock_user_string(arg1))) 5594 goto efault; 5595 ret = get_errno(access(path(p), arg2)); 5596 unlock_user(p, arg1, 0); 5597 break; 5598 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5599 case TARGET_NR_faccessat: 5600 if (!(p = lock_user_string(arg2))) 5601 goto efault; 5602 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5603 unlock_user(p, arg2, 0); 5604 break; 5605 #endif 5606 #ifdef TARGET_NR_nice /* not on alpha */ 5607 case TARGET_NR_nice: 5608 ret = get_errno(nice(arg1)); 5609 break; 5610 #endif 5611 #ifdef TARGET_NR_ftime 5612 case TARGET_NR_ftime: 5613 goto unimplemented; 5614 #endif 5615 case TARGET_NR_sync: 5616 sync(); 5617 ret = 0; 5618 break; 5619 case TARGET_NR_kill: 5620 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5621 break; 5622 case TARGET_NR_rename: 5623 { 5624 void *p2; 5625 p = lock_user_string(arg1); 5626 p2 = lock_user_string(arg2); 5627 if (!p || !p2) 5628 ret = -TARGET_EFAULT; 5629 else 5630 ret = get_errno(rename(p, p2)); 5631 unlock_user(p2, arg2, 0); 5632 unlock_user(p, arg1, 0); 5633 } 5634 break; 5635 #if defined(TARGET_NR_renameat) 5636 case TARGET_NR_renameat: 5637 { 5638 void *p2; 5639 p = lock_user_string(arg2); 5640 p2 = lock_user_string(arg4); 5641 if (!p || !p2) 5642 ret = -TARGET_EFAULT; 5643 else 5644 ret = get_errno(renameat(arg1, p, arg3, p2)); 5645 unlock_user(p2, arg4, 0); 5646 unlock_user(p, arg2, 0); 5647 } 5648 break; 5649 #endif 5650 case TARGET_NR_mkdir: 5651 if (!(p = lock_user_string(arg1))) 5652 goto efault; 5653 ret = get_errno(mkdir(p, arg2)); 5654 unlock_user(p, arg1, 0); 5655 break; 5656 #if defined(TARGET_NR_mkdirat) 5657 case TARGET_NR_mkdirat: 5658 if (!(p = lock_user_string(arg2))) 5659 goto efault; 5660 ret = get_errno(mkdirat(arg1, p, arg3)); 5661 unlock_user(p, arg2, 0); 5662 break; 5663 #endif 5664 case TARGET_NR_rmdir: 5665 if (!(p = lock_user_string(arg1))) 5666 goto efault; 5667 ret = get_errno(rmdir(p)); 5668 unlock_user(p, arg1, 0); 5669 break; 5670 case TARGET_NR_dup: 5671 ret = get_errno(dup(arg1)); 5672 break; 5673 case TARGET_NR_pipe: 5674 ret = do_pipe(cpu_env, arg1, 0, 0); 5675 break; 5676 #ifdef TARGET_NR_pipe2 5677 case TARGET_NR_pipe2: 5678 ret = do_pipe(cpu_env, arg1, 5679 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5680 break; 5681 #endif 5682 case TARGET_NR_times: 5683 { 5684 struct target_tms *tmsp; 5685 struct tms tms; 5686 ret = get_errno(times(&tms)); 5687 if (arg1) { 5688 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5689 if (!tmsp) 5690 goto efault; 5691 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5692 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5693 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5694 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5695 } 5696 if (!is_error(ret)) 5697 ret = host_to_target_clock_t(ret); 5698 } 5699 break; 5700 #ifdef TARGET_NR_prof 5701 case TARGET_NR_prof: 5702 goto unimplemented; 5703 #endif 5704 #ifdef TARGET_NR_signal 5705 case TARGET_NR_signal: 5706 goto unimplemented; 5707 #endif 5708 case TARGET_NR_acct: 5709 if (arg1 == 0) { 5710 ret = get_errno(acct(NULL)); 5711 } else { 5712 if (!(p = lock_user_string(arg1))) 5713 goto efault; 5714 ret = get_errno(acct(path(p))); 5715 unlock_user(p, arg1, 0); 5716 } 5717 break; 5718 #ifdef TARGET_NR_umount2 5719 case 
TARGET_NR_umount2: 5720 if (!(p = lock_user_string(arg1))) 5721 goto efault; 5722 ret = get_errno(umount2(p, arg2)); 5723 unlock_user(p, arg1, 0); 5724 break; 5725 #endif 5726 #ifdef TARGET_NR_lock 5727 case TARGET_NR_lock: 5728 goto unimplemented; 5729 #endif 5730 case TARGET_NR_ioctl: 5731 ret = do_ioctl(arg1, arg2, arg3); 5732 break; 5733 case TARGET_NR_fcntl: 5734 ret = do_fcntl(arg1, arg2, arg3); 5735 break; 5736 #ifdef TARGET_NR_mpx 5737 case TARGET_NR_mpx: 5738 goto unimplemented; 5739 #endif 5740 case TARGET_NR_setpgid: 5741 ret = get_errno(setpgid(arg1, arg2)); 5742 break; 5743 #ifdef TARGET_NR_ulimit 5744 case TARGET_NR_ulimit: 5745 goto unimplemented; 5746 #endif 5747 #ifdef TARGET_NR_oldolduname 5748 case TARGET_NR_oldolduname: 5749 goto unimplemented; 5750 #endif 5751 case TARGET_NR_umask: 5752 ret = get_errno(umask(arg1)); 5753 break; 5754 case TARGET_NR_chroot: 5755 if (!(p = lock_user_string(arg1))) 5756 goto efault; 5757 ret = get_errno(chroot(p)); 5758 unlock_user(p, arg1, 0); 5759 break; 5760 case TARGET_NR_ustat: 5761 goto unimplemented; 5762 case TARGET_NR_dup2: 5763 ret = get_errno(dup2(arg1, arg2)); 5764 break; 5765 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5766 case TARGET_NR_dup3: 5767 ret = get_errno(dup3(arg1, arg2, arg3)); 5768 break; 5769 #endif 5770 #ifdef TARGET_NR_getppid /* not on alpha */ 5771 case TARGET_NR_getppid: 5772 ret = get_errno(getppid()); 5773 break; 5774 #endif 5775 case TARGET_NR_getpgrp: 5776 ret = get_errno(getpgrp()); 5777 break; 5778 case TARGET_NR_setsid: 5779 ret = get_errno(setsid()); 5780 break; 5781 #ifdef TARGET_NR_sigaction 5782 case TARGET_NR_sigaction: 5783 { 5784 #if defined(TARGET_ALPHA) 5785 struct target_sigaction act, oact, *pact = 0; 5786 struct target_old_sigaction *old_act; 5787 if (arg2) { 5788 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5789 goto efault; 5790 act._sa_handler = old_act->_sa_handler; 5791 target_siginitset(&act.sa_mask, old_act->sa_mask); 5792 act.sa_flags = old_act->sa_flags; 5793 act.sa_restorer = 0; 5794 unlock_user_struct(old_act, arg2, 0); 5795 pact = &act; 5796 } 5797 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5798 if (!is_error(ret) && arg3) { 5799 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5800 goto efault; 5801 old_act->_sa_handler = oact._sa_handler; 5802 old_act->sa_mask = oact.sa_mask.sig[0]; 5803 old_act->sa_flags = oact.sa_flags; 5804 unlock_user_struct(old_act, arg3, 1); 5805 } 5806 #elif defined(TARGET_MIPS) 5807 struct target_sigaction act, oact, *pact, *old_act; 5808 5809 if (arg2) { 5810 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5811 goto efault; 5812 act._sa_handler = old_act->_sa_handler; 5813 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5814 act.sa_flags = old_act->sa_flags; 5815 unlock_user_struct(old_act, arg2, 0); 5816 pact = &act; 5817 } else { 5818 pact = NULL; 5819 } 5820 5821 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5822 5823 if (!is_error(ret) && arg3) { 5824 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5825 goto efault; 5826 old_act->_sa_handler = oact._sa_handler; 5827 old_act->sa_flags = oact.sa_flags; 5828 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5829 old_act->sa_mask.sig[1] = 0; 5830 old_act->sa_mask.sig[2] = 0; 5831 old_act->sa_mask.sig[3] = 0; 5832 unlock_user_struct(old_act, arg3, 1); 5833 } 5834 #else 5835 struct target_old_sigaction *old_act; 5836 struct target_sigaction act, oact, *pact; 5837 if (arg2) { 5838 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5839 goto 
efault; 5840 act._sa_handler = old_act->_sa_handler; 5841 target_siginitset(&act.sa_mask, old_act->sa_mask); 5842 act.sa_flags = old_act->sa_flags; 5843 act.sa_restorer = old_act->sa_restorer; 5844 unlock_user_struct(old_act, arg2, 0); 5845 pact = &act; 5846 } else { 5847 pact = NULL; 5848 } 5849 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5850 if (!is_error(ret) && arg3) { 5851 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5852 goto efault; 5853 old_act->_sa_handler = oact._sa_handler; 5854 old_act->sa_mask = oact.sa_mask.sig[0]; 5855 old_act->sa_flags = oact.sa_flags; 5856 old_act->sa_restorer = oact.sa_restorer; 5857 unlock_user_struct(old_act, arg3, 1); 5858 } 5859 #endif 5860 } 5861 break; 5862 #endif 5863 case TARGET_NR_rt_sigaction: 5864 { 5865 #if defined(TARGET_ALPHA) 5866 struct target_sigaction act, oact, *pact = 0; 5867 struct target_rt_sigaction *rt_act; 5868 /* ??? arg4 == sizeof(sigset_t). */ 5869 if (arg2) { 5870 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5871 goto efault; 5872 act._sa_handler = rt_act->_sa_handler; 5873 act.sa_mask = rt_act->sa_mask; 5874 act.sa_flags = rt_act->sa_flags; 5875 act.sa_restorer = arg5; 5876 unlock_user_struct(rt_act, arg2, 0); 5877 pact = &act; 5878 } 5879 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5880 if (!is_error(ret) && arg3) { 5881 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5882 goto efault; 5883 rt_act->_sa_handler = oact._sa_handler; 5884 rt_act->sa_mask = oact.sa_mask; 5885 rt_act->sa_flags = oact.sa_flags; 5886 unlock_user_struct(rt_act, arg3, 1); 5887 } 5888 #else 5889 struct target_sigaction *act; 5890 struct target_sigaction *oact; 5891 5892 if (arg2) { 5893 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5894 goto efault; 5895 } else 5896 act = NULL; 5897 if (arg3) { 5898 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5899 ret = -TARGET_EFAULT; 5900 goto rt_sigaction_fail; 5901 } 5902 } else 5903 oact = NULL; 5904 ret = get_errno(do_sigaction(arg1, act, oact)); 5905 rt_sigaction_fail: 5906 if (act) 5907 unlock_user_struct(act, arg2, 0); 5908 if (oact) 5909 unlock_user_struct(oact, arg3, 1); 5910 #endif 5911 } 5912 break; 5913 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5914 case TARGET_NR_sgetmask: 5915 { 5916 sigset_t cur_set; 5917 abi_ulong target_set; 5918 sigprocmask(0, NULL, &cur_set); 5919 host_to_target_old_sigset(&target_set, &cur_set); 5920 ret = target_set; 5921 } 5922 break; 5923 #endif 5924 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5925 case TARGET_NR_ssetmask: 5926 { 5927 sigset_t set, oset, cur_set; 5928 abi_ulong target_set = arg1; 5929 sigprocmask(0, NULL, &cur_set); 5930 target_to_host_old_sigset(&set, &target_set); 5931 sigorset(&set, &set, &cur_set); 5932 sigprocmask(SIG_SETMASK, &set, &oset); 5933 host_to_target_old_sigset(&target_set, &oset); 5934 ret = target_set; 5935 } 5936 break; 5937 #endif 5938 #ifdef TARGET_NR_sigprocmask 5939 case TARGET_NR_sigprocmask: 5940 { 5941 #if defined(TARGET_ALPHA) 5942 sigset_t set, oldset; 5943 abi_ulong mask; 5944 int how; 5945 5946 switch (arg1) { 5947 case TARGET_SIG_BLOCK: 5948 how = SIG_BLOCK; 5949 break; 5950 case TARGET_SIG_UNBLOCK: 5951 how = SIG_UNBLOCK; 5952 break; 5953 case TARGET_SIG_SETMASK: 5954 how = SIG_SETMASK; 5955 break; 5956 default: 5957 ret = -TARGET_EINVAL; 5958 goto fail; 5959 } 5960 mask = arg2; 5961 target_to_host_old_sigset(&set, &mask); 5962 5963 ret = get_errno(sigprocmask(how, &set, &oldset)); 5964 if (!is_error(ret)) { 5965 host_to_target_old_sigset(&mask, &oldset); 5966 ret = mask; 5967 
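            /* Note: unlike rt_sigprocmask below, the legacy sigprocmask
               interface returns the previous signal mask directly as the
               syscall result rather than writing it through a user pointer;
               ret was set to that converted mask above. */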
((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 5968 } 5969 #else 5970 sigset_t set, oldset, *set_ptr; 5971 int how; 5972 5973 if (arg2) { 5974 switch (arg1) { 5975 case TARGET_SIG_BLOCK: 5976 how = SIG_BLOCK; 5977 break; 5978 case TARGET_SIG_UNBLOCK: 5979 how = SIG_UNBLOCK; 5980 break; 5981 case TARGET_SIG_SETMASK: 5982 how = SIG_SETMASK; 5983 break; 5984 default: 5985 ret = -TARGET_EINVAL; 5986 goto fail; 5987 } 5988 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5989 goto efault; 5990 target_to_host_old_sigset(&set, p); 5991 unlock_user(p, arg2, 0); 5992 set_ptr = &set; 5993 } else { 5994 how = 0; 5995 set_ptr = NULL; 5996 } 5997 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 5998 if (!is_error(ret) && arg3) { 5999 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6000 goto efault; 6001 host_to_target_old_sigset(p, &oldset); 6002 unlock_user(p, arg3, sizeof(target_sigset_t)); 6003 } 6004 #endif 6005 } 6006 break; 6007 #endif 6008 case TARGET_NR_rt_sigprocmask: 6009 { 6010 int how = arg1; 6011 sigset_t set, oldset, *set_ptr; 6012 6013 if (arg2) { 6014 switch(how) { 6015 case TARGET_SIG_BLOCK: 6016 how = SIG_BLOCK; 6017 break; 6018 case TARGET_SIG_UNBLOCK: 6019 how = SIG_UNBLOCK; 6020 break; 6021 case TARGET_SIG_SETMASK: 6022 how = SIG_SETMASK; 6023 break; 6024 default: 6025 ret = -TARGET_EINVAL; 6026 goto fail; 6027 } 6028 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6029 goto efault; 6030 target_to_host_sigset(&set, p); 6031 unlock_user(p, arg2, 0); 6032 set_ptr = &set; 6033 } else { 6034 how = 0; 6035 set_ptr = NULL; 6036 } 6037 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6038 if (!is_error(ret) && arg3) { 6039 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6040 goto efault; 6041 host_to_target_sigset(p, &oldset); 6042 unlock_user(p, arg3, sizeof(target_sigset_t)); 6043 } 6044 } 6045 break; 6046 #ifdef TARGET_NR_sigpending 6047 case TARGET_NR_sigpending: 6048 { 6049 sigset_t set; 6050 ret = get_errno(sigpending(&set)); 6051 if (!is_error(ret)) { 6052 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6053 goto efault; 6054 host_to_target_old_sigset(p, &set); 6055 unlock_user(p, arg1, sizeof(target_sigset_t)); 6056 } 6057 } 6058 break; 6059 #endif 6060 case TARGET_NR_rt_sigpending: 6061 { 6062 sigset_t set; 6063 ret = get_errno(sigpending(&set)); 6064 if (!is_error(ret)) { 6065 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6066 goto efault; 6067 host_to_target_sigset(p, &set); 6068 unlock_user(p, arg1, sizeof(target_sigset_t)); 6069 } 6070 } 6071 break; 6072 #ifdef TARGET_NR_sigsuspend 6073 case TARGET_NR_sigsuspend: 6074 { 6075 sigset_t set; 6076 #if defined(TARGET_ALPHA) 6077 abi_ulong mask = arg1; 6078 target_to_host_old_sigset(&set, &mask); 6079 #else 6080 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6081 goto efault; 6082 target_to_host_old_sigset(&set, p); 6083 unlock_user(p, arg1, 0); 6084 #endif 6085 ret = get_errno(sigsuspend(&set)); 6086 } 6087 break; 6088 #endif 6089 case TARGET_NR_rt_sigsuspend: 6090 { 6091 sigset_t set; 6092 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6093 goto efault; 6094 target_to_host_sigset(&set, p); 6095 unlock_user(p, arg1, 0); 6096 ret = get_errno(sigsuspend(&set)); 6097 } 6098 break; 6099 case TARGET_NR_rt_sigtimedwait: 6100 { 6101 sigset_t set; 6102 struct timespec uts, *puts; 6103 siginfo_t uinfo; 6104 6105 if (!(p = 
lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                target_to_host_timespec(puts, arg3);
            } else {
                puts = NULL;
            }
            ret = get_errno(sigtimedwait(&set, &uinfo, puts));
            if (!is_error(ret) && arg2) {
                if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
                    goto efault;
                host_to_target_siginfo(p, &uinfo);
                unlock_user(p, arg2, sizeof(target_siginfo_t));
            }
        }
        break;
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;
            if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
                goto efault;
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        break;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
        break;
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                goto efault;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            ret = get_errno(setrlimit(resource, &rlim));
        }
        break;
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    goto efault;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        break;
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                host_to_target_rusage(arg2, &rusage);
            }
        }
        break;
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_settimeofday:
        {
            struct timeval tv;
            if (copy_from_user_timeval(&tv, arg1))
                goto efault;
            ret = get_errno(settimeofday(&tv, NULL));
        }
        break;
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_S390X) || defined(TARGET_ALPHA)
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#else
        {
            struct target_sel_arg_struct *sel;
            abi_ulong inp, outp, exp, tvp;
            long nsel;

            if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
                goto efault;
            nsel = tswapal(sel->n);
            inp = tswapal(sel->inp);
            outp = tswapal(sel->outp);
            exp = tswapal(sel->exp);
            tvp = tswapal(sel->tvp);
            unlock_user_struct(sel, arg1, 0);
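            /* The old-style select passes a single pointer to a
               target_sel_arg_struct holding all five arguments; they were
               unpacked and byte-swapped with tswapal() above before being
               handed on to the common do_select() helper. */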
6224 ret = do_select(nsel, inp, outp, exp, tvp); 6225 } 6226 #endif 6227 break; 6228 #endif 6229 #ifdef TARGET_NR_pselect6 6230 case TARGET_NR_pselect6: 6231 { 6232 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6233 fd_set rfds, wfds, efds; 6234 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6235 struct timespec ts, *ts_ptr; 6236 6237 /* 6238 * The 6th arg is actually two args smashed together, 6239 * so we cannot use the C library. 6240 */ 6241 sigset_t set; 6242 struct { 6243 sigset_t *set; 6244 size_t size; 6245 } sig, *sig_ptr; 6246 6247 abi_ulong arg_sigset, arg_sigsize, *arg7; 6248 target_sigset_t *target_sigset; 6249 6250 n = arg1; 6251 rfd_addr = arg2; 6252 wfd_addr = arg3; 6253 efd_addr = arg4; 6254 ts_addr = arg5; 6255 6256 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6257 if (ret) { 6258 goto fail; 6259 } 6260 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6261 if (ret) { 6262 goto fail; 6263 } 6264 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6265 if (ret) { 6266 goto fail; 6267 } 6268 6269 /* 6270 * This takes a timespec, and not a timeval, so we cannot 6271 * use the do_select() helper ... 6272 */ 6273 if (ts_addr) { 6274 if (target_to_host_timespec(&ts, ts_addr)) { 6275 goto efault; 6276 } 6277 ts_ptr = &ts; 6278 } else { 6279 ts_ptr = NULL; 6280 } 6281 6282 /* Extract the two packed args for the sigset */ 6283 if (arg6) { 6284 sig_ptr = &sig; 6285 sig.size = _NSIG / 8; 6286 6287 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6288 if (!arg7) { 6289 goto efault; 6290 } 6291 arg_sigset = tswapal(arg7[0]); 6292 arg_sigsize = tswapal(arg7[1]); 6293 unlock_user(arg7, arg6, 0); 6294 6295 if (arg_sigset) { 6296 sig.set = &set; 6297 if (arg_sigsize != sizeof(*target_sigset)) { 6298 /* Like the kernel, we enforce correct size sigsets */ 6299 ret = -TARGET_EINVAL; 6300 goto fail; 6301 } 6302 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6303 sizeof(*target_sigset), 1); 6304 if (!target_sigset) { 6305 goto efault; 6306 } 6307 target_to_host_sigset(&set, target_sigset); 6308 unlock_user(target_sigset, arg_sigset, 0); 6309 } else { 6310 sig.set = NULL; 6311 } 6312 } else { 6313 sig_ptr = NULL; 6314 } 6315 6316 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6317 ts_ptr, sig_ptr)); 6318 6319 if (!is_error(ret)) { 6320 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6321 goto efault; 6322 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6323 goto efault; 6324 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6325 goto efault; 6326 6327 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6328 goto efault; 6329 } 6330 } 6331 break; 6332 #endif 6333 case TARGET_NR_symlink: 6334 { 6335 void *p2; 6336 p = lock_user_string(arg1); 6337 p2 = lock_user_string(arg2); 6338 if (!p || !p2) 6339 ret = -TARGET_EFAULT; 6340 else 6341 ret = get_errno(symlink(p, p2)); 6342 unlock_user(p2, arg2, 0); 6343 unlock_user(p, arg1, 0); 6344 } 6345 break; 6346 #if defined(TARGET_NR_symlinkat) 6347 case TARGET_NR_symlinkat: 6348 { 6349 void *p2; 6350 p = lock_user_string(arg1); 6351 p2 = lock_user_string(arg3); 6352 if (!p || !p2) 6353 ret = -TARGET_EFAULT; 6354 else 6355 ret = get_errno(symlinkat(p, arg2, p2)); 6356 unlock_user(p2, arg3, 0); 6357 unlock_user(p, arg1, 0); 6358 } 6359 break; 6360 #endif 6361 #ifdef TARGET_NR_oldlstat 6362 case TARGET_NR_oldlstat: 6363 goto unimplemented; 6364 #endif 6365 case TARGET_NR_readlink: 6366 { 6367 void *p2; 6368 p = lock_user_string(arg1); 6369 p2 = lock_user(VERIFY_WRITE, 
arg2, arg3, 0); 6370 if (!p || !p2) { 6371 ret = -TARGET_EFAULT; 6372 } else if (is_proc_myself((const char *)p, "exe")) { 6373 char real[PATH_MAX], *temp; 6374 temp = realpath(exec_path, real); 6375 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6376 snprintf((char *)p2, arg3, "%s", real); 6377 } else { 6378 ret = get_errno(readlink(path(p), p2, arg3)); 6379 } 6380 unlock_user(p2, arg2, ret); 6381 unlock_user(p, arg1, 0); 6382 } 6383 break; 6384 #if defined(TARGET_NR_readlinkat) 6385 case TARGET_NR_readlinkat: 6386 { 6387 void *p2; 6388 p = lock_user_string(arg2); 6389 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6390 if (!p || !p2) { 6391 ret = -TARGET_EFAULT; 6392 } else if (is_proc_myself((const char *)p, "exe")) { 6393 char real[PATH_MAX], *temp; 6394 temp = realpath(exec_path, real); 6395 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6396 snprintf((char *)p2, arg4, "%s", real); 6397 } else { 6398 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 6399 } 6400 unlock_user(p2, arg3, ret); 6401 unlock_user(p, arg2, 0); 6402 } 6403 break; 6404 #endif 6405 #ifdef TARGET_NR_uselib 6406 case TARGET_NR_uselib: 6407 goto unimplemented; 6408 #endif 6409 #ifdef TARGET_NR_swapon 6410 case TARGET_NR_swapon: 6411 if (!(p = lock_user_string(arg1))) 6412 goto efault; 6413 ret = get_errno(swapon(p, arg2)); 6414 unlock_user(p, arg1, 0); 6415 break; 6416 #endif 6417 case TARGET_NR_reboot: 6418 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6419 /* arg4 must be ignored in all other cases */ 6420 p = lock_user_string(arg4); 6421 if (!p) { 6422 goto efault; 6423 } 6424 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6425 unlock_user(p, arg4, 0); 6426 } else { 6427 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6428 } 6429 break; 6430 #ifdef TARGET_NR_readdir 6431 case TARGET_NR_readdir: 6432 goto unimplemented; 6433 #endif 6434 #ifdef TARGET_NR_mmap 6435 case TARGET_NR_mmap: 6436 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6437 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 6438 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6439 || defined(TARGET_S390X) 6440 { 6441 abi_ulong *v; 6442 abi_ulong v1, v2, v3, v4, v5, v6; 6443 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6444 goto efault; 6445 v1 = tswapal(v[0]); 6446 v2 = tswapal(v[1]); 6447 v3 = tswapal(v[2]); 6448 v4 = tswapal(v[3]); 6449 v5 = tswapal(v[4]); 6450 v6 = tswapal(v[5]); 6451 unlock_user(v, arg1, 0); 6452 ret = get_errno(target_mmap(v1, v2, v3, 6453 target_to_host_bitmask(v4, mmap_flags_tbl), 6454 v5, v6)); 6455 } 6456 #else 6457 ret = get_errno(target_mmap(arg1, arg2, arg3, 6458 target_to_host_bitmask(arg4, mmap_flags_tbl), 6459 arg5, 6460 arg6)); 6461 #endif 6462 break; 6463 #endif 6464 #ifdef TARGET_NR_mmap2 6465 case TARGET_NR_mmap2: 6466 #ifndef MMAP_SHIFT 6467 #define MMAP_SHIFT 12 6468 #endif 6469 ret = get_errno(target_mmap(arg1, arg2, arg3, 6470 target_to_host_bitmask(arg4, mmap_flags_tbl), 6471 arg5, 6472 arg6 << MMAP_SHIFT)); 6473 break; 6474 #endif 6475 case TARGET_NR_munmap: 6476 ret = get_errno(target_munmap(arg1, arg2)); 6477 break; 6478 case TARGET_NR_mprotect: 6479 { 6480 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 6481 /* Special hack to detect libc making the stack executable. 
*/ 6482 if ((arg3 & PROT_GROWSDOWN) 6483 && arg1 >= ts->info->stack_limit 6484 && arg1 <= ts->info->start_stack) { 6485 arg3 &= ~PROT_GROWSDOWN; 6486 arg2 = arg2 + arg1 - ts->info->stack_limit; 6487 arg1 = ts->info->stack_limit; 6488 } 6489 } 6490 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6491 break; 6492 #ifdef TARGET_NR_mremap 6493 case TARGET_NR_mremap: 6494 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6495 break; 6496 #endif 6497 /* ??? msync/mlock/munlock are broken for softmmu. */ 6498 #ifdef TARGET_NR_msync 6499 case TARGET_NR_msync: 6500 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6501 break; 6502 #endif 6503 #ifdef TARGET_NR_mlock 6504 case TARGET_NR_mlock: 6505 ret = get_errno(mlock(g2h(arg1), arg2)); 6506 break; 6507 #endif 6508 #ifdef TARGET_NR_munlock 6509 case TARGET_NR_munlock: 6510 ret = get_errno(munlock(g2h(arg1), arg2)); 6511 break; 6512 #endif 6513 #ifdef TARGET_NR_mlockall 6514 case TARGET_NR_mlockall: 6515 ret = get_errno(mlockall(arg1)); 6516 break; 6517 #endif 6518 #ifdef TARGET_NR_munlockall 6519 case TARGET_NR_munlockall: 6520 ret = get_errno(munlockall()); 6521 break; 6522 #endif 6523 case TARGET_NR_truncate: 6524 if (!(p = lock_user_string(arg1))) 6525 goto efault; 6526 ret = get_errno(truncate(p, arg2)); 6527 unlock_user(p, arg1, 0); 6528 break; 6529 case TARGET_NR_ftruncate: 6530 ret = get_errno(ftruncate(arg1, arg2)); 6531 break; 6532 case TARGET_NR_fchmod: 6533 ret = get_errno(fchmod(arg1, arg2)); 6534 break; 6535 #if defined(TARGET_NR_fchmodat) 6536 case TARGET_NR_fchmodat: 6537 if (!(p = lock_user_string(arg2))) 6538 goto efault; 6539 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 6540 unlock_user(p, arg2, 0); 6541 break; 6542 #endif 6543 case TARGET_NR_getpriority: 6544 /* Note that negative values are valid for getpriority, so we must 6545 differentiate based on errno settings. */ 6546 errno = 0; 6547 ret = getpriority(arg1, arg2); 6548 if (ret == -1 && errno != 0) { 6549 ret = -host_to_target_errno(errno); 6550 break; 6551 } 6552 #ifdef TARGET_ALPHA 6553 /* Return value is the unbiased priority. Signal no error. */ 6554 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6555 #else 6556 /* Return value is a biased priority to avoid negative numbers. 
*/ 6557 ret = 20 - ret; 6558 #endif 6559 break; 6560 case TARGET_NR_setpriority: 6561 ret = get_errno(setpriority(arg1, arg2, arg3)); 6562 break; 6563 #ifdef TARGET_NR_profil 6564 case TARGET_NR_profil: 6565 goto unimplemented; 6566 #endif 6567 case TARGET_NR_statfs: 6568 if (!(p = lock_user_string(arg1))) 6569 goto efault; 6570 ret = get_errno(statfs(path(p), &stfs)); 6571 unlock_user(p, arg1, 0); 6572 convert_statfs: 6573 if (!is_error(ret)) { 6574 struct target_statfs *target_stfs; 6575 6576 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6577 goto efault; 6578 __put_user(stfs.f_type, &target_stfs->f_type); 6579 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6580 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6581 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6582 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6583 __put_user(stfs.f_files, &target_stfs->f_files); 6584 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6585 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6586 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6587 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6588 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6589 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6590 unlock_user_struct(target_stfs, arg2, 1); 6591 } 6592 break; 6593 case TARGET_NR_fstatfs: 6594 ret = get_errno(fstatfs(arg1, &stfs)); 6595 goto convert_statfs; 6596 #ifdef TARGET_NR_statfs64 6597 case TARGET_NR_statfs64: 6598 if (!(p = lock_user_string(arg1))) 6599 goto efault; 6600 ret = get_errno(statfs(path(p), &stfs)); 6601 unlock_user(p, arg1, 0); 6602 convert_statfs64: 6603 if (!is_error(ret)) { 6604 struct target_statfs64 *target_stfs; 6605 6606 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6607 goto efault; 6608 __put_user(stfs.f_type, &target_stfs->f_type); 6609 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6610 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6611 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6612 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6613 __put_user(stfs.f_files, &target_stfs->f_files); 6614 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6615 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6616 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6617 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6618 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6619 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6620 unlock_user_struct(target_stfs, arg3, 1); 6621 } 6622 break; 6623 case TARGET_NR_fstatfs64: 6624 ret = get_errno(fstatfs(arg1, &stfs)); 6625 goto convert_statfs64; 6626 #endif 6627 #ifdef TARGET_NR_ioperm 6628 case TARGET_NR_ioperm: 6629 goto unimplemented; 6630 #endif 6631 #ifdef TARGET_NR_socketcall 6632 case TARGET_NR_socketcall: 6633 ret = do_socketcall(arg1, arg2); 6634 break; 6635 #endif 6636 #ifdef TARGET_NR_accept 6637 case TARGET_NR_accept: 6638 ret = do_accept4(arg1, arg2, arg3, 0); 6639 break; 6640 #endif 6641 #ifdef TARGET_NR_accept4 6642 case TARGET_NR_accept4: 6643 #ifdef CONFIG_ACCEPT4 6644 ret = do_accept4(arg1, arg2, arg3, arg4); 6645 #else 6646 goto unimplemented; 6647 #endif 6648 break; 6649 #endif 6650 #ifdef TARGET_NR_bind 6651 case TARGET_NR_bind: 6652 ret = do_bind(arg1, arg2, arg3); 6653 break; 6654 #endif 6655 #ifdef TARGET_NR_connect 6656 case TARGET_NR_connect: 6657 ret = do_connect(arg1, arg2, arg3); 6658 break; 6659 #endif 6660 #ifdef TARGET_NR_getpeername 6661 case TARGET_NR_getpeername: 6662 
ret = do_getpeername(arg1, arg2, arg3); 6663 break; 6664 #endif 6665 #ifdef TARGET_NR_getsockname 6666 case TARGET_NR_getsockname: 6667 ret = do_getsockname(arg1, arg2, arg3); 6668 break; 6669 #endif 6670 #ifdef TARGET_NR_getsockopt 6671 case TARGET_NR_getsockopt: 6672 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6673 break; 6674 #endif 6675 #ifdef TARGET_NR_listen 6676 case TARGET_NR_listen: 6677 ret = get_errno(listen(arg1, arg2)); 6678 break; 6679 #endif 6680 #ifdef TARGET_NR_recv 6681 case TARGET_NR_recv: 6682 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6683 break; 6684 #endif 6685 #ifdef TARGET_NR_recvfrom 6686 case TARGET_NR_recvfrom: 6687 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6688 break; 6689 #endif 6690 #ifdef TARGET_NR_recvmsg 6691 case TARGET_NR_recvmsg: 6692 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6693 break; 6694 #endif 6695 #ifdef TARGET_NR_send 6696 case TARGET_NR_send: 6697 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6698 break; 6699 #endif 6700 #ifdef TARGET_NR_sendmsg 6701 case TARGET_NR_sendmsg: 6702 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6703 break; 6704 #endif 6705 #ifdef TARGET_NR_sendto 6706 case TARGET_NR_sendto: 6707 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6708 break; 6709 #endif 6710 #ifdef TARGET_NR_shutdown 6711 case TARGET_NR_shutdown: 6712 ret = get_errno(shutdown(arg1, arg2)); 6713 break; 6714 #endif 6715 #ifdef TARGET_NR_socket 6716 case TARGET_NR_socket: 6717 ret = do_socket(arg1, arg2, arg3); 6718 break; 6719 #endif 6720 #ifdef TARGET_NR_socketpair 6721 case TARGET_NR_socketpair: 6722 ret = do_socketpair(arg1, arg2, arg3, arg4); 6723 break; 6724 #endif 6725 #ifdef TARGET_NR_setsockopt 6726 case TARGET_NR_setsockopt: 6727 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6728 break; 6729 #endif 6730 6731 case TARGET_NR_syslog: 6732 if (!(p = lock_user_string(arg2))) 6733 goto efault; 6734 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6735 unlock_user(p, arg2, 0); 6736 break; 6737 6738 case TARGET_NR_setitimer: 6739 { 6740 struct itimerval value, ovalue, *pvalue; 6741 6742 if (arg2) { 6743 pvalue = &value; 6744 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6745 || copy_from_user_timeval(&pvalue->it_value, 6746 arg2 + sizeof(struct target_timeval))) 6747 goto efault; 6748 } else { 6749 pvalue = NULL; 6750 } 6751 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6752 if (!is_error(ret) && arg3) { 6753 if (copy_to_user_timeval(arg3, 6754 &ovalue.it_interval) 6755 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6756 &ovalue.it_value)) 6757 goto efault; 6758 } 6759 } 6760 break; 6761 case TARGET_NR_getitimer: 6762 { 6763 struct itimerval value; 6764 6765 ret = get_errno(getitimer(arg1, &value)); 6766 if (!is_error(ret) && arg2) { 6767 if (copy_to_user_timeval(arg2, 6768 &value.it_interval) 6769 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6770 &value.it_value)) 6771 goto efault; 6772 } 6773 } 6774 break; 6775 case TARGET_NR_stat: 6776 if (!(p = lock_user_string(arg1))) 6777 goto efault; 6778 ret = get_errno(stat(path(p), &st)); 6779 unlock_user(p, arg1, 0); 6780 goto do_stat; 6781 case TARGET_NR_lstat: 6782 if (!(p = lock_user_string(arg1))) 6783 goto efault; 6784 ret = get_errno(lstat(path(p), &st)); 6785 unlock_user(p, arg1, 0); 6786 goto do_stat; 6787 case TARGET_NR_fstat: 6788 { 6789 ret = get_errno(fstat(arg1, &st)); 6790 do_stat: 6791 if (!is_error(ret)) { 6792 struct target_stat *target_st; 6793 6794 if (!lock_user_struct(VERIFY_WRITE, target_st, 
arg2, 0)) 6795 goto efault; 6796 memset(target_st, 0, sizeof(*target_st)); 6797 __put_user(st.st_dev, &target_st->st_dev); 6798 __put_user(st.st_ino, &target_st->st_ino); 6799 __put_user(st.st_mode, &target_st->st_mode); 6800 __put_user(st.st_uid, &target_st->st_uid); 6801 __put_user(st.st_gid, &target_st->st_gid); 6802 __put_user(st.st_nlink, &target_st->st_nlink); 6803 __put_user(st.st_rdev, &target_st->st_rdev); 6804 __put_user(st.st_size, &target_st->st_size); 6805 __put_user(st.st_blksize, &target_st->st_blksize); 6806 __put_user(st.st_blocks, &target_st->st_blocks); 6807 __put_user(st.st_atime, &target_st->target_st_atime); 6808 __put_user(st.st_mtime, &target_st->target_st_mtime); 6809 __put_user(st.st_ctime, &target_st->target_st_ctime); 6810 unlock_user_struct(target_st, arg2, 1); 6811 } 6812 } 6813 break; 6814 #ifdef TARGET_NR_olduname 6815 case TARGET_NR_olduname: 6816 goto unimplemented; 6817 #endif 6818 #ifdef TARGET_NR_iopl 6819 case TARGET_NR_iopl: 6820 goto unimplemented; 6821 #endif 6822 case TARGET_NR_vhangup: 6823 ret = get_errno(vhangup()); 6824 break; 6825 #ifdef TARGET_NR_idle 6826 case TARGET_NR_idle: 6827 goto unimplemented; 6828 #endif 6829 #ifdef TARGET_NR_syscall 6830 case TARGET_NR_syscall: 6831 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6832 arg6, arg7, arg8, 0); 6833 break; 6834 #endif 6835 case TARGET_NR_wait4: 6836 { 6837 int status; 6838 abi_long status_ptr = arg2; 6839 struct rusage rusage, *rusage_ptr; 6840 abi_ulong target_rusage = arg4; 6841 if (target_rusage) 6842 rusage_ptr = &rusage; 6843 else 6844 rusage_ptr = NULL; 6845 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6846 if (!is_error(ret)) { 6847 if (status_ptr && ret) { 6848 status = host_to_target_waitstatus(status); 6849 if (put_user_s32(status, status_ptr)) 6850 goto efault; 6851 } 6852 if (target_rusage) 6853 host_to_target_rusage(target_rusage, &rusage); 6854 } 6855 } 6856 break; 6857 #ifdef TARGET_NR_swapoff 6858 case TARGET_NR_swapoff: 6859 if (!(p = lock_user_string(arg1))) 6860 goto efault; 6861 ret = get_errno(swapoff(p)); 6862 unlock_user(p, arg1, 0); 6863 break; 6864 #endif 6865 case TARGET_NR_sysinfo: 6866 { 6867 struct target_sysinfo *target_value; 6868 struct sysinfo value; 6869 ret = get_errno(sysinfo(&value)); 6870 if (!is_error(ret) && arg1) 6871 { 6872 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6873 goto efault; 6874 __put_user(value.uptime, &target_value->uptime); 6875 __put_user(value.loads[0], &target_value->loads[0]); 6876 __put_user(value.loads[1], &target_value->loads[1]); 6877 __put_user(value.loads[2], &target_value->loads[2]); 6878 __put_user(value.totalram, &target_value->totalram); 6879 __put_user(value.freeram, &target_value->freeram); 6880 __put_user(value.sharedram, &target_value->sharedram); 6881 __put_user(value.bufferram, &target_value->bufferram); 6882 __put_user(value.totalswap, &target_value->totalswap); 6883 __put_user(value.freeswap, &target_value->freeswap); 6884 __put_user(value.procs, &target_value->procs); 6885 __put_user(value.totalhigh, &target_value->totalhigh); 6886 __put_user(value.freehigh, &target_value->freehigh); 6887 __put_user(value.mem_unit, &target_value->mem_unit); 6888 unlock_user_struct(target_value, arg1, 1); 6889 } 6890 } 6891 break; 6892 #ifdef TARGET_NR_ipc 6893 case TARGET_NR_ipc: 6894 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6895 break; 6896 #endif 6897 #ifdef TARGET_NR_semget 6898 case TARGET_NR_semget: 6899 ret = get_errno(semget(arg1, arg2, arg3)); 6900 break; 6901 
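        /* semget takes only scalar arguments, so it can be passed straight
           through; get_errno() folds a host failure into the usual
           -errno return convention used throughout this switch. */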
#endif 6902 #ifdef TARGET_NR_semop 6903 case TARGET_NR_semop: 6904 ret = do_semop(arg1, arg2, arg3); 6905 break; 6906 #endif 6907 #ifdef TARGET_NR_semctl 6908 case TARGET_NR_semctl: 6909 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 6910 break; 6911 #endif 6912 #ifdef TARGET_NR_msgctl 6913 case TARGET_NR_msgctl: 6914 ret = do_msgctl(arg1, arg2, arg3); 6915 break; 6916 #endif 6917 #ifdef TARGET_NR_msgget 6918 case TARGET_NR_msgget: 6919 ret = get_errno(msgget(arg1, arg2)); 6920 break; 6921 #endif 6922 #ifdef TARGET_NR_msgrcv 6923 case TARGET_NR_msgrcv: 6924 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 6925 break; 6926 #endif 6927 #ifdef TARGET_NR_msgsnd 6928 case TARGET_NR_msgsnd: 6929 ret = do_msgsnd(arg1, arg2, arg3, arg4); 6930 break; 6931 #endif 6932 #ifdef TARGET_NR_shmget 6933 case TARGET_NR_shmget: 6934 ret = get_errno(shmget(arg1, arg2, arg3)); 6935 break; 6936 #endif 6937 #ifdef TARGET_NR_shmctl 6938 case TARGET_NR_shmctl: 6939 ret = do_shmctl(arg1, arg2, arg3); 6940 break; 6941 #endif 6942 #ifdef TARGET_NR_shmat 6943 case TARGET_NR_shmat: 6944 ret = do_shmat(arg1, arg2, arg3); 6945 break; 6946 #endif 6947 #ifdef TARGET_NR_shmdt 6948 case TARGET_NR_shmdt: 6949 ret = do_shmdt(arg1); 6950 break; 6951 #endif 6952 case TARGET_NR_fsync: 6953 ret = get_errno(fsync(arg1)); 6954 break; 6955 case TARGET_NR_clone: 6956 /* Linux manages to have three different orderings for its 6957 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 6958 * match the kernel's CONFIG_CLONE_* settings. 6959 * Microblaze is further special in that it uses a sixth 6960 * implicit argument to clone for the TLS pointer. 6961 */ 6962 #if defined(TARGET_MICROBLAZE) 6963 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 6964 #elif defined(TARGET_CLONE_BACKWARDS) 6965 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 6966 #elif defined(TARGET_CLONE_BACKWARDS2) 6967 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 6968 #else 6969 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 6970 #endif 6971 break; 6972 #ifdef __NR_exit_group 6973 /* new thread calls */ 6974 case TARGET_NR_exit_group: 6975 #ifdef TARGET_GPROF 6976 _mcleanup(); 6977 #endif 6978 gdb_exit(cpu_env, arg1); 6979 ret = get_errno(exit_group(arg1)); 6980 break; 6981 #endif 6982 case TARGET_NR_setdomainname: 6983 if (!(p = lock_user_string(arg1))) 6984 goto efault; 6985 ret = get_errno(setdomainname(p, arg2)); 6986 unlock_user(p, arg1, 0); 6987 break; 6988 case TARGET_NR_uname: 6989 /* no need to transcode because we use the linux syscall */ 6990 { 6991 struct new_utsname * buf; 6992 6993 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 6994 goto efault; 6995 ret = get_errno(sys_uname(buf)); 6996 if (!is_error(ret)) { 6997 /* Overrite the native machine name with whatever is being 6998 emulated. */ 6999 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 7000 /* Allow the user to override the reported release. 
*/ 7001 if (qemu_uname_release && *qemu_uname_release) 7002 strcpy (buf->release, qemu_uname_release); 7003 } 7004 unlock_user_struct(buf, arg1, 1); 7005 } 7006 break; 7007 #ifdef TARGET_I386 7008 case TARGET_NR_modify_ldt: 7009 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7010 break; 7011 #if !defined(TARGET_X86_64) 7012 case TARGET_NR_vm86old: 7013 goto unimplemented; 7014 case TARGET_NR_vm86: 7015 ret = do_vm86(cpu_env, arg1, arg2); 7016 break; 7017 #endif 7018 #endif 7019 case TARGET_NR_adjtimex: 7020 goto unimplemented; 7021 #ifdef TARGET_NR_create_module 7022 case TARGET_NR_create_module: 7023 #endif 7024 case TARGET_NR_init_module: 7025 case TARGET_NR_delete_module: 7026 #ifdef TARGET_NR_get_kernel_syms 7027 case TARGET_NR_get_kernel_syms: 7028 #endif 7029 goto unimplemented; 7030 case TARGET_NR_quotactl: 7031 goto unimplemented; 7032 case TARGET_NR_getpgid: 7033 ret = get_errno(getpgid(arg1)); 7034 break; 7035 case TARGET_NR_fchdir: 7036 ret = get_errno(fchdir(arg1)); 7037 break; 7038 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7039 case TARGET_NR_bdflush: 7040 goto unimplemented; 7041 #endif 7042 #ifdef TARGET_NR_sysfs 7043 case TARGET_NR_sysfs: 7044 goto unimplemented; 7045 #endif 7046 case TARGET_NR_personality: 7047 ret = get_errno(personality(arg1)); 7048 break; 7049 #ifdef TARGET_NR_afs_syscall 7050 case TARGET_NR_afs_syscall: 7051 goto unimplemented; 7052 #endif 7053 #ifdef TARGET_NR__llseek /* Not on alpha */ 7054 case TARGET_NR__llseek: 7055 { 7056 int64_t res; 7057 #if !defined(__NR_llseek) 7058 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7059 if (res == -1) { 7060 ret = get_errno(res); 7061 } else { 7062 ret = 0; 7063 } 7064 #else 7065 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7066 #endif 7067 if ((ret == 0) && put_user_s64(res, arg4)) { 7068 goto efault; 7069 } 7070 } 7071 break; 7072 #endif 7073 case TARGET_NR_getdents: 7074 #ifdef __NR_getdents 7075 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7076 { 7077 struct target_dirent *target_dirp; 7078 struct linux_dirent *dirp; 7079 abi_long count = arg3; 7080 7081 dirp = malloc(count); 7082 if (!dirp) { 7083 ret = -TARGET_ENOMEM; 7084 goto fail; 7085 } 7086 7087 ret = get_errno(sys_getdents(arg1, dirp, count)); 7088 if (!is_error(ret)) { 7089 struct linux_dirent *de; 7090 struct target_dirent *tde; 7091 int len = ret; 7092 int reclen, treclen; 7093 int count1, tnamelen; 7094 7095 count1 = 0; 7096 de = dirp; 7097 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7098 goto efault; 7099 tde = target_dirp; 7100 while (len > 0) { 7101 reclen = de->d_reclen; 7102 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7103 assert(tnamelen >= 0); 7104 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7105 assert(count1 + treclen <= count); 7106 tde->d_reclen = tswap16(treclen); 7107 tde->d_ino = tswapal(de->d_ino); 7108 tde->d_off = tswapal(de->d_off); 7109 memcpy(tde->d_name, de->d_name, tnamelen); 7110 de = (struct linux_dirent *)((char *)de + reclen); 7111 len -= reclen; 7112 tde = (struct target_dirent *)((char *)tde + treclen); 7113 count1 += treclen; 7114 } 7115 ret = count1; 7116 unlock_user(target_dirp, arg2, ret); 7117 } 7118 free(dirp); 7119 } 7120 #else 7121 { 7122 struct linux_dirent *dirp; 7123 abi_long count = arg3; 7124 7125 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7126 goto efault; 7127 ret = get_errno(sys_getdents(arg1, dirp, count)); 7128 if (!is_error(ret)) { 7129 struct linux_dirent *de; 7130 int len = ret; 7131 int reclen; 7132 de = 
dirp; 7133 while (len > 0) { 7134 reclen = de->d_reclen; 7135 if (reclen > len) 7136 break; 7137 de->d_reclen = tswap16(reclen); 7138 tswapls(&de->d_ino); 7139 tswapls(&de->d_off); 7140 de = (struct linux_dirent *)((char *)de + reclen); 7141 len -= reclen; 7142 } 7143 } 7144 unlock_user(dirp, arg2, ret); 7145 } 7146 #endif 7147 #else 7148 /* Implement getdents in terms of getdents64 */ 7149 { 7150 struct linux_dirent64 *dirp; 7151 abi_long count = arg3; 7152 7153 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 7154 if (!dirp) { 7155 goto efault; 7156 } 7157 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7158 if (!is_error(ret)) { 7159 /* Convert the dirent64 structs to target dirent. We do this 7160 * in-place, since we can guarantee that a target_dirent is no 7161 * larger than a dirent64; however this means we have to be 7162 * careful to read everything before writing in the new format. 7163 */ 7164 struct linux_dirent64 *de; 7165 struct target_dirent *tde; 7166 int len = ret; 7167 int tlen = 0; 7168 7169 de = dirp; 7170 tde = (struct target_dirent *)dirp; 7171 while (len > 0) { 7172 int namelen, treclen; 7173 int reclen = de->d_reclen; 7174 uint64_t ino = de->d_ino; 7175 int64_t off = de->d_off; 7176 uint8_t type = de->d_type; 7177 7178 namelen = strlen(de->d_name); 7179 treclen = offsetof(struct target_dirent, d_name) 7180 + namelen + 2; 7181 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 7182 7183 memmove(tde->d_name, de->d_name, namelen + 1); 7184 tde->d_ino = tswapal(ino); 7185 tde->d_off = tswapal(off); 7186 tde->d_reclen = tswap16(treclen); 7187 /* The target_dirent type is in what was formerly a padding 7188 * byte at the end of the structure: 7189 */ 7190 *(((char *)tde) + treclen - 1) = type; 7191 7192 de = (struct linux_dirent64 *)((char *)de + reclen); 7193 tde = (struct target_dirent *)((char *)tde + treclen); 7194 len -= reclen; 7195 tlen += treclen; 7196 } 7197 ret = tlen; 7198 } 7199 unlock_user(dirp, arg2, ret); 7200 } 7201 #endif 7202 break; 7203 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7204 case TARGET_NR_getdents64: 7205 { 7206 struct linux_dirent64 *dirp; 7207 abi_long count = arg3; 7208 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7209 goto efault; 7210 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7211 if (!is_error(ret)) { 7212 struct linux_dirent64 *de; 7213 int len = ret; 7214 int reclen; 7215 de = dirp; 7216 while (len > 0) { 7217 reclen = de->d_reclen; 7218 if (reclen > len) 7219 break; 7220 de->d_reclen = tswap16(reclen); 7221 tswap64s((uint64_t *)&de->d_ino); 7222 tswap64s((uint64_t *)&de->d_off); 7223 de = (struct linux_dirent64 *)((char *)de + reclen); 7224 len -= reclen; 7225 } 7226 } 7227 unlock_user(dirp, arg2, ret); 7228 } 7229 break; 7230 #endif /* TARGET_NR_getdents64 */ 7231 #if defined(TARGET_NR__newselect) 7232 case TARGET_NR__newselect: 7233 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7234 break; 7235 #endif 7236 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7237 # ifdef TARGET_NR_poll 7238 case TARGET_NR_poll: 7239 # endif 7240 # ifdef TARGET_NR_ppoll 7241 case TARGET_NR_ppoll: 7242 # endif 7243 { 7244 struct target_pollfd *target_pfd; 7245 unsigned int nfds = arg2; 7246 int timeout = arg3; 7247 struct pollfd *pfd; 7248 unsigned int i; 7249 7250 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7251 if (!target_pfd) 7252 goto efault; 7253 7254 pfd = alloca(sizeof(struct pollfd) * nfds); 7255 for(i = 0; i < nfds; i++) { 7256 pfd[i].fd = 
tswap32(target_pfd[i].fd); 7257 pfd[i].events = tswap16(target_pfd[i].events); 7258 } 7259 7260 # ifdef TARGET_NR_ppoll 7261 if (num == TARGET_NR_ppoll) { 7262 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7263 target_sigset_t *target_set; 7264 sigset_t _set, *set = &_set; 7265 7266 if (arg3) { 7267 if (target_to_host_timespec(timeout_ts, arg3)) { 7268 unlock_user(target_pfd, arg1, 0); 7269 goto efault; 7270 } 7271 } else { 7272 timeout_ts = NULL; 7273 } 7274 7275 if (arg4) { 7276 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7277 if (!target_set) { 7278 unlock_user(target_pfd, arg1, 0); 7279 goto efault; 7280 } 7281 target_to_host_sigset(set, target_set); 7282 } else { 7283 set = NULL; 7284 } 7285 7286 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7287 7288 if (!is_error(ret) && arg3) { 7289 host_to_target_timespec(arg3, timeout_ts); 7290 } 7291 if (arg4) { 7292 unlock_user(target_set, arg4, 0); 7293 } 7294 } else 7295 # endif 7296 ret = get_errno(poll(pfd, nfds, timeout)); 7297 7298 if (!is_error(ret)) { 7299 for(i = 0; i < nfds; i++) { 7300 target_pfd[i].revents = tswap16(pfd[i].revents); 7301 } 7302 } 7303 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7304 } 7305 break; 7306 #endif 7307 case TARGET_NR_flock: 7308 /* NOTE: the flock constant seems to be the same for every 7309 Linux platform */ 7310 ret = get_errno(flock(arg1, arg2)); 7311 break; 7312 case TARGET_NR_readv: 7313 { 7314 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7315 if (vec != NULL) { 7316 ret = get_errno(readv(arg1, vec, arg3)); 7317 unlock_iovec(vec, arg2, arg3, 1); 7318 } else { 7319 ret = -host_to_target_errno(errno); 7320 } 7321 } 7322 break; 7323 case TARGET_NR_writev: 7324 { 7325 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7326 if (vec != NULL) { 7327 ret = get_errno(writev(arg1, vec, arg3)); 7328 unlock_iovec(vec, arg2, arg3, 0); 7329 } else { 7330 ret = -host_to_target_errno(errno); 7331 } 7332 } 7333 break; 7334 case TARGET_NR_getsid: 7335 ret = get_errno(getsid(arg1)); 7336 break; 7337 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7338 case TARGET_NR_fdatasync: 7339 ret = get_errno(fdatasync(arg1)); 7340 break; 7341 #endif 7342 case TARGET_NR__sysctl: 7343 /* We don't implement this, but ENOTDIR is always a safe 7344 return value. */ 7345 ret = -TARGET_ENOTDIR; 7346 break; 7347 case TARGET_NR_sched_getaffinity: 7348 { 7349 unsigned int mask_size; 7350 unsigned long *mask; 7351 7352 /* 7353 * sched_getaffinity needs multiples of ulong, so need to take 7354 * care of mismatches between target ulong and host ulong sizes. 7355 */ 7356 if (arg2 & (sizeof(abi_ulong) - 1)) { 7357 ret = -TARGET_EINVAL; 7358 break; 7359 } 7360 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7361 7362 mask = alloca(mask_size); 7363 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7364 7365 if (!is_error(ret)) { 7366 if (copy_to_user(arg3, mask, ret)) { 7367 goto efault; 7368 } 7369 } 7370 } 7371 break; 7372 case TARGET_NR_sched_setaffinity: 7373 { 7374 unsigned int mask_size; 7375 unsigned long *mask; 7376 7377 /* 7378 * sched_setaffinity needs multiples of ulong, so need to take 7379 * care of mismatches between target ulong and host ulong sizes. 
         */
        if (arg2 & (sizeof(abi_ulong) - 1)) {
            ret = -TARGET_EINVAL;
            break;
        }
        mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

        mask = alloca(mask_size);
        if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
            goto efault;
        }
        memcpy(mask, p, arg2);
        unlock_user_struct(p, arg3, 0);

        ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
    }
    break;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            ret = get_errno(sched_setparam(arg1, &schp));
        }
        break;
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    goto efault;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        break;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
        break;
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
        break;
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
        break;
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
        break;
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
        break;
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec(arg2, &ts);
            }
        }
        break;
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
        goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
        goto unimplemented;
#endif
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                goto efault;
            }
            break;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            break;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            break;
        }
#endif
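        /* The PR_GET_NAME/PR_SET_NAME cases above transfer exactly 16 bytes,
           matching the kernel's TASK_COMM_LEN for the thread name buffer. */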
default: 7509 /* Most prctl options have no pointer arguments */ 7510 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7511 break; 7512 } 7513 break; 7514 #ifdef TARGET_NR_arch_prctl 7515 case TARGET_NR_arch_prctl: 7516 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7517 ret = do_arch_prctl(cpu_env, arg1, arg2); 7518 break; 7519 #else 7520 goto unimplemented; 7521 #endif 7522 #endif 7523 #ifdef TARGET_NR_pread64 7524 case TARGET_NR_pread64: 7525 if (regpairs_aligned(cpu_env)) { 7526 arg4 = arg5; 7527 arg5 = arg6; 7528 } 7529 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7530 goto efault; 7531 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7532 unlock_user(p, arg2, ret); 7533 break; 7534 case TARGET_NR_pwrite64: 7535 if (regpairs_aligned(cpu_env)) { 7536 arg4 = arg5; 7537 arg5 = arg6; 7538 } 7539 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7540 goto efault; 7541 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7542 unlock_user(p, arg2, 0); 7543 break; 7544 #endif 7545 case TARGET_NR_getcwd: 7546 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7547 goto efault; 7548 ret = get_errno(sys_getcwd1(p, arg2)); 7549 unlock_user(p, arg1, ret); 7550 break; 7551 case TARGET_NR_capget: 7552 goto unimplemented; 7553 case TARGET_NR_capset: 7554 goto unimplemented; 7555 case TARGET_NR_sigaltstack: 7556 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7557 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7558 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7559 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7560 break; 7561 #else 7562 goto unimplemented; 7563 #endif 7564 7565 #ifdef CONFIG_SENDFILE 7566 case TARGET_NR_sendfile: 7567 { 7568 off_t *offp = NULL; 7569 off_t off; 7570 if (arg3) { 7571 ret = get_user_sal(off, arg3); 7572 if (is_error(ret)) { 7573 break; 7574 } 7575 offp = &off; 7576 } 7577 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7578 if (!is_error(ret) && arg3) { 7579 abi_long ret2 = put_user_sal(off, arg3); 7580 if (is_error(ret2)) { 7581 ret = ret2; 7582 } 7583 } 7584 break; 7585 } 7586 #ifdef TARGET_NR_sendfile64 7587 case TARGET_NR_sendfile64: 7588 { 7589 off_t *offp = NULL; 7590 off_t off; 7591 if (arg3) { 7592 ret = get_user_s64(off, arg3); 7593 if (is_error(ret)) { 7594 break; 7595 } 7596 offp = &off; 7597 } 7598 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7599 if (!is_error(ret) && arg3) { 7600 abi_long ret2 = put_user_s64(off, arg3); 7601 if (is_error(ret2)) { 7602 ret = ret2; 7603 } 7604 } 7605 break; 7606 } 7607 #endif 7608 #else 7609 case TARGET_NR_sendfile: 7610 #ifdef TARGET_NR_sendfile64 7611 case TARGET_NR_sendfile64: 7612 #endif 7613 goto unimplemented; 7614 #endif 7615 7616 #ifdef TARGET_NR_getpmsg 7617 case TARGET_NR_getpmsg: 7618 goto unimplemented; 7619 #endif 7620 #ifdef TARGET_NR_putpmsg 7621 case TARGET_NR_putpmsg: 7622 goto unimplemented; 7623 #endif 7624 #ifdef TARGET_NR_vfork 7625 case TARGET_NR_vfork: 7626 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7627 0, 0, 0, 0)); 7628 break; 7629 #endif 7630 #ifdef TARGET_NR_ugetrlimit 7631 case TARGET_NR_ugetrlimit: 7632 { 7633 struct rlimit rlim; 7634 int resource = target_to_host_resource(arg1); 7635 ret = get_errno(getrlimit(resource, &rlim)); 7636 if (!is_error(ret)) { 7637 struct target_rlimit *target_rlim; 7638 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7639 goto efault; 7640 
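            /* Convert each limit to the target's rlim_t representation
               (word size and byte order) before writing the structure
               back to guest memory. */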
target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7641 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7642 unlock_user_struct(target_rlim, arg2, 1); 7643 } 7644 break; 7645 } 7646 #endif 7647 #ifdef TARGET_NR_truncate64 7648 case TARGET_NR_truncate64: 7649 if (!(p = lock_user_string(arg1))) 7650 goto efault; 7651 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7652 unlock_user(p, arg1, 0); 7653 break; 7654 #endif 7655 #ifdef TARGET_NR_ftruncate64 7656 case TARGET_NR_ftruncate64: 7657 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7658 break; 7659 #endif 7660 #ifdef TARGET_NR_stat64 7661 case TARGET_NR_stat64: 7662 if (!(p = lock_user_string(arg1))) 7663 goto efault; 7664 ret = get_errno(stat(path(p), &st)); 7665 unlock_user(p, arg1, 0); 7666 if (!is_error(ret)) 7667 ret = host_to_target_stat64(cpu_env, arg2, &st); 7668 break; 7669 #endif 7670 #ifdef TARGET_NR_lstat64 7671 case TARGET_NR_lstat64: 7672 if (!(p = lock_user_string(arg1))) 7673 goto efault; 7674 ret = get_errno(lstat(path(p), &st)); 7675 unlock_user(p, arg1, 0); 7676 if (!is_error(ret)) 7677 ret = host_to_target_stat64(cpu_env, arg2, &st); 7678 break; 7679 #endif 7680 #ifdef TARGET_NR_fstat64 7681 case TARGET_NR_fstat64: 7682 ret = get_errno(fstat(arg1, &st)); 7683 if (!is_error(ret)) 7684 ret = host_to_target_stat64(cpu_env, arg2, &st); 7685 break; 7686 #endif 7687 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 7688 #ifdef TARGET_NR_fstatat64 7689 case TARGET_NR_fstatat64: 7690 #endif 7691 #ifdef TARGET_NR_newfstatat 7692 case TARGET_NR_newfstatat: 7693 #endif 7694 if (!(p = lock_user_string(arg2))) 7695 goto efault; 7696 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 7697 if (!is_error(ret)) 7698 ret = host_to_target_stat64(cpu_env, arg3, &st); 7699 break; 7700 #endif 7701 case TARGET_NR_lchown: 7702 if (!(p = lock_user_string(arg1))) 7703 goto efault; 7704 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7705 unlock_user(p, arg1, 0); 7706 break; 7707 #ifdef TARGET_NR_getuid 7708 case TARGET_NR_getuid: 7709 ret = get_errno(high2lowuid(getuid())); 7710 break; 7711 #endif 7712 #ifdef TARGET_NR_getgid 7713 case TARGET_NR_getgid: 7714 ret = get_errno(high2lowgid(getgid())); 7715 break; 7716 #endif 7717 #ifdef TARGET_NR_geteuid 7718 case TARGET_NR_geteuid: 7719 ret = get_errno(high2lowuid(geteuid())); 7720 break; 7721 #endif 7722 #ifdef TARGET_NR_getegid 7723 case TARGET_NR_getegid: 7724 ret = get_errno(high2lowgid(getegid())); 7725 break; 7726 #endif 7727 case TARGET_NR_setreuid: 7728 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7729 break; 7730 case TARGET_NR_setregid: 7731 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7732 break; 7733 case TARGET_NR_getgroups: 7734 { 7735 int gidsetsize = arg1; 7736 target_id *target_grouplist; 7737 gid_t *grouplist; 7738 int i; 7739 7740 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7741 ret = get_errno(getgroups(gidsetsize, grouplist)); 7742 if (gidsetsize == 0) 7743 break; 7744 if (!is_error(ret)) { 7745 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 7746 if (!target_grouplist) 7747 goto efault; 7748 for(i = 0;i < ret; i++) 7749 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7750 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 7751 } 7752 } 7753 break; 7754 case TARGET_NR_setgroups: 7755 { 7756 int gidsetsize = arg1; 7757 target_id *target_grouplist; 7758 gid_t *grouplist = NULL; 7759 int i; 7760 if 
(gidsetsize) { 7761 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7762 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 7763 if (!target_grouplist) { 7764 ret = -TARGET_EFAULT; 7765 goto fail; 7766 } 7767 for (i = 0; i < gidsetsize; i++) { 7768 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7769 } 7770 unlock_user(target_grouplist, arg2, 0); 7771 } 7772 ret = get_errno(setgroups(gidsetsize, grouplist)); 7773 } 7774 break; 7775 case TARGET_NR_fchown: 7776 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7777 break; 7778 #if defined(TARGET_NR_fchownat) 7779 case TARGET_NR_fchownat: 7780 if (!(p = lock_user_string(arg2))) 7781 goto efault; 7782 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 7783 low2highgid(arg4), arg5)); 7784 unlock_user(p, arg2, 0); 7785 break; 7786 #endif 7787 #ifdef TARGET_NR_setresuid 7788 case TARGET_NR_setresuid: 7789 ret = get_errno(setresuid(low2highuid(arg1), 7790 low2highuid(arg2), 7791 low2highuid(arg3))); 7792 break; 7793 #endif 7794 #ifdef TARGET_NR_getresuid 7795 case TARGET_NR_getresuid: 7796 { 7797 uid_t ruid, euid, suid; 7798 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7799 if (!is_error(ret)) { 7800 if (put_user_u16(high2lowuid(ruid), arg1) 7801 || put_user_u16(high2lowuid(euid), arg2) 7802 || put_user_u16(high2lowuid(suid), arg3)) 7803 goto efault; 7804 } 7805 } 7806 break; 7807 #endif 7808 #ifdef TARGET_NR_getresgid 7809 case TARGET_NR_setresgid: 7810 ret = get_errno(setresgid(low2highgid(arg1), 7811 low2highgid(arg2), 7812 low2highgid(arg3))); 7813 break; 7814 #endif 7815 #ifdef TARGET_NR_getresgid 7816 case TARGET_NR_getresgid: 7817 { 7818 gid_t rgid, egid, sgid; 7819 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7820 if (!is_error(ret)) { 7821 if (put_user_u16(high2lowgid(rgid), arg1) 7822 || put_user_u16(high2lowgid(egid), arg2) 7823 || put_user_u16(high2lowgid(sgid), arg3)) 7824 goto efault; 7825 } 7826 } 7827 break; 7828 #endif 7829 case TARGET_NR_chown: 7830 if (!(p = lock_user_string(arg1))) 7831 goto efault; 7832 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7833 unlock_user(p, arg1, 0); 7834 break; 7835 case TARGET_NR_setuid: 7836 ret = get_errno(setuid(low2highuid(arg1))); 7837 break; 7838 case TARGET_NR_setgid: 7839 ret = get_errno(setgid(low2highgid(arg1))); 7840 break; 7841 case TARGET_NR_setfsuid: 7842 ret = get_errno(setfsuid(arg1)); 7843 break; 7844 case TARGET_NR_setfsgid: 7845 ret = get_errno(setfsgid(arg1)); 7846 break; 7847 7848 #ifdef TARGET_NR_lchown32 7849 case TARGET_NR_lchown32: 7850 if (!(p = lock_user_string(arg1))) 7851 goto efault; 7852 ret = get_errno(lchown(p, arg2, arg3)); 7853 unlock_user(p, arg1, 0); 7854 break; 7855 #endif 7856 #ifdef TARGET_NR_getuid32 7857 case TARGET_NR_getuid32: 7858 ret = get_errno(getuid()); 7859 break; 7860 #endif 7861 7862 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 7863 /* Alpha specific */ 7864 case TARGET_NR_getxuid: 7865 { 7866 uid_t euid; 7867 euid=geteuid(); 7868 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 7869 } 7870 ret = get_errno(getuid()); 7871 break; 7872 #endif 7873 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 7874 /* Alpha specific */ 7875 case TARGET_NR_getxgid: 7876 { 7877 uid_t egid; 7878 egid=getegid(); 7879 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 7880 } 7881 ret = get_errno(getgid()); 7882 break; 7883 #endif 7884 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 7885 /* Alpha specific */ 7886 case TARGET_NR_osf_getsysinfo: 7887 ret = 
-TARGET_EOPNOTSUPP; 7888 switch (arg1) { 7889 case TARGET_GSI_IEEE_FP_CONTROL: 7890 { 7891 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 7892 7893 /* Copied from linux ieee_fpcr_to_swcr. */ 7894 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 7895 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 7896 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 7897 | SWCR_TRAP_ENABLE_DZE 7898 | SWCR_TRAP_ENABLE_OVF); 7899 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 7900 | SWCR_TRAP_ENABLE_INE); 7901 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 7902 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 7903 7904 if (put_user_u64 (swcr, arg2)) 7905 goto efault; 7906 ret = 0; 7907 } 7908 break; 7909 7910 /* case GSI_IEEE_STATE_AT_SIGNAL: 7911 -- Not implemented in linux kernel. 7912 case GSI_UACPROC: 7913 -- Retrieves current unaligned access state; not much used. 7914 case GSI_PROC_TYPE: 7915 -- Retrieves implver information; surely not used. 7916 case GSI_GET_HWRPB: 7917 -- Grabs a copy of the HWRPB; surely not used. 7918 */ 7919 } 7920 break; 7921 #endif 7922 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 7923 /* Alpha specific */ 7924 case TARGET_NR_osf_setsysinfo: 7925 ret = -TARGET_EOPNOTSUPP; 7926 switch (arg1) { 7927 case TARGET_SSI_IEEE_FP_CONTROL: 7928 { 7929 uint64_t swcr, fpcr, orig_fpcr; 7930 7931 if (get_user_u64 (swcr, arg2)) { 7932 goto efault; 7933 } 7934 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 7935 fpcr = orig_fpcr & FPCR_DYN_MASK; 7936 7937 /* Copied from linux ieee_swcr_to_fpcr. */ 7938 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 7939 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 7940 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 7941 | SWCR_TRAP_ENABLE_DZE 7942 | SWCR_TRAP_ENABLE_OVF)) << 48; 7943 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 7944 | SWCR_TRAP_ENABLE_INE)) << 57; 7945 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 7946 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 7947 7948 cpu_alpha_store_fpcr(cpu_env, fpcr); 7949 ret = 0; 7950 } 7951 break; 7952 7953 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 7954 { 7955 uint64_t exc, fpcr, orig_fpcr; 7956 int si_code; 7957 7958 if (get_user_u64(exc, arg2)) { 7959 goto efault; 7960 } 7961 7962 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 7963 7964 /* We only add to the exception status here. */ 7965 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 7966 7967 cpu_alpha_store_fpcr(cpu_env, fpcr); 7968 ret = 0; 7969 7970 /* Old exceptions are not signaled. */ 7971 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 7972 7973 /* If any exceptions set by this call, 7974 and are unmasked, send a signal. */ 7975 si_code = 0; 7976 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 7977 si_code = TARGET_FPE_FLTRES; 7978 } 7979 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 7980 si_code = TARGET_FPE_FLTUND; 7981 } 7982 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 7983 si_code = TARGET_FPE_FLTOVF; 7984 } 7985 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 7986 si_code = TARGET_FPE_FLTDIV; 7987 } 7988 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 7989 si_code = TARGET_FPE_FLTINV; 7990 } 7991 if (si_code != 0) { 7992 target_siginfo_t info; 7993 info.si_signo = SIGFPE; 7994 info.si_errno = 0; 7995 info.si_code = si_code; 7996 info._sifields._sigfault._addr 7997 = ((CPUArchState *)cpu_env)->pc; 7998 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 7999 } 8000 } 8001 break; 8002 8003 /* case SSI_NVPAIRS: 8004 -- Used with SSIN_UACPROC to enable unaligned accesses. 
8005 case SSI_IEEE_STATE_AT_SIGNAL: 8006 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8007 -- Not implemented in linux kernel 8008 */ 8009 } 8010 break; 8011 #endif 8012 #ifdef TARGET_NR_osf_sigprocmask 8013 /* Alpha specific. */ 8014 case TARGET_NR_osf_sigprocmask: 8015 { 8016 abi_ulong mask; 8017 int how; 8018 sigset_t set, oldset; 8019 8020 switch(arg1) { 8021 case TARGET_SIG_BLOCK: 8022 how = SIG_BLOCK; 8023 break; 8024 case TARGET_SIG_UNBLOCK: 8025 how = SIG_UNBLOCK; 8026 break; 8027 case TARGET_SIG_SETMASK: 8028 how = SIG_SETMASK; 8029 break; 8030 default: 8031 ret = -TARGET_EINVAL; 8032 goto fail; 8033 } 8034 mask = arg2; 8035 target_to_host_old_sigset(&set, &mask); 8036 sigprocmask(how, &set, &oldset); 8037 host_to_target_old_sigset(&mask, &oldset); 8038 ret = mask; 8039 } 8040 break; 8041 #endif 8042 8043 #ifdef TARGET_NR_getgid32 8044 case TARGET_NR_getgid32: 8045 ret = get_errno(getgid()); 8046 break; 8047 #endif 8048 #ifdef TARGET_NR_geteuid32 8049 case TARGET_NR_geteuid32: 8050 ret = get_errno(geteuid()); 8051 break; 8052 #endif 8053 #ifdef TARGET_NR_getegid32 8054 case TARGET_NR_getegid32: 8055 ret = get_errno(getegid()); 8056 break; 8057 #endif 8058 #ifdef TARGET_NR_setreuid32 8059 case TARGET_NR_setreuid32: 8060 ret = get_errno(setreuid(arg1, arg2)); 8061 break; 8062 #endif 8063 #ifdef TARGET_NR_setregid32 8064 case TARGET_NR_setregid32: 8065 ret = get_errno(setregid(arg1, arg2)); 8066 break; 8067 #endif 8068 #ifdef TARGET_NR_getgroups32 8069 case TARGET_NR_getgroups32: 8070 { 8071 int gidsetsize = arg1; 8072 uint32_t *target_grouplist; 8073 gid_t *grouplist; 8074 int i; 8075 8076 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8077 ret = get_errno(getgroups(gidsetsize, grouplist)); 8078 if (gidsetsize == 0) 8079 break; 8080 if (!is_error(ret)) { 8081 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8082 if (!target_grouplist) { 8083 ret = -TARGET_EFAULT; 8084 goto fail; 8085 } 8086 for(i = 0;i < ret; i++) 8087 target_grouplist[i] = tswap32(grouplist[i]); 8088 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8089 } 8090 } 8091 break; 8092 #endif 8093 #ifdef TARGET_NR_setgroups32 8094 case TARGET_NR_setgroups32: 8095 { 8096 int gidsetsize = arg1; 8097 uint32_t *target_grouplist; 8098 gid_t *grouplist; 8099 int i; 8100 8101 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8102 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8103 if (!target_grouplist) { 8104 ret = -TARGET_EFAULT; 8105 goto fail; 8106 } 8107 for(i = 0;i < gidsetsize; i++) 8108 grouplist[i] = tswap32(target_grouplist[i]); 8109 unlock_user(target_grouplist, arg2, 0); 8110 ret = get_errno(setgroups(gidsetsize, grouplist)); 8111 } 8112 break; 8113 #endif 8114 #ifdef TARGET_NR_fchown32 8115 case TARGET_NR_fchown32: 8116 ret = get_errno(fchown(arg1, arg2, arg3)); 8117 break; 8118 #endif 8119 #ifdef TARGET_NR_setresuid32 8120 case TARGET_NR_setresuid32: 8121 ret = get_errno(setresuid(arg1, arg2, arg3)); 8122 break; 8123 #endif 8124 #ifdef TARGET_NR_getresuid32 8125 case TARGET_NR_getresuid32: 8126 { 8127 uid_t ruid, euid, suid; 8128 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8129 if (!is_error(ret)) { 8130 if (put_user_u32(ruid, arg1) 8131 || put_user_u32(euid, arg2) 8132 || put_user_u32(suid, arg3)) 8133 goto efault; 8134 } 8135 } 8136 break; 8137 #endif 8138 #ifdef TARGET_NR_setresgid32 8139 case TARGET_NR_setresgid32: 8140 ret = get_errno(setresgid(arg1, arg2, arg3)); 8141 break; 8142 #endif 8143 #ifdef TARGET_NR_getresgid32 8144 case TARGET_NR_getresgid32: 8145 { 8146 
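            /* 32-bit ID variant: the values are returned as full 32-bit
             * quantities, so no high2low/low2high translation is applied. */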
gid_t rgid, egid, sgid; 8147 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8148 if (!is_error(ret)) { 8149 if (put_user_u32(rgid, arg1) 8150 || put_user_u32(egid, arg2) 8151 || put_user_u32(sgid, arg3)) 8152 goto efault; 8153 } 8154 } 8155 break; 8156 #endif 8157 #ifdef TARGET_NR_chown32 8158 case TARGET_NR_chown32: 8159 if (!(p = lock_user_string(arg1))) 8160 goto efault; 8161 ret = get_errno(chown(p, arg2, arg3)); 8162 unlock_user(p, arg1, 0); 8163 break; 8164 #endif 8165 #ifdef TARGET_NR_setuid32 8166 case TARGET_NR_setuid32: 8167 ret = get_errno(setuid(arg1)); 8168 break; 8169 #endif 8170 #ifdef TARGET_NR_setgid32 8171 case TARGET_NR_setgid32: 8172 ret = get_errno(setgid(arg1)); 8173 break; 8174 #endif 8175 #ifdef TARGET_NR_setfsuid32 8176 case TARGET_NR_setfsuid32: 8177 ret = get_errno(setfsuid(arg1)); 8178 break; 8179 #endif 8180 #ifdef TARGET_NR_setfsgid32 8181 case TARGET_NR_setfsgid32: 8182 ret = get_errno(setfsgid(arg1)); 8183 break; 8184 #endif 8185 8186 case TARGET_NR_pivot_root: 8187 goto unimplemented; 8188 #ifdef TARGET_NR_mincore 8189 case TARGET_NR_mincore: 8190 { 8191 void *a; 8192 ret = -TARGET_EFAULT; 8193 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 8194 goto efault; 8195 if (!(p = lock_user_string(arg3))) 8196 goto mincore_fail; 8197 ret = get_errno(mincore(a, arg2, p)); 8198 unlock_user(p, arg3, ret); 8199 mincore_fail: 8200 unlock_user(a, arg1, 0); 8201 } 8202 break; 8203 #endif 8204 #ifdef TARGET_NR_arm_fadvise64_64 8205 case TARGET_NR_arm_fadvise64_64: 8206 { 8207 /* 8208 * arm_fadvise64_64 looks like fadvise64_64 but 8209 * with different argument order 8210 */ 8211 abi_long temp; 8212 temp = arg3; 8213 arg3 = arg4; 8214 arg4 = temp; 8215 } 8216 #endif 8217 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 8218 #ifdef TARGET_NR_fadvise64_64 8219 case TARGET_NR_fadvise64_64: 8220 #endif 8221 #ifdef TARGET_NR_fadvise64 8222 case TARGET_NR_fadvise64: 8223 #endif 8224 #ifdef TARGET_S390X 8225 switch (arg4) { 8226 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 8227 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 8228 case 6: arg4 = POSIX_FADV_DONTNEED; break; 8229 case 7: arg4 = POSIX_FADV_NOREUSE; break; 8230 default: break; 8231 } 8232 #endif 8233 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 8234 break; 8235 #endif 8236 #ifdef TARGET_NR_madvise 8237 case TARGET_NR_madvise: 8238 /* A straight passthrough may not be safe because qemu sometimes 8239 turns private file-backed mappings into anonymous mappings. 8240 This will break MADV_DONTNEED. 8241 This is a hint, so ignoring and returning success is ok. 
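           (With MADV_DONTNEED, an anonymous mapping is simply zero-filled on
           the next access instead of being repopulated from the backing file,
           so the guest could observe different contents than on a real kernel.)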
*/ 8242 ret = get_errno(0); 8243 break; 8244 #endif 8245 #if TARGET_ABI_BITS == 32 8246 case TARGET_NR_fcntl64: 8247 { 8248 int cmd; 8249 struct flock64 fl; 8250 struct target_flock64 *target_fl; 8251 #ifdef TARGET_ARM 8252 struct target_eabi_flock64 *target_efl; 8253 #endif 8254 8255 cmd = target_to_host_fcntl_cmd(arg2); 8256 if (cmd == -TARGET_EINVAL) { 8257 ret = cmd; 8258 break; 8259 } 8260 8261 switch(arg2) { 8262 case TARGET_F_GETLK64: 8263 #ifdef TARGET_ARM 8264 if (((CPUARMState *)cpu_env)->eabi) { 8265 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8266 goto efault; 8267 fl.l_type = tswap16(target_efl->l_type); 8268 fl.l_whence = tswap16(target_efl->l_whence); 8269 fl.l_start = tswap64(target_efl->l_start); 8270 fl.l_len = tswap64(target_efl->l_len); 8271 fl.l_pid = tswap32(target_efl->l_pid); 8272 unlock_user_struct(target_efl, arg3, 0); 8273 } else 8274 #endif 8275 { 8276 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8277 goto efault; 8278 fl.l_type = tswap16(target_fl->l_type); 8279 fl.l_whence = tswap16(target_fl->l_whence); 8280 fl.l_start = tswap64(target_fl->l_start); 8281 fl.l_len = tswap64(target_fl->l_len); 8282 fl.l_pid = tswap32(target_fl->l_pid); 8283 unlock_user_struct(target_fl, arg3, 0); 8284 } 8285 ret = get_errno(fcntl(arg1, cmd, &fl)); 8286 if (ret == 0) { 8287 #ifdef TARGET_ARM 8288 if (((CPUARMState *)cpu_env)->eabi) { 8289 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8290 goto efault; 8291 target_efl->l_type = tswap16(fl.l_type); 8292 target_efl->l_whence = tswap16(fl.l_whence); 8293 target_efl->l_start = tswap64(fl.l_start); 8294 target_efl->l_len = tswap64(fl.l_len); 8295 target_efl->l_pid = tswap32(fl.l_pid); 8296 unlock_user_struct(target_efl, arg3, 1); 8297 } else 8298 #endif 8299 { 8300 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8301 goto efault; 8302 target_fl->l_type = tswap16(fl.l_type); 8303 target_fl->l_whence = tswap16(fl.l_whence); 8304 target_fl->l_start = tswap64(fl.l_start); 8305 target_fl->l_len = tswap64(fl.l_len); 8306 target_fl->l_pid = tswap32(fl.l_pid); 8307 unlock_user_struct(target_fl, arg3, 1); 8308 } 8309 } 8310 break; 8311 8312 case TARGET_F_SETLK64: 8313 case TARGET_F_SETLKW64: 8314 #ifdef TARGET_ARM 8315 if (((CPUARMState *)cpu_env)->eabi) { 8316 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8317 goto efault; 8318 fl.l_type = tswap16(target_efl->l_type); 8319 fl.l_whence = tswap16(target_efl->l_whence); 8320 fl.l_start = tswap64(target_efl->l_start); 8321 fl.l_len = tswap64(target_efl->l_len); 8322 fl.l_pid = tswap32(target_efl->l_pid); 8323 unlock_user_struct(target_efl, arg3, 0); 8324 } else 8325 #endif 8326 { 8327 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8328 goto efault; 8329 fl.l_type = tswap16(target_fl->l_type); 8330 fl.l_whence = tswap16(target_fl->l_whence); 8331 fl.l_start = tswap64(target_fl->l_start); 8332 fl.l_len = tswap64(target_fl->l_len); 8333 fl.l_pid = tswap32(target_fl->l_pid); 8334 unlock_user_struct(target_fl, arg3, 0); 8335 } 8336 ret = get_errno(fcntl(arg1, cmd, &fl)); 8337 break; 8338 default: 8339 ret = do_fcntl(arg1, arg2, arg3); 8340 break; 8341 } 8342 break; 8343 } 8344 #endif 8345 #ifdef TARGET_NR_cacheflush 8346 case TARGET_NR_cacheflush: 8347 /* self-modifying code is handled automatically, so nothing needed */ 8348 ret = 0; 8349 break; 8350 #endif 8351 #ifdef TARGET_NR_security 8352 case TARGET_NR_security: 8353 goto unimplemented; 8354 #endif 8355 #ifdef TARGET_NR_getpagesize 8356 case TARGET_NR_getpagesize: 8357 ret = 
TARGET_PAGE_SIZE; 8358 break; 8359 #endif 8360 case TARGET_NR_gettid: 8361 ret = get_errno(gettid()); 8362 break; 8363 #ifdef TARGET_NR_readahead 8364 case TARGET_NR_readahead: 8365 #if TARGET_ABI_BITS == 32 8366 if (regpairs_aligned(cpu_env)) { 8367 arg2 = arg3; 8368 arg3 = arg4; 8369 arg4 = arg5; 8370 } 8371 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 8372 #else 8373 ret = get_errno(readahead(arg1, arg2, arg3)); 8374 #endif 8375 break; 8376 #endif 8377 #ifdef CONFIG_ATTR 8378 #ifdef TARGET_NR_setxattr 8379 case TARGET_NR_listxattr: 8380 case TARGET_NR_llistxattr: 8381 { 8382 void *p, *b = 0; 8383 if (arg2) { 8384 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8385 if (!b) { 8386 ret = -TARGET_EFAULT; 8387 break; 8388 } 8389 } 8390 p = lock_user_string(arg1); 8391 if (p) { 8392 if (num == TARGET_NR_listxattr) { 8393 ret = get_errno(listxattr(p, b, arg3)); 8394 } else { 8395 ret = get_errno(llistxattr(p, b, arg3)); 8396 } 8397 } else { 8398 ret = -TARGET_EFAULT; 8399 } 8400 unlock_user(p, arg1, 0); 8401 unlock_user(b, arg2, arg3); 8402 break; 8403 } 8404 case TARGET_NR_flistxattr: 8405 { 8406 void *b = 0; 8407 if (arg2) { 8408 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8409 if (!b) { 8410 ret = -TARGET_EFAULT; 8411 break; 8412 } 8413 } 8414 ret = get_errno(flistxattr(arg1, b, arg3)); 8415 unlock_user(b, arg2, arg3); 8416 break; 8417 } 8418 case TARGET_NR_setxattr: 8419 case TARGET_NR_lsetxattr: 8420 { 8421 void *p, *n, *v = 0; 8422 if (arg3) { 8423 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8424 if (!v) { 8425 ret = -TARGET_EFAULT; 8426 break; 8427 } 8428 } 8429 p = lock_user_string(arg1); 8430 n = lock_user_string(arg2); 8431 if (p && n) { 8432 if (num == TARGET_NR_setxattr) { 8433 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8434 } else { 8435 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8436 } 8437 } else { 8438 ret = -TARGET_EFAULT; 8439 } 8440 unlock_user(p, arg1, 0); 8441 unlock_user(n, arg2, 0); 8442 unlock_user(v, arg3, 0); 8443 } 8444 break; 8445 case TARGET_NR_fsetxattr: 8446 { 8447 void *n, *v = 0; 8448 if (arg3) { 8449 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8450 if (!v) { 8451 ret = -TARGET_EFAULT; 8452 break; 8453 } 8454 } 8455 n = lock_user_string(arg2); 8456 if (n) { 8457 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8458 } else { 8459 ret = -TARGET_EFAULT; 8460 } 8461 unlock_user(n, arg2, 0); 8462 unlock_user(v, arg3, 0); 8463 } 8464 break; 8465 case TARGET_NR_getxattr: 8466 case TARGET_NR_lgetxattr: 8467 { 8468 void *p, *n, *v = 0; 8469 if (arg3) { 8470 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8471 if (!v) { 8472 ret = -TARGET_EFAULT; 8473 break; 8474 } 8475 } 8476 p = lock_user_string(arg1); 8477 n = lock_user_string(arg2); 8478 if (p && n) { 8479 if (num == TARGET_NR_getxattr) { 8480 ret = get_errno(getxattr(p, n, v, arg4)); 8481 } else { 8482 ret = get_errno(lgetxattr(p, n, v, arg4)); 8483 } 8484 } else { 8485 ret = -TARGET_EFAULT; 8486 } 8487 unlock_user(p, arg1, 0); 8488 unlock_user(n, arg2, 0); 8489 unlock_user(v, arg3, arg4); 8490 } 8491 break; 8492 case TARGET_NR_fgetxattr: 8493 { 8494 void *n, *v = 0; 8495 if (arg3) { 8496 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8497 if (!v) { 8498 ret = -TARGET_EFAULT; 8499 break; 8500 } 8501 } 8502 n = lock_user_string(arg2); 8503 if (n) { 8504 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8505 } else { 8506 ret = -TARGET_EFAULT; 8507 } 8508 unlock_user(n, arg2, 0); 8509 unlock_user(v, arg3, arg4); 8510 } 8511 break; 8512 case TARGET_NR_removexattr: 8513 case 
TARGET_NR_lremovexattr: 8514 { 8515 void *p, *n; 8516 p = lock_user_string(arg1); 8517 n = lock_user_string(arg2); 8518 if (p && n) { 8519 if (num == TARGET_NR_removexattr) { 8520 ret = get_errno(removexattr(p, n)); 8521 } else { 8522 ret = get_errno(lremovexattr(p, n)); 8523 } 8524 } else { 8525 ret = -TARGET_EFAULT; 8526 } 8527 unlock_user(p, arg1, 0); 8528 unlock_user(n, arg2, 0); 8529 } 8530 break; 8531 case TARGET_NR_fremovexattr: 8532 { 8533 void *n; 8534 n = lock_user_string(arg2); 8535 if (n) { 8536 ret = get_errno(fremovexattr(arg1, n)); 8537 } else { 8538 ret = -TARGET_EFAULT; 8539 } 8540 unlock_user(n, arg2, 0); 8541 } 8542 break; 8543 #endif 8544 #endif /* CONFIG_ATTR */ 8545 #ifdef TARGET_NR_set_thread_area 8546 case TARGET_NR_set_thread_area: 8547 #if defined(TARGET_MIPS) 8548 ((CPUMIPSState *) cpu_env)->tls_value = arg1; 8549 ret = 0; 8550 break; 8551 #elif defined(TARGET_CRIS) 8552 if (arg1 & 0xff) 8553 ret = -TARGET_EINVAL; 8554 else { 8555 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 8556 ret = 0; 8557 } 8558 break; 8559 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 8560 ret = do_set_thread_area(cpu_env, arg1); 8561 break; 8562 #elif defined(TARGET_M68K) 8563 { 8564 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 8565 ts->tp_value = arg1; 8566 ret = 0; 8567 break; 8568 } 8569 #else 8570 goto unimplemented_nowarn; 8571 #endif 8572 #endif 8573 #ifdef TARGET_NR_get_thread_area 8574 case TARGET_NR_get_thread_area: 8575 #if defined(TARGET_I386) && defined(TARGET_ABI32) 8576 ret = do_get_thread_area(cpu_env, arg1); 8577 break; 8578 #elif defined(TARGET_M68K) 8579 { 8580 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 8581 ret = ts->tp_value; 8582 break; 8583 } 8584 #else 8585 goto unimplemented_nowarn; 8586 #endif 8587 #endif 8588 #ifdef TARGET_NR_getdomainname 8589 case TARGET_NR_getdomainname: 8590 goto unimplemented_nowarn; 8591 #endif 8592 8593 #ifdef TARGET_NR_clock_gettime 8594 case TARGET_NR_clock_gettime: 8595 { 8596 struct timespec ts; 8597 ret = get_errno(clock_gettime(arg1, &ts)); 8598 if (!is_error(ret)) { 8599 host_to_target_timespec(arg2, &ts); 8600 } 8601 break; 8602 } 8603 #endif 8604 #ifdef TARGET_NR_clock_getres 8605 case TARGET_NR_clock_getres: 8606 { 8607 struct timespec ts; 8608 ret = get_errno(clock_getres(arg1, &ts)); 8609 if (!is_error(ret)) { 8610 host_to_target_timespec(arg2, &ts); 8611 } 8612 break; 8613 } 8614 #endif 8615 #ifdef TARGET_NR_clock_nanosleep 8616 case TARGET_NR_clock_nanosleep: 8617 { 8618 struct timespec ts; 8619 target_to_host_timespec(&ts, arg3); 8620 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); 8621 if (arg4) 8622 host_to_target_timespec(arg4, &ts); 8623 break; 8624 } 8625 #endif 8626 8627 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 8628 case TARGET_NR_set_tid_address: 8629 ret = get_errno(set_tid_address((int *)g2h(arg1))); 8630 break; 8631 #endif 8632 8633 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 8634 case TARGET_NR_tkill: 8635 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 8636 break; 8637 #endif 8638 8639 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 8640 case TARGET_NR_tgkill: 8641 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 8642 target_to_host_signal(arg3))); 8643 break; 8644 #endif 8645 8646 #ifdef TARGET_NR_set_robust_list 8647 case TARGET_NR_set_robust_list: 8648 case TARGET_NR_get_robust_list: 8649 /* The ABI for supporting robust futexes has userspace pass 8650 * the kernel a pointer to a linked list which is updated by 8651 * userspace after the syscall; the list is walked by the kernel 8652 * when the thread exits. Since the linked list in QEMU guest 8653 * memory isn't a valid linked list for the host and we have 8654 * no way to reliably intercept the thread-death event, we can't 8655 * support these. Silently return ENOSYS so that guest userspace 8656 * falls back to a non-robust futex implementation (which should 8657 * be OK except in the corner case of the guest crashing while 8658 * holding a mutex that is shared with another process via 8659 * shared memory). 8660 */ 8661 goto unimplemented_nowarn; 8662 #endif 8663 8664 #if defined(TARGET_NR_utimensat) 8665 case TARGET_NR_utimensat: 8666 { 8667 struct timespec *tsp, ts[2]; 8668 if (!arg3) { 8669 tsp = NULL; 8670 } else { 8671 target_to_host_timespec(ts, arg3); 8672 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 8673 tsp = ts; 8674 } 8675 if (!arg2) 8676 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 8677 else { 8678 if (!(p = lock_user_string(arg2))) { 8679 ret = -TARGET_EFAULT; 8680 goto fail; 8681 } 8682 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 8683 unlock_user(p, arg2, 0); 8684 } 8685 } 8686 break; 8687 #endif 8688 case TARGET_NR_futex: 8689 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 8690 break; 8691 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 8692 case TARGET_NR_inotify_init: 8693 ret = get_errno(sys_inotify_init()); 8694 break; 8695 #endif 8696 #ifdef CONFIG_INOTIFY1 8697 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 8698 case TARGET_NR_inotify_init1: 8699 ret = get_errno(sys_inotify_init1(arg1)); 8700 break; 8701 #endif 8702 #endif 8703 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 8704 case TARGET_NR_inotify_add_watch: 8705 p = lock_user_string(arg2); 8706 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 8707 unlock_user(p, arg2, 0); 8708 break; 8709 #endif 8710 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 8711 case TARGET_NR_inotify_rm_watch: 8712 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 8713 break; 8714 #endif 8715 8716 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 8717 case TARGET_NR_mq_open: 8718 { 8719 struct mq_attr posix_mq_attr; 8720 8721 p = lock_user_string(arg1 - 1); 8722 if (arg4 != 0) 8723 copy_from_user_mq_attr (&posix_mq_attr, arg4); 8724 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 8725 unlock_user (p, arg1, 0); 8726 } 8727 break; 8728 8729 case TARGET_NR_mq_unlink: 8730 p = lock_user_string(arg1 - 1); 
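        /* p now holds the guest-supplied queue name; pass it to the host's
         * POSIX message queue implementation and convert the errno. */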
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }

        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if
defined(TARGET_MIPS) 8866 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 8867 target_offset64(arg5, arg6), arg7)); 8868 #else 8869 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 8870 target_offset64(arg4, arg5), arg6)); 8871 #endif /* !TARGET_MIPS */ 8872 #else 8873 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 8874 #endif 8875 break; 8876 #endif 8877 #if defined(TARGET_NR_sync_file_range2) 8878 case TARGET_NR_sync_file_range2: 8879 /* This is like sync_file_range but the arguments are reordered */ 8880 #if TARGET_ABI_BITS == 32 8881 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 8882 target_offset64(arg5, arg6), arg2)); 8883 #else 8884 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 8885 #endif 8886 break; 8887 #endif 8888 #endif 8889 #if defined(CONFIG_EPOLL) 8890 #if defined(TARGET_NR_epoll_create) 8891 case TARGET_NR_epoll_create: 8892 ret = get_errno(epoll_create(arg1)); 8893 break; 8894 #endif 8895 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 8896 case TARGET_NR_epoll_create1: 8897 ret = get_errno(epoll_create1(arg1)); 8898 break; 8899 #endif 8900 #if defined(TARGET_NR_epoll_ctl) 8901 case TARGET_NR_epoll_ctl: 8902 { 8903 struct epoll_event ep; 8904 struct epoll_event *epp = 0; 8905 if (arg4) { 8906 struct target_epoll_event *target_ep; 8907 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 8908 goto efault; 8909 } 8910 ep.events = tswap32(target_ep->events); 8911 /* The epoll_data_t union is just opaque data to the kernel, 8912 * so we transfer all 64 bits across and need not worry what 8913 * actual data type it is. 8914 */ 8915 ep.data.u64 = tswap64(target_ep->data.u64); 8916 unlock_user_struct(target_ep, arg4, 0); 8917 epp = &ep; 8918 } 8919 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 8920 break; 8921 } 8922 #endif 8923 8924 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 8925 #define IMPLEMENT_EPOLL_PWAIT 8926 #endif 8927 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 8928 #if defined(TARGET_NR_epoll_wait) 8929 case TARGET_NR_epoll_wait: 8930 #endif 8931 #if defined(IMPLEMENT_EPOLL_PWAIT) 8932 case TARGET_NR_epoll_pwait: 8933 #endif 8934 { 8935 struct target_epoll_event *target_ep; 8936 struct epoll_event *ep; 8937 int epfd = arg1; 8938 int maxevents = arg3; 8939 int timeout = arg4; 8940 8941 target_ep = lock_user(VERIFY_WRITE, arg2, 8942 maxevents * sizeof(struct target_epoll_event), 1); 8943 if (!target_ep) { 8944 goto efault; 8945 } 8946 8947 ep = alloca(maxevents * sizeof(struct epoll_event)); 8948 8949 switch (num) { 8950 #if defined(IMPLEMENT_EPOLL_PWAIT) 8951 case TARGET_NR_epoll_pwait: 8952 { 8953 target_sigset_t *target_set; 8954 sigset_t _set, *set = &_set; 8955 8956 if (arg5) { 8957 target_set = lock_user(VERIFY_READ, arg5, 8958 sizeof(target_sigset_t), 1); 8959 if (!target_set) { 8960 unlock_user(target_ep, arg2, 0); 8961 goto efault; 8962 } 8963 target_to_host_sigset(set, target_set); 8964 unlock_user(target_set, arg5, 0); 8965 } else { 8966 set = NULL; 8967 } 8968 8969 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 8970 break; 8971 } 8972 #endif 8973 #if defined(TARGET_NR_epoll_wait) 8974 case TARGET_NR_epoll_wait: 8975 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 8976 break; 8977 #endif 8978 default: 8979 ret = -TARGET_ENOSYS; 8980 } 8981 if (!is_error(ret)) { 8982 int i; 8983 for (i = 0; i < ret; i++) { 8984 target_ep[i].events = tswap32(ep[i].events); 8985 
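                    /* As in epoll_ctl above, the data member is opaque to the
                     * kernel, so swapping it back as a raw 64-bit value is enough. */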
target_ep[i].data.u64 = tswap64(ep[i].data.u64); 8986 } 8987 } 8988 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 8989 break; 8990 } 8991 #endif 8992 #endif 8993 #ifdef TARGET_NR_prlimit64 8994 case TARGET_NR_prlimit64: 8995 { 8996 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 8997 struct target_rlimit64 *target_rnew, *target_rold; 8998 struct host_rlimit64 rnew, rold, *rnewp = 0; 8999 if (arg3) { 9000 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 9001 goto efault; 9002 } 9003 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 9004 rnew.rlim_max = tswap64(target_rnew->rlim_max); 9005 unlock_user_struct(target_rnew, arg3, 0); 9006 rnewp = &rnew; 9007 } 9008 9009 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0)); 9010 if (!is_error(ret) && arg4) { 9011 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 9012 goto efault; 9013 } 9014 target_rold->rlim_cur = tswap64(rold.rlim_cur); 9015 target_rold->rlim_max = tswap64(rold.rlim_max); 9016 unlock_user_struct(target_rold, arg4, 1); 9017 } 9018 break; 9019 } 9020 #endif 9021 #ifdef TARGET_NR_gethostname 9022 case TARGET_NR_gethostname: 9023 { 9024 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9025 if (name) { 9026 ret = get_errno(gethostname(name, arg2)); 9027 unlock_user(name, arg1, arg2); 9028 } else { 9029 ret = -TARGET_EFAULT; 9030 } 9031 break; 9032 } 9033 #endif 9034 #ifdef TARGET_NR_atomic_cmpxchg_32 9035 case TARGET_NR_atomic_cmpxchg_32: 9036 { 9037 /* should use start_exclusive from main.c */ 9038 abi_ulong mem_value; 9039 if (get_user_u32(mem_value, arg6)) { 9040 target_siginfo_t info; 9041 info.si_signo = SIGSEGV; 9042 info.si_errno = 0; 9043 info.si_code = TARGET_SEGV_MAPERR; 9044 info._sifields._sigfault._addr = arg6; 9045 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 9046 ret = 0xdeadbeef; 9047 9048 } 9049 if (mem_value == arg2) 9050 put_user_u32(arg1, arg6); 9051 ret = mem_value; 9052 break; 9053 } 9054 #endif 9055 #ifdef TARGET_NR_atomic_barrier 9056 case TARGET_NR_atomic_barrier: 9057 { 9058 /* Like the kernel implementation and the qemu arm barrier, no-op this? 
 */
        break;
    }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
        struct target_sigevent *ptarget_sevp;
        struct target_timer_t *ptarget_timer;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
                    goto efault;
                }

                host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
                host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);

                phost_sevp = &host_sevp;
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
                    goto efault;
                }
                ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
                unlock_user_struct(ptarget_timer, arg3, 1);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        arg1 &= 0xffff;
        if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                    timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4) {
                host_to_target_itimerspec(arg4, &hspec_old);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        arg1 &= 0xffff;
        if (!arg2) {
            return -TARGET_EFAULT;
        } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_getoverrun(htimer));
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[arg1] = 0;
        }
        break;
    }
#endif

    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
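    /* Common exit path: every case above lands here with the result (or a
     * -TARGET_Exxx error) in ret; log it for strace before returning. */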
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}