/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"

#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])


#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
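
/*
 * Wrappers that invoke host system calls directly via syscall(2).  Each
 * _syscallN(type, name, ...) use below expands to a static function
 * "name" taking N arguments and returning syscall(__NR_name, ...), so
 * host syscalls that the host libc does not wrap (or wraps with
 * different semantics) can still be called; failures are reported
 * through errno as usual.
 */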
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}


#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#ifdef __NR_getdents
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);

static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
    { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
    { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
    { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
    { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
    { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
    { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
    { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
    { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
    { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
    { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};

#define COPY_UTSNAME_FIELD(dest, src) \
    do { \
        /* __NEW_UTS_LEN doesn't include terminating null */ \
        (void) strncpy((dest), (src), __NEW_UTS_LEN); \
        (dest)[__NEW_UTS_LEN] = '\0'; \
    } while (0)

static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct linux kernel uses).
     */

    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}

#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif

#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};
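
/*
 * Helpers to translate errno values between host and target.  Note that
 * the syscall implementations below report failure as the negative of a
 * target errno (see get_errno()), which is why host errnos are mapped
 * through host_to_target_errno_table[] first.
 */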
static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunk).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
the_end:
    msgh->msg_controllen = space;
    return 0;
}

static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == SOL_SOCKET) &&
            (cmsg->cmsg_type == SCM_RIGHTS)) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == SOL_SOCKET) &&
                   (cmsg->cmsg_type == SO_TIMESTAMP) &&
                   (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = malloc(fprog.len * sizeof(*filter));
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                       SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                err = EFAULT;
                goto fail;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    unlock_user(target_vec, target_addr, 0);
 fail2:
    free(vec);
    errno = err;
    return NULL;
}

static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}

static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
    }
    return ret;
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* FIXME */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif

/* do_accept4() Must return target values and target errnos. */
*/ 1996 static abi_long do_accept4(int fd, abi_ulong target_addr, 1997 abi_ulong target_addrlen_addr, int flags) 1998 { 1999 socklen_t addrlen; 2000 void *addr; 2001 abi_long ret; 2002 2003 if (target_addr == 0) { 2004 return get_errno(accept4(fd, NULL, NULL, flags)); 2005 } 2006 2007 /* linux returns EINVAL if addrlen pointer is invalid */ 2008 if (get_user_u32(addrlen, target_addrlen_addr)) 2009 return -TARGET_EINVAL; 2010 2011 if ((int)addrlen < 0) { 2012 return -TARGET_EINVAL; 2013 } 2014 2015 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2016 return -TARGET_EINVAL; 2017 2018 addr = alloca(addrlen); 2019 2020 ret = get_errno(accept4(fd, addr, &addrlen, flags)); 2021 if (!is_error(ret)) { 2022 host_to_target_sockaddr(target_addr, addr, addrlen); 2023 if (put_user_u32(addrlen, target_addrlen_addr)) 2024 ret = -TARGET_EFAULT; 2025 } 2026 return ret; 2027 } 2028 2029 /* do_getpeername() Must return target values and target errnos. */ 2030 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2031 abi_ulong target_addrlen_addr) 2032 { 2033 socklen_t addrlen; 2034 void *addr; 2035 abi_long ret; 2036 2037 if (get_user_u32(addrlen, target_addrlen_addr)) 2038 return -TARGET_EFAULT; 2039 2040 if ((int)addrlen < 0) { 2041 return -TARGET_EINVAL; 2042 } 2043 2044 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2045 return -TARGET_EFAULT; 2046 2047 addr = alloca(addrlen); 2048 2049 ret = get_errno(getpeername(fd, addr, &addrlen)); 2050 if (!is_error(ret)) { 2051 host_to_target_sockaddr(target_addr, addr, addrlen); 2052 if (put_user_u32(addrlen, target_addrlen_addr)) 2053 ret = -TARGET_EFAULT; 2054 } 2055 return ret; 2056 } 2057 2058 /* do_getsockname() Must return target values and target errnos. */ 2059 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2060 abi_ulong target_addrlen_addr) 2061 { 2062 socklen_t addrlen; 2063 void *addr; 2064 abi_long ret; 2065 2066 if (get_user_u32(addrlen, target_addrlen_addr)) 2067 return -TARGET_EFAULT; 2068 2069 if ((int)addrlen < 0) { 2070 return -TARGET_EINVAL; 2071 } 2072 2073 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2074 return -TARGET_EFAULT; 2075 2076 addr = alloca(addrlen); 2077 2078 ret = get_errno(getsockname(fd, addr, &addrlen)); 2079 if (!is_error(ret)) { 2080 host_to_target_sockaddr(target_addr, addr, addrlen); 2081 if (put_user_u32(addrlen, target_addrlen_addr)) 2082 ret = -TARGET_EFAULT; 2083 } 2084 return ret; 2085 } 2086 2087 /* do_socketpair() Must return target values and target errnos. */ 2088 static abi_long do_socketpair(int domain, int type, int protocol, 2089 abi_ulong target_tab_addr) 2090 { 2091 int tab[2]; 2092 abi_long ret; 2093 2094 target_to_host_sock_type(&type); 2095 2096 ret = get_errno(socketpair(domain, type, protocol, tab)); 2097 if (!is_error(ret)) { 2098 if (put_user_s32(tab[0], target_tab_addr) 2099 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2100 ret = -TARGET_EFAULT; 2101 } 2102 return ret; 2103 } 2104 2105 /* do_sendto() Must return target values and target errnos. 
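 * When a destination address is supplied it is converted with
 * target_to_host_sockaddr() and sendto() is used; without one the call
 * degrades to plain send(). The guest buffer is locked for reading only and
 * is unlocked again before returning.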
*/ 2106 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2107 abi_ulong target_addr, socklen_t addrlen) 2108 { 2109 void *addr; 2110 void *host_msg; 2111 abi_long ret; 2112 2113 if ((int)addrlen < 0) { 2114 return -TARGET_EINVAL; 2115 } 2116 2117 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2118 if (!host_msg) 2119 return -TARGET_EFAULT; 2120 if (target_addr) { 2121 addr = alloca(addrlen); 2122 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2123 if (ret) { 2124 unlock_user(host_msg, msg, 0); 2125 return ret; 2126 } 2127 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2128 } else { 2129 ret = get_errno(send(fd, host_msg, len, flags)); 2130 } 2131 unlock_user(host_msg, msg, 0); 2132 return ret; 2133 } 2134 2135 /* do_recvfrom() Must return target values and target errnos. */ 2136 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2137 abi_ulong target_addr, 2138 abi_ulong target_addrlen) 2139 { 2140 socklen_t addrlen; 2141 void *addr; 2142 void *host_msg; 2143 abi_long ret; 2144 2145 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2146 if (!host_msg) 2147 return -TARGET_EFAULT; 2148 if (target_addr) { 2149 if (get_user_u32(addrlen, target_addrlen)) { 2150 ret = -TARGET_EFAULT; 2151 goto fail; 2152 } 2153 if ((int)addrlen < 0) { 2154 ret = -TARGET_EINVAL; 2155 goto fail; 2156 } 2157 addr = alloca(addrlen); 2158 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2159 } else { 2160 addr = NULL; /* To keep compiler quiet. */ 2161 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2162 } 2163 if (!is_error(ret)) { 2164 if (target_addr) { 2165 host_to_target_sockaddr(target_addr, addr, addrlen); 2166 if (put_user_u32(addrlen, target_addrlen)) { 2167 ret = -TARGET_EFAULT; 2168 goto fail; 2169 } 2170 } 2171 unlock_user(host_msg, msg, len); 2172 } else { 2173 fail: 2174 unlock_user(host_msg, msg, 0); 2175 } 2176 return ret; 2177 } 2178 2179 #ifdef TARGET_NR_socketcall 2180 /* do_socketcall() Must return target values and target errnos. 
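 * socketcall(2) multiplexes every socket operation through one syscall: the
 * guest passes an operation number plus a pointer to an array of abi_long
 * arguments. The ac[] table below records how many arguments each operation
 * consumes so they can be fetched from guest memory before dispatching to
 * the individual do_*() helpers. For example, a 32-bit x86 guest's
 * connect(fd, sa, len) typically arrives as SOCKOP_connect with vptr
 * pointing at the three values { fd, guest address of sa, len } stored
 * back to back.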
*/ 2181 static abi_long do_socketcall(int num, abi_ulong vptr) 2182 { 2183 static const unsigned ac[] = { /* number of arguments per call */ 2184 [SOCKOP_socket] = 3, /* domain, type, protocol */ 2185 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */ 2186 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */ 2187 [SOCKOP_listen] = 2, /* sockfd, backlog */ 2188 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */ 2189 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */ 2190 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */ 2191 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */ 2192 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */ 2193 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */ 2194 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */ 2195 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2196 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2197 [SOCKOP_shutdown] = 2, /* sockfd, how */ 2198 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */ 2199 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */ 2200 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2201 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2202 }; 2203 abi_long a[6]; /* max 6 args */ 2204 2205 /* first, collect the arguments in a[] according to ac[] */ 2206 if (num >= 0 && num < ARRAY_SIZE(ac)) { 2207 unsigned i; 2208 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */ 2209 for (i = 0; i < ac[num]; ++i) { 2210 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 2211 return -TARGET_EFAULT; 2212 } 2213 } 2214 } 2215 2216 /* now when we have the args, actually handle the call */ 2217 switch (num) { 2218 case SOCKOP_socket: /* domain, type, protocol */ 2219 return do_socket(a[0], a[1], a[2]); 2220 case SOCKOP_bind: /* sockfd, addr, addrlen */ 2221 return do_bind(a[0], a[1], a[2]); 2222 case SOCKOP_connect: /* sockfd, addr, addrlen */ 2223 return do_connect(a[0], a[1], a[2]); 2224 case SOCKOP_listen: /* sockfd, backlog */ 2225 return get_errno(listen(a[0], a[1])); 2226 case SOCKOP_accept: /* sockfd, addr, addrlen */ 2227 return do_accept4(a[0], a[1], a[2], 0); 2228 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */ 2229 return do_accept4(a[0], a[1], a[2], a[3]); 2230 case SOCKOP_getsockname: /* sockfd, addr, addrlen */ 2231 return do_getsockname(a[0], a[1], a[2]); 2232 case SOCKOP_getpeername: /* sockfd, addr, addrlen */ 2233 return do_getpeername(a[0], a[1], a[2]); 2234 case SOCKOP_socketpair: /* domain, type, protocol, tab */ 2235 return do_socketpair(a[0], a[1], a[2], a[3]); 2236 case SOCKOP_send: /* sockfd, msg, len, flags */ 2237 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 2238 case SOCKOP_recv: /* sockfd, msg, len, flags */ 2239 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 2240 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */ 2241 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 2242 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */ 2243 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 2244 case SOCKOP_shutdown: /* sockfd, how */ 2245 return get_errno(shutdown(a[0], a[1])); 2246 case SOCKOP_sendmsg: /* sockfd, msg, flags */ 2247 return do_sendrecvmsg(a[0], a[1], a[2], 1); 2248 case SOCKOP_recvmsg: /* sockfd, msg, flags */ 2249 return do_sendrecvmsg(a[0], a[1], a[2], 0); 2250 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */ 2251 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 2252 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, 
optlen */ 2253 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 2254 default: 2255 gemu_log("Unsupported socketcall: %d\n", num); 2256 return -TARGET_ENOSYS; 2257 } 2258 } 2259 #endif 2260 2261 #define N_SHM_REGIONS 32 2262 2263 static struct shm_region { 2264 abi_ulong start; 2265 abi_ulong size; 2266 } shm_regions[N_SHM_REGIONS]; 2267 2268 struct target_semid_ds 2269 { 2270 struct target_ipc_perm sem_perm; 2271 abi_ulong sem_otime; 2272 abi_ulong __unused1; 2273 abi_ulong sem_ctime; 2274 abi_ulong __unused2; 2275 abi_ulong sem_nsems; 2276 abi_ulong __unused3; 2277 abi_ulong __unused4; 2278 }; 2279 2280 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2281 abi_ulong target_addr) 2282 { 2283 struct target_ipc_perm *target_ip; 2284 struct target_semid_ds *target_sd; 2285 2286 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2287 return -TARGET_EFAULT; 2288 target_ip = &(target_sd->sem_perm); 2289 host_ip->__key = tswap32(target_ip->__key); 2290 host_ip->uid = tswap32(target_ip->uid); 2291 host_ip->gid = tswap32(target_ip->gid); 2292 host_ip->cuid = tswap32(target_ip->cuid); 2293 host_ip->cgid = tswap32(target_ip->cgid); 2294 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2295 host_ip->mode = tswap32(target_ip->mode); 2296 #else 2297 host_ip->mode = tswap16(target_ip->mode); 2298 #endif 2299 #if defined(TARGET_PPC) 2300 host_ip->__seq = tswap32(target_ip->__seq); 2301 #else 2302 host_ip->__seq = tswap16(target_ip->__seq); 2303 #endif 2304 unlock_user_struct(target_sd, target_addr, 0); 2305 return 0; 2306 } 2307 2308 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2309 struct ipc_perm *host_ip) 2310 { 2311 struct target_ipc_perm *target_ip; 2312 struct target_semid_ds *target_sd; 2313 2314 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2315 return -TARGET_EFAULT; 2316 target_ip = &(target_sd->sem_perm); 2317 target_ip->__key = tswap32(host_ip->__key); 2318 target_ip->uid = tswap32(host_ip->uid); 2319 target_ip->gid = tswap32(host_ip->gid); 2320 target_ip->cuid = tswap32(host_ip->cuid); 2321 target_ip->cgid = tswap32(host_ip->cgid); 2322 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2323 target_ip->mode = tswap32(host_ip->mode); 2324 #else 2325 target_ip->mode = tswap16(host_ip->mode); 2326 #endif 2327 #if defined(TARGET_PPC) 2328 target_ip->__seq = tswap32(host_ip->__seq); 2329 #else 2330 target_ip->__seq = tswap16(host_ip->__seq); 2331 #endif 2332 unlock_user_struct(target_sd, target_addr, 1); 2333 return 0; 2334 } 2335 2336 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2337 abi_ulong target_addr) 2338 { 2339 struct target_semid_ds *target_sd; 2340 2341 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2342 return -TARGET_EFAULT; 2343 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2344 return -TARGET_EFAULT; 2345 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2346 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2347 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2348 unlock_user_struct(target_sd, target_addr, 0); 2349 return 0; 2350 } 2351 2352 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2353 struct semid_ds *host_sd) 2354 { 2355 struct target_semid_ds *target_sd; 2356 2357 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2358 return -TARGET_EFAULT; 2359 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2360 return -TARGET_EFAULT; 2361 
target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2362 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2363 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2364 unlock_user_struct(target_sd, target_addr, 1); 2365 return 0; 2366 } 2367 2368 struct target_seminfo { 2369 int semmap; 2370 int semmni; 2371 int semmns; 2372 int semmnu; 2373 int semmsl; 2374 int semopm; 2375 int semume; 2376 int semusz; 2377 int semvmx; 2378 int semaem; 2379 }; 2380 2381 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2382 struct seminfo *host_seminfo) 2383 { 2384 struct target_seminfo *target_seminfo; 2385 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2386 return -TARGET_EFAULT; 2387 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2388 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2389 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2390 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2391 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2392 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2393 __put_user(host_seminfo->semume, &target_seminfo->semume); 2394 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2395 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2396 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2397 unlock_user_struct(target_seminfo, target_addr, 1); 2398 return 0; 2399 } 2400 2401 union semun { 2402 int val; 2403 struct semid_ds *buf; 2404 unsigned short *array; 2405 struct seminfo *__buf; 2406 }; 2407 2408 union target_semun { 2409 int val; 2410 abi_ulong buf; 2411 abi_ulong array; 2412 abi_ulong __buf; 2413 }; 2414 2415 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2416 abi_ulong target_addr) 2417 { 2418 int nsems; 2419 unsigned short *array; 2420 union semun semun; 2421 struct semid_ds semid_ds; 2422 int i, ret; 2423 2424 semun.buf = &semid_ds; 2425 2426 ret = semctl(semid, 0, IPC_STAT, semun); 2427 if (ret == -1) 2428 return get_errno(ret); 2429 2430 nsems = semid_ds.sem_nsems; 2431 2432 *host_array = malloc(nsems*sizeof(unsigned short)); 2433 if (!*host_array) { 2434 return -TARGET_ENOMEM; 2435 } 2436 array = lock_user(VERIFY_READ, target_addr, 2437 nsems*sizeof(unsigned short), 1); 2438 if (!array) { 2439 free(*host_array); 2440 return -TARGET_EFAULT; 2441 } 2442 2443 for(i=0; i<nsems; i++) { 2444 __get_user((*host_array)[i], &array[i]); 2445 } 2446 unlock_user(array, target_addr, 0); 2447 2448 return 0; 2449 } 2450 2451 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2452 unsigned short **host_array) 2453 { 2454 int nsems; 2455 unsigned short *array; 2456 union semun semun; 2457 struct semid_ds semid_ds; 2458 int i, ret; 2459 2460 semun.buf = &semid_ds; 2461 2462 ret = semctl(semid, 0, IPC_STAT, semun); 2463 if (ret == -1) 2464 return get_errno(ret); 2465 2466 nsems = semid_ds.sem_nsems; 2467 2468 array = lock_user(VERIFY_WRITE, target_addr, 2469 nsems*sizeof(unsigned short), 0); 2470 if (!array) 2471 return -TARGET_EFAULT; 2472 2473 for(i=0; i<nsems; i++) { 2474 __put_user((*host_array)[i], &array[i]); 2475 } 2476 free(*host_array); 2477 unlock_user(array, target_addr, 1); 2478 2479 return 0; 2480 } 2481 2482 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2483 union target_semun target_su) 2484 { 2485 union semun arg; 2486 struct semid_ds dsarg; 2487 unsigned short *array = NULL; 2488 struct seminfo seminfo; 2489 abi_long ret = -TARGET_EINVAL; 2490 
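/* The 0xff mask below strips the IPC_64 version flag that guest C libraries
 * may OR into the command word, leaving the plain GETVAL/SETALL/IPC_STAT/...
 * value; for GETALL and SETALL the guest array named by target_su.array is
 * staged through a temporary host array by target_to_host_semarray(). */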
abi_long err; 2491 cmd &= 0xff; 2492 2493 switch( cmd ) { 2494 case GETVAL: 2495 case SETVAL: 2496 arg.val = tswap32(target_su.val); 2497 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2498 target_su.val = tswap32(arg.val); 2499 break; 2500 case GETALL: 2501 case SETALL: 2502 err = target_to_host_semarray(semid, &array, target_su.array); 2503 if (err) 2504 return err; 2505 arg.array = array; 2506 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2507 err = host_to_target_semarray(semid, target_su.array, &array); 2508 if (err) 2509 return err; 2510 break; 2511 case IPC_STAT: 2512 case IPC_SET: 2513 case SEM_STAT: 2514 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2515 if (err) 2516 return err; 2517 arg.buf = &dsarg; 2518 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2519 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2520 if (err) 2521 return err; 2522 break; 2523 case IPC_INFO: 2524 case SEM_INFO: 2525 arg.__buf = &seminfo; 2526 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2527 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2528 if (err) 2529 return err; 2530 break; 2531 case IPC_RMID: 2532 case GETPID: 2533 case GETNCNT: 2534 case GETZCNT: 2535 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2536 break; 2537 } 2538 2539 return ret; 2540 } 2541 2542 struct target_sembuf { 2543 unsigned short sem_num; 2544 short sem_op; 2545 short sem_flg; 2546 }; 2547 2548 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2549 abi_ulong target_addr, 2550 unsigned nsops) 2551 { 2552 struct target_sembuf *target_sembuf; 2553 int i; 2554 2555 target_sembuf = lock_user(VERIFY_READ, target_addr, 2556 nsops*sizeof(struct target_sembuf), 1); 2557 if (!target_sembuf) 2558 return -TARGET_EFAULT; 2559 2560 for(i=0; i<nsops; i++) { 2561 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2562 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2563 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2564 } 2565 2566 unlock_user(target_sembuf, target_addr, 0); 2567 2568 return 0; 2569 } 2570 2571 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2572 { 2573 struct sembuf sops[nsops]; 2574 2575 if (target_to_host_sembuf(sops, ptr, nsops)) 2576 return -TARGET_EFAULT; 2577 2578 return get_errno(semop(semid, sops, nsops)); 2579 } 2580 2581 struct target_msqid_ds 2582 { 2583 struct target_ipc_perm msg_perm; 2584 abi_ulong msg_stime; 2585 #if TARGET_ABI_BITS == 32 2586 abi_ulong __unused1; 2587 #endif 2588 abi_ulong msg_rtime; 2589 #if TARGET_ABI_BITS == 32 2590 abi_ulong __unused2; 2591 #endif 2592 abi_ulong msg_ctime; 2593 #if TARGET_ABI_BITS == 32 2594 abi_ulong __unused3; 2595 #endif 2596 abi_ulong __msg_cbytes; 2597 abi_ulong msg_qnum; 2598 abi_ulong msg_qbytes; 2599 abi_ulong msg_lspid; 2600 abi_ulong msg_lrpid; 2601 abi_ulong __unused4; 2602 abi_ulong __unused5; 2603 }; 2604 2605 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2606 abi_ulong target_addr) 2607 { 2608 struct target_msqid_ds *target_md; 2609 2610 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2611 return -TARGET_EFAULT; 2612 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2613 return -TARGET_EFAULT; 2614 host_md->msg_stime = tswapal(target_md->msg_stime); 2615 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2616 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2617 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2618 host_md->msg_qnum = tswapal(target_md->msg_qnum); 
2619 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2620 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2621 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2622 unlock_user_struct(target_md, target_addr, 0); 2623 return 0; 2624 } 2625 2626 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2627 struct msqid_ds *host_md) 2628 { 2629 struct target_msqid_ds *target_md; 2630 2631 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2632 return -TARGET_EFAULT; 2633 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2634 return -TARGET_EFAULT; 2635 target_md->msg_stime = tswapal(host_md->msg_stime); 2636 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2637 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2638 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2639 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2640 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2641 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2642 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2643 unlock_user_struct(target_md, target_addr, 1); 2644 return 0; 2645 } 2646 2647 struct target_msginfo { 2648 int msgpool; 2649 int msgmap; 2650 int msgmax; 2651 int msgmnb; 2652 int msgmni; 2653 int msgssz; 2654 int msgtql; 2655 unsigned short int msgseg; 2656 }; 2657 2658 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2659 struct msginfo *host_msginfo) 2660 { 2661 struct target_msginfo *target_msginfo; 2662 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2663 return -TARGET_EFAULT; 2664 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2665 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2666 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2667 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2668 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2669 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2670 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2671 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2672 unlock_user_struct(target_msginfo, target_addr, 1); 2673 return 0; 2674 } 2675 2676 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2677 { 2678 struct msqid_ds dsarg; 2679 struct msginfo msginfo; 2680 abi_long ret = -TARGET_EINVAL; 2681 2682 cmd &= 0xff; 2683 2684 switch (cmd) { 2685 case IPC_STAT: 2686 case IPC_SET: 2687 case MSG_STAT: 2688 if (target_to_host_msqid_ds(&dsarg,ptr)) 2689 return -TARGET_EFAULT; 2690 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2691 if (host_to_target_msqid_ds(ptr,&dsarg)) 2692 return -TARGET_EFAULT; 2693 break; 2694 case IPC_RMID: 2695 ret = get_errno(msgctl(msgid, cmd, NULL)); 2696 break; 2697 case IPC_INFO: 2698 case MSG_INFO: 2699 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2700 if (host_to_target_msginfo(ptr, &msginfo)) 2701 return -TARGET_EFAULT; 2702 break; 2703 } 2704 2705 return ret; 2706 } 2707 2708 struct target_msgbuf { 2709 abi_long mtype; 2710 char mtext[1]; 2711 }; 2712 2713 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2714 unsigned int msgsz, int msgflg) 2715 { 2716 struct target_msgbuf *target_mb; 2717 struct msgbuf *host_mb; 2718 abi_long ret = 0; 2719 2720 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2721 return -TARGET_EFAULT; 2722 host_mb = malloc(msgsz+sizeof(long)); 2723 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2724 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2725 ret = 
get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2726 free(host_mb); 2727 unlock_user_struct(target_mb, msgp, 0); 2728 2729 return ret; 2730 } 2731 2732 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2733 unsigned int msgsz, abi_long msgtyp, 2734 int msgflg) 2735 { 2736 struct target_msgbuf *target_mb; 2737 char *target_mtext; 2738 struct msgbuf *host_mb; 2739 abi_long ret = 0; 2740 2741 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2742 return -TARGET_EFAULT; 2743 2744 host_mb = g_malloc(msgsz+sizeof(long)); 2745 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2746 2747 if (ret > 0) { 2748 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2749 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2750 if (!target_mtext) { 2751 ret = -TARGET_EFAULT; 2752 goto end; 2753 } 2754 memcpy(target_mb->mtext, host_mb->mtext, ret); 2755 unlock_user(target_mtext, target_mtext_addr, ret); 2756 } 2757 2758 target_mb->mtype = tswapal(host_mb->mtype); 2759 2760 end: 2761 if (target_mb) 2762 unlock_user_struct(target_mb, msgp, 1); 2763 g_free(host_mb); 2764 return ret; 2765 } 2766 2767 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2768 abi_ulong target_addr) 2769 { 2770 struct target_shmid_ds *target_sd; 2771 2772 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2773 return -TARGET_EFAULT; 2774 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2775 return -TARGET_EFAULT; 2776 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2777 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2778 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2779 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2780 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2781 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2782 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2783 unlock_user_struct(target_sd, target_addr, 0); 2784 return 0; 2785 } 2786 2787 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2788 struct shmid_ds *host_sd) 2789 { 2790 struct target_shmid_ds *target_sd; 2791 2792 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2793 return -TARGET_EFAULT; 2794 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2795 return -TARGET_EFAULT; 2796 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2797 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2798 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2799 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2800 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2801 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2802 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2803 unlock_user_struct(target_sd, target_addr, 1); 2804 return 0; 2805 } 2806 2807 struct target_shminfo { 2808 abi_ulong shmmax; 2809 abi_ulong shmmin; 2810 abi_ulong shmmni; 2811 abi_ulong shmseg; 2812 abi_ulong shmall; 2813 }; 2814 2815 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2816 struct shminfo *host_shminfo) 2817 { 2818 struct target_shminfo *target_shminfo; 2819 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 2820 return -TARGET_EFAULT; 2821 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 2822 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 2823 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 2824 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 2825 __put_user(host_shminfo->shmall, 
&target_shminfo->shmall); 2826 unlock_user_struct(target_shminfo, target_addr, 1); 2827 return 0; 2828 } 2829 2830 struct target_shm_info { 2831 int used_ids; 2832 abi_ulong shm_tot; 2833 abi_ulong shm_rss; 2834 abi_ulong shm_swp; 2835 abi_ulong swap_attempts; 2836 abi_ulong swap_successes; 2837 }; 2838 2839 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 2840 struct shm_info *host_shm_info) 2841 { 2842 struct target_shm_info *target_shm_info; 2843 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 2844 return -TARGET_EFAULT; 2845 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 2846 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 2847 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 2848 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 2849 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 2850 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 2851 unlock_user_struct(target_shm_info, target_addr, 1); 2852 return 0; 2853 } 2854 2855 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 2856 { 2857 struct shmid_ds dsarg; 2858 struct shminfo shminfo; 2859 struct shm_info shm_info; 2860 abi_long ret = -TARGET_EINVAL; 2861 2862 cmd &= 0xff; 2863 2864 switch(cmd) { 2865 case IPC_STAT: 2866 case IPC_SET: 2867 case SHM_STAT: 2868 if (target_to_host_shmid_ds(&dsarg, buf)) 2869 return -TARGET_EFAULT; 2870 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 2871 if (host_to_target_shmid_ds(buf, &dsarg)) 2872 return -TARGET_EFAULT; 2873 break; 2874 case IPC_INFO: 2875 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 2876 if (host_to_target_shminfo(buf, &shminfo)) 2877 return -TARGET_EFAULT; 2878 break; 2879 case SHM_INFO: 2880 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 2881 if (host_to_target_shm_info(buf, &shm_info)) 2882 return -TARGET_EFAULT; 2883 break; 2884 case IPC_RMID: 2885 case SHM_LOCK: 2886 case SHM_UNLOCK: 2887 ret = get_errno(shmctl(shmid, cmd, NULL)); 2888 break; 2889 } 2890 2891 return ret; 2892 } 2893 2894 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 2895 { 2896 abi_long raddr; 2897 void *host_raddr; 2898 struct shmid_ds shm_info; 2899 int i,ret; 2900 2901 /* find out the length of the shared memory segment */ 2902 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 2903 if (is_error(ret)) { 2904 /* can't get length, bail out */ 2905 return ret; 2906 } 2907 2908 mmap_lock(); 2909 2910 if (shmaddr) 2911 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 2912 else { 2913 abi_ulong mmap_start; 2914 2915 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 2916 2917 if (mmap_start == -1) { 2918 errno = ENOMEM; 2919 host_raddr = (void *)-1; 2920 } else 2921 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 2922 } 2923 2924 if (host_raddr == (void *)-1) { 2925 mmap_unlock(); 2926 return get_errno((long)host_raddr); 2927 } 2928 raddr=h2g((unsigned long)host_raddr); 2929 2930 page_set_flags(raddr, raddr + shm_info.shm_segsz, 2931 PAGE_VALID | PAGE_READ | 2932 ((shmflg & SHM_RDONLY)? 
0 : PAGE_WRITE)); 2933 2934 for (i = 0; i < N_SHM_REGIONS; i++) { 2935 if (shm_regions[i].start == 0) { 2936 shm_regions[i].start = raddr; 2937 shm_regions[i].size = shm_info.shm_segsz; 2938 break; 2939 } 2940 } 2941 2942 mmap_unlock(); 2943 return raddr; 2944 2945 } 2946 2947 static inline abi_long do_shmdt(abi_ulong shmaddr) 2948 { 2949 int i; 2950 2951 for (i = 0; i < N_SHM_REGIONS; ++i) { 2952 if (shm_regions[i].start == shmaddr) { 2953 shm_regions[i].start = 0; 2954 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 2955 break; 2956 } 2957 } 2958 2959 return get_errno(shmdt(g2h(shmaddr))); 2960 } 2961 2962 #ifdef TARGET_NR_ipc 2963 /* ??? This only works with linear mappings. */ 2964 /* do_ipc() must return target values and target errnos. */ 2965 static abi_long do_ipc(unsigned int call, int first, 2966 int second, int third, 2967 abi_long ptr, abi_long fifth) 2968 { 2969 int version; 2970 abi_long ret = 0; 2971 2972 version = call >> 16; 2973 call &= 0xffff; 2974 2975 switch (call) { 2976 case IPCOP_semop: 2977 ret = do_semop(first, ptr, second); 2978 break; 2979 2980 case IPCOP_semget: 2981 ret = get_errno(semget(first, second, third)); 2982 break; 2983 2984 case IPCOP_semctl: 2985 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 2986 break; 2987 2988 case IPCOP_msgget: 2989 ret = get_errno(msgget(first, second)); 2990 break; 2991 2992 case IPCOP_msgsnd: 2993 ret = do_msgsnd(first, ptr, second, third); 2994 break; 2995 2996 case IPCOP_msgctl: 2997 ret = do_msgctl(first, second, ptr); 2998 break; 2999 3000 case IPCOP_msgrcv: 3001 switch (version) { 3002 case 0: 3003 { 3004 struct target_ipc_kludge { 3005 abi_long msgp; 3006 abi_long msgtyp; 3007 } *tmp; 3008 3009 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3010 ret = -TARGET_EFAULT; 3011 break; 3012 } 3013 3014 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3015 3016 unlock_user_struct(tmp, ptr, 0); 3017 break; 3018 } 3019 default: 3020 ret = do_msgrcv(first, ptr, second, fifth, third); 3021 } 3022 break; 3023 3024 case IPCOP_shmat: 3025 switch (version) { 3026 default: 3027 { 3028 abi_ulong raddr; 3029 raddr = do_shmat(first, ptr, second); 3030 if (is_error(raddr)) 3031 return get_errno(raddr); 3032 if (put_user_ual(raddr, third)) 3033 return -TARGET_EFAULT; 3034 break; 3035 } 3036 case 1: 3037 ret = -TARGET_EINVAL; 3038 break; 3039 } 3040 break; 3041 case IPCOP_shmdt: 3042 ret = do_shmdt(ptr); 3043 break; 3044 3045 case IPCOP_shmget: 3046 /* IPC_* flag values are the same on all linux platforms */ 3047 ret = get_errno(shmget(first, second, third)); 3048 break; 3049 3050 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3051 case IPCOP_shmctl: 3052 ret = do_shmctl(first, second, ptr); 3053 break; 3054 default: 3055 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3056 ret = -TARGET_ENOSYS; 3057 break; 3058 } 3059 return ret; 3060 } 3061 #endif 3062 3063 /* kernel structure types definitions */ 3064 3065 #define STRUCT(name, ...) STRUCT_ ## name, 3066 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3067 enum { 3068 #include "syscall_types.h" 3069 }; 3070 #undef STRUCT 3071 #undef STRUCT_SPECIAL 3072 3073 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3074 #define STRUCT_SPECIAL(name) 3075 #include "syscall_types.h" 3076 #undef STRUCT 3077 #undef STRUCT_SPECIAL 3078 3079 typedef struct IOCTLEntry IOCTLEntry; 3080 3081 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3082 int fd, abi_long cmd, abi_long arg); 3083 3084 struct IOCTLEntry { 3085 unsigned int target_cmd; 3086 unsigned int host_cmd; 3087 const char *name; 3088 int access; 3089 do_ioctl_fn *do_ioctl; 3090 const argtype arg_type[5]; 3091 }; 3092 3093 #define IOC_R 0x0001 3094 #define IOC_W 0x0002 3095 #define IOC_RW (IOC_R | IOC_W) 3096 3097 #define MAX_STRUCT_SIZE 4096 3098 3099 #ifdef CONFIG_FIEMAP 3100 /* So fiemap access checks don't overflow on 32 bit systems. 3101 * This is very slightly smaller than the limit imposed by 3102 * the underlying kernel. 3103 */ 3104 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3105 / sizeof(struct fiemap_extent)) 3106 3107 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3108 int fd, abi_long cmd, abi_long arg) 3109 { 3110 /* The parameter for this ioctl is a struct fiemap followed 3111 * by an array of struct fiemap_extent whose size is set 3112 * in fiemap->fm_extent_count. The array is filled in by the 3113 * ioctl. 3114 */ 3115 int target_size_in, target_size_out; 3116 struct fiemap *fm; 3117 const argtype *arg_type = ie->arg_type; 3118 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3119 void *argptr, *p; 3120 abi_long ret; 3121 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3122 uint32_t outbufsz; 3123 int free_fm = 0; 3124 3125 assert(arg_type[0] == TYPE_PTR); 3126 assert(ie->access == IOC_RW); 3127 arg_type++; 3128 target_size_in = thunk_type_size(arg_type, 0); 3129 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3130 if (!argptr) { 3131 return -TARGET_EFAULT; 3132 } 3133 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3134 unlock_user(argptr, arg, 0); 3135 fm = (struct fiemap *)buf_temp; 3136 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3137 return -TARGET_EINVAL; 3138 } 3139 3140 outbufsz = sizeof (*fm) + 3141 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3142 3143 if (outbufsz > MAX_STRUCT_SIZE) { 3144 /* We can't fit all the extents into the fixed size buffer. 3145 * Allocate one that is large enough and use it instead. 
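 * free_fm records that the buffer was heap allocated so it can be released
 * once the converted result has been copied back to guest memory.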
3146 */ 3147 fm = malloc(outbufsz); 3148 if (!fm) { 3149 return -TARGET_ENOMEM; 3150 } 3151 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3152 free_fm = 1; 3153 } 3154 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3155 if (!is_error(ret)) { 3156 target_size_out = target_size_in; 3157 /* An extent_count of 0 means we were only counting the extents 3158 * so there are no structs to copy 3159 */ 3160 if (fm->fm_extent_count != 0) { 3161 target_size_out += fm->fm_mapped_extents * extent_size; 3162 } 3163 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3164 if (!argptr) { 3165 ret = -TARGET_EFAULT; 3166 } else { 3167 /* Convert the struct fiemap */ 3168 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3169 if (fm->fm_extent_count != 0) { 3170 p = argptr + target_size_in; 3171 /* ...and then all the struct fiemap_extents */ 3172 for (i = 0; i < fm->fm_mapped_extents; i++) { 3173 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3174 THUNK_TARGET); 3175 p += extent_size; 3176 } 3177 } 3178 unlock_user(argptr, arg, target_size_out); 3179 } 3180 } 3181 if (free_fm) { 3182 free(fm); 3183 } 3184 return ret; 3185 } 3186 #endif 3187 3188 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3189 int fd, abi_long cmd, abi_long arg) 3190 { 3191 const argtype *arg_type = ie->arg_type; 3192 int target_size; 3193 void *argptr; 3194 int ret; 3195 struct ifconf *host_ifconf; 3196 uint32_t outbufsz; 3197 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3198 int target_ifreq_size; 3199 int nb_ifreq; 3200 int free_buf = 0; 3201 int i; 3202 int target_ifc_len; 3203 abi_long target_ifc_buf; 3204 int host_ifc_len; 3205 char *host_ifc_buf; 3206 3207 assert(arg_type[0] == TYPE_PTR); 3208 assert(ie->access == IOC_RW); 3209 3210 arg_type++; 3211 target_size = thunk_type_size(arg_type, 0); 3212 3213 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3214 if (!argptr) 3215 return -TARGET_EFAULT; 3216 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3217 unlock_user(argptr, arg, 0); 3218 3219 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3220 target_ifc_len = host_ifconf->ifc_len; 3221 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3222 3223 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3224 nb_ifreq = target_ifc_len / target_ifreq_size; 3225 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3226 3227 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3228 if (outbufsz > MAX_STRUCT_SIZE) { 3229 /* We can't fit all the extents into the fixed size buffer. 3230 * Allocate one that is large enough and use it instead. 
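 * As in the fiemap handler above, free_buf notes that host_ifconf now points
 * at a heap allocation rather than at buf_temp.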
3231 */ 3232 host_ifconf = malloc(outbufsz); 3233 if (!host_ifconf) { 3234 return -TARGET_ENOMEM; 3235 } 3236 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3237 free_buf = 1; 3238 } 3239 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3240 3241 host_ifconf->ifc_len = host_ifc_len; 3242 host_ifconf->ifc_buf = host_ifc_buf; 3243 3244 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3245 if (!is_error(ret)) { 3246 /* convert host ifc_len to target ifc_len */ 3247 3248 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3249 target_ifc_len = nb_ifreq * target_ifreq_size; 3250 host_ifconf->ifc_len = target_ifc_len; 3251 3252 /* restore target ifc_buf */ 3253 3254 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3255 3256 /* copy struct ifconf to target user */ 3257 3258 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3259 if (!argptr) 3260 return -TARGET_EFAULT; 3261 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3262 unlock_user(argptr, arg, target_size); 3263 3264 /* copy ifreq[] to target user */ 3265 3266 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3267 for (i = 0; i < nb_ifreq ; i++) { 3268 thunk_convert(argptr + i * target_ifreq_size, 3269 host_ifc_buf + i * sizeof(struct ifreq), 3270 ifreq_arg_type, THUNK_TARGET); 3271 } 3272 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3273 } 3274 3275 if (free_buf) { 3276 free(host_ifconf); 3277 } 3278 3279 return ret; 3280 } 3281 3282 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3283 abi_long cmd, abi_long arg) 3284 { 3285 void *argptr; 3286 struct dm_ioctl *host_dm; 3287 abi_long guest_data; 3288 uint32_t guest_data_size; 3289 int target_size; 3290 const argtype *arg_type = ie->arg_type; 3291 abi_long ret; 3292 void *big_buf = NULL; 3293 char *host_data; 3294 3295 arg_type++; 3296 target_size = thunk_type_size(arg_type, 0); 3297 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3298 if (!argptr) { 3299 ret = -TARGET_EFAULT; 3300 goto out; 3301 } 3302 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3303 unlock_user(argptr, arg, 0); 3304 3305 /* buf_temp is too small, so fetch things into a bigger buffer */ 3306 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3307 memcpy(big_buf, buf_temp, target_size); 3308 buf_temp = big_buf; 3309 host_dm = big_buf; 3310 3311 guest_data = arg + host_dm->data_start; 3312 if ((guest_data - arg) < 0) { 3313 ret = -EINVAL; 3314 goto out; 3315 } 3316 guest_data_size = host_dm->data_size - host_dm->data_start; 3317 host_data = (char*)host_dm + host_dm->data_start; 3318 3319 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3320 switch (ie->host_cmd) { 3321 case DM_REMOVE_ALL: 3322 case DM_LIST_DEVICES: 3323 case DM_DEV_CREATE: 3324 case DM_DEV_REMOVE: 3325 case DM_DEV_SUSPEND: 3326 case DM_DEV_STATUS: 3327 case DM_DEV_WAIT: 3328 case DM_TABLE_STATUS: 3329 case DM_TABLE_CLEAR: 3330 case DM_TABLE_DEPS: 3331 case DM_LIST_VERSIONS: 3332 /* no input data */ 3333 break; 3334 case DM_DEV_RENAME: 3335 case DM_DEV_SET_GEOMETRY: 3336 /* data contains only strings */ 3337 memcpy(host_data, argptr, guest_data_size); 3338 break; 3339 case DM_TARGET_MSG: 3340 memcpy(host_data, argptr, guest_data_size); 3341 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3342 break; 3343 case DM_TABLE_LOAD: 3344 { 3345 void *gspec = argptr; 3346 void *cur_data = host_data; 3347 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3348 int spec_size = 
thunk_type_size(arg_type, 0); 3349 int i; 3350 3351 for (i = 0; i < host_dm->target_count; i++) { 3352 struct dm_target_spec *spec = cur_data; 3353 uint32_t next; 3354 int slen; 3355 3356 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3357 slen = strlen((char*)gspec + spec_size) + 1; 3358 next = spec->next; 3359 spec->next = sizeof(*spec) + slen; 3360 strcpy((char*)&spec[1], gspec + spec_size); 3361 gspec += next; 3362 cur_data += spec->next; 3363 } 3364 break; 3365 } 3366 default: 3367 ret = -TARGET_EINVAL; 3368 goto out; 3369 } 3370 unlock_user(argptr, guest_data, 0); 3371 3372 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3373 if (!is_error(ret)) { 3374 guest_data = arg + host_dm->data_start; 3375 guest_data_size = host_dm->data_size - host_dm->data_start; 3376 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3377 switch (ie->host_cmd) { 3378 case DM_REMOVE_ALL: 3379 case DM_DEV_CREATE: 3380 case DM_DEV_REMOVE: 3381 case DM_DEV_RENAME: 3382 case DM_DEV_SUSPEND: 3383 case DM_DEV_STATUS: 3384 case DM_TABLE_LOAD: 3385 case DM_TABLE_CLEAR: 3386 case DM_TARGET_MSG: 3387 case DM_DEV_SET_GEOMETRY: 3388 /* no return data */ 3389 break; 3390 case DM_LIST_DEVICES: 3391 { 3392 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3393 uint32_t remaining_data = guest_data_size; 3394 void *cur_data = argptr; 3395 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3396 int nl_size = 12; /* can't use thunk_size due to alignment */ 3397 3398 while (1) { 3399 uint32_t next = nl->next; 3400 if (next) { 3401 nl->next = nl_size + (strlen(nl->name) + 1); 3402 } 3403 if (remaining_data < nl->next) { 3404 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3405 break; 3406 } 3407 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3408 strcpy(cur_data + nl_size, nl->name); 3409 cur_data += nl->next; 3410 remaining_data -= nl->next; 3411 if (!next) { 3412 break; 3413 } 3414 nl = (void*)nl + next; 3415 } 3416 break; 3417 } 3418 case DM_DEV_WAIT: 3419 case DM_TABLE_STATUS: 3420 { 3421 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3422 void *cur_data = argptr; 3423 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3424 int spec_size = thunk_type_size(arg_type, 0); 3425 int i; 3426 3427 for (i = 0; i < host_dm->target_count; i++) { 3428 uint32_t next = spec->next; 3429 int slen = strlen((char*)&spec[1]) + 1; 3430 spec->next = (cur_data - argptr) + spec_size + slen; 3431 if (guest_data_size < spec->next) { 3432 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3433 break; 3434 } 3435 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3436 strcpy(cur_data + spec_size, (char*)&spec[1]); 3437 cur_data = argptr + spec->next; 3438 spec = (void*)host_dm + host_dm->data_start + next; 3439 } 3440 break; 3441 } 3442 case DM_TABLE_DEPS: 3443 { 3444 void *hdata = (void*)host_dm + host_dm->data_start; 3445 int count = *(uint32_t*)hdata; 3446 uint64_t *hdev = hdata + 8; 3447 uint64_t *gdev = argptr + 8; 3448 int i; 3449 3450 *(uint32_t*)argptr = tswap32(count); 3451 for (i = 0; i < count; i++) { 3452 *gdev = tswap64(*hdev); 3453 gdev++; 3454 hdev++; 3455 } 3456 break; 3457 } 3458 case DM_LIST_VERSIONS: 3459 { 3460 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3461 uint32_t remaining_data = guest_data_size; 3462 void *cur_data = argptr; 3463 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3464 int vers_size = thunk_type_size(arg_type, 0); 3465 3466 while (1) { 3467 uint32_t next = vers->next; 3468 if (next) { 3469 
vers->next = vers_size + (strlen(vers->name) + 1); 3470 } 3471 if (remaining_data < vers->next) { 3472 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3473 break; 3474 } 3475 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3476 strcpy(cur_data + vers_size, vers->name); 3477 cur_data += vers->next; 3478 remaining_data -= vers->next; 3479 if (!next) { 3480 break; 3481 } 3482 vers = (void*)vers + next; 3483 } 3484 break; 3485 } 3486 default: 3487 ret = -TARGET_EINVAL; 3488 goto out; 3489 } 3490 unlock_user(argptr, guest_data, guest_data_size); 3491 3492 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3493 if (!argptr) { 3494 ret = -TARGET_EFAULT; 3495 goto out; 3496 } 3497 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3498 unlock_user(argptr, arg, target_size); 3499 } 3500 out: 3501 g_free(big_buf); 3502 return ret; 3503 } 3504 3505 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3506 int fd, abi_long cmd, abi_long arg) 3507 { 3508 const argtype *arg_type = ie->arg_type; 3509 const StructEntry *se; 3510 const argtype *field_types; 3511 const int *dst_offsets, *src_offsets; 3512 int target_size; 3513 void *argptr; 3514 abi_ulong *target_rt_dev_ptr; 3515 unsigned long *host_rt_dev_ptr; 3516 abi_long ret; 3517 int i; 3518 3519 assert(ie->access == IOC_W); 3520 assert(*arg_type == TYPE_PTR); 3521 arg_type++; 3522 assert(*arg_type == TYPE_STRUCT); 3523 target_size = thunk_type_size(arg_type, 0); 3524 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3525 if (!argptr) { 3526 return -TARGET_EFAULT; 3527 } 3528 arg_type++; 3529 assert(*arg_type == (int)STRUCT_rtentry); 3530 se = struct_entries + *arg_type++; 3531 assert(se->convert[0] == NULL); 3532 /* convert struct here to be able to catch rt_dev string */ 3533 field_types = se->field_types; 3534 dst_offsets = se->field_offsets[THUNK_HOST]; 3535 src_offsets = se->field_offsets[THUNK_TARGET]; 3536 for (i = 0; i < se->nb_fields; i++) { 3537 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3538 assert(*field_types == TYPE_PTRVOID); 3539 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3540 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3541 if (*target_rt_dev_ptr != 0) { 3542 *host_rt_dev_ptr = (unsigned long)lock_user_string( 3543 tswapal(*target_rt_dev_ptr)); 3544 if (!*host_rt_dev_ptr) { 3545 unlock_user(argptr, arg, 0); 3546 return -TARGET_EFAULT; 3547 } 3548 } else { 3549 *host_rt_dev_ptr = 0; 3550 } 3551 field_types++; 3552 continue; 3553 } 3554 field_types = thunk_convert(buf_temp + dst_offsets[i], 3555 argptr + src_offsets[i], 3556 field_types, THUNK_HOST); 3557 } 3558 unlock_user(argptr, arg, 0); 3559 3560 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3561 if (*host_rt_dev_ptr != 0) { 3562 unlock_user((void *)*host_rt_dev_ptr, 3563 *target_rt_dev_ptr, 0); 3564 } 3565 return ret; 3566 } 3567 3568 static IOCTLEntry ioctl_entries[] = { 3569 #define IOCTL(cmd, access, ...) \ 3570 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3571 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3572 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3573 #include "ioctls.h" 3574 { 0, 0, }, 3575 }; 3576 3577 /* ??? Implement proper locking for ioctls. */ 3578 /* do_ioctl() Must return target values and target errnos. 
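 * The ioctl_entries[] table generated from ioctls.h is searched linearly for
 * the guest command number. Entries that provide a do_ioctl callback perform
 * their own argument conversion; the generic path below uses the thunk type
 * description to convert the third argument into buf_temp and/or back out to
 * guest memory according to the entry's IOC_R/IOC_W access flags.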
*/ 3579 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3580 { 3581 const IOCTLEntry *ie; 3582 const argtype *arg_type; 3583 abi_long ret; 3584 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3585 int target_size; 3586 void *argptr; 3587 3588 ie = ioctl_entries; 3589 for(;;) { 3590 if (ie->target_cmd == 0) { 3591 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3592 return -TARGET_ENOSYS; 3593 } 3594 if (ie->target_cmd == cmd) 3595 break; 3596 ie++; 3597 } 3598 arg_type = ie->arg_type; 3599 #if defined(DEBUG) 3600 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3601 #endif 3602 if (ie->do_ioctl) { 3603 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3604 } 3605 3606 switch(arg_type[0]) { 3607 case TYPE_NULL: 3608 /* no argument */ 3609 ret = get_errno(ioctl(fd, ie->host_cmd)); 3610 break; 3611 case TYPE_PTRVOID: 3612 case TYPE_INT: 3613 /* int argment */ 3614 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3615 break; 3616 case TYPE_PTR: 3617 arg_type++; 3618 target_size = thunk_type_size(arg_type, 0); 3619 switch(ie->access) { 3620 case IOC_R: 3621 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3622 if (!is_error(ret)) { 3623 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3624 if (!argptr) 3625 return -TARGET_EFAULT; 3626 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3627 unlock_user(argptr, arg, target_size); 3628 } 3629 break; 3630 case IOC_W: 3631 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3632 if (!argptr) 3633 return -TARGET_EFAULT; 3634 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3635 unlock_user(argptr, arg, 0); 3636 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3637 break; 3638 default: 3639 case IOC_RW: 3640 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3641 if (!argptr) 3642 return -TARGET_EFAULT; 3643 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3644 unlock_user(argptr, arg, 0); 3645 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3646 if (!is_error(ret)) { 3647 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3648 if (!argptr) 3649 return -TARGET_EFAULT; 3650 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3651 unlock_user(argptr, arg, target_size); 3652 } 3653 break; 3654 } 3655 break; 3656 default: 3657 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3658 (long)cmd, arg_type[0]); 3659 ret = -TARGET_ENOSYS; 3660 break; 3661 } 3662 return ret; 3663 } 3664 3665 static const bitmask_transtbl iflag_tbl[] = { 3666 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3667 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3668 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3669 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3670 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3671 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3672 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3673 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3674 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3675 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3676 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3677 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3678 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3679 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3680 { 0, 0, 0, 0 } 3681 }; 3682 3683 static const bitmask_transtbl oflag_tbl[] = { 3684 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3685 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3686 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3687 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3688 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR 
}, 3689 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3690 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3691 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3692 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3693 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3694 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3695 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3696 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3697 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3698 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3699 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3700 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3701 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3702 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3703 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3704 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3705 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3706 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3707 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3708 { 0, 0, 0, 0 } 3709 }; 3710 3711 static const bitmask_transtbl cflag_tbl[] = { 3712 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3713 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3714 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3715 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3716 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3717 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3718 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3719 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3720 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3721 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3722 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3723 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3724 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3725 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3726 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3727 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3728 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3729 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3730 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3731 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3732 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3733 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3734 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3735 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3736 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3737 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3738 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3739 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3740 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3741 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3742 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3743 { 0, 0, 0, 0 } 3744 }; 3745 3746 static const bitmask_transtbl lflag_tbl[] = { 3747 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3748 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3749 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3750 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3751 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3752 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3753 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3754 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3755 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3756 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3757 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3758 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3759 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3760 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3761 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3762 { 0, 0, 0, 0 } 3763 }; 3764 3765 static void target_to_host_termios 
(void *dst, const void *src) 3766 { 3767 struct host_termios *host = dst; 3768 const struct target_termios *target = src; 3769 3770 host->c_iflag = 3771 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3772 host->c_oflag = 3773 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3774 host->c_cflag = 3775 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3776 host->c_lflag = 3777 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3778 host->c_line = target->c_line; 3779 3780 memset(host->c_cc, 0, sizeof(host->c_cc)); 3781 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3782 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3783 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3784 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3785 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3786 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3787 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3788 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3789 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3790 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3791 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3792 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3793 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3794 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3795 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3796 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3797 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3798 } 3799 3800 static void host_to_target_termios (void *dst, const void *src) 3801 { 3802 struct target_termios *target = dst; 3803 const struct host_termios *host = src; 3804 3805 target->c_iflag = 3806 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3807 target->c_oflag = 3808 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3809 target->c_cflag = 3810 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3811 target->c_lflag = 3812 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3813 target->c_line = host->c_line; 3814 3815 memset(target->c_cc, 0, sizeof(target->c_cc)); 3816 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3817 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3818 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3819 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3820 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3821 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3822 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3823 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3824 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3825 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3826 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3827 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3828 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3829 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3830 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3831 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3832 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3833 } 3834 3835 static const StructEntry struct_termios_def = { 3836 .convert = { host_to_target_termios, target_to_host_termios }, 3837 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3838 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3839 }; 3840 3841 static bitmask_transtbl mmap_flags_tbl[] = { 3842 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3843 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3844 { TARGET_MAP_FIXED, 
TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3845 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3846 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3847 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3848 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3849 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3850 { 0, 0, 0, 0 }
3851 };
3852
3853 #if defined(TARGET_I386)
3854
3855 /* NOTE: there is really only one LDT for all the threads */
3856 static uint8_t *ldt_table;
3857
3858 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3859 {
3860 int size;
3861 void *p;
3862
3863 if (!ldt_table)
3864 return 0;
3865 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3866 if (size > bytecount)
3867 size = bytecount;
3868 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3869 if (!p)
3870 return -TARGET_EFAULT;
3871 /* ??? Should this be byteswapped? */
3872 memcpy(p, ldt_table, size);
3873 unlock_user(p, ptr, size);
3874 return size;
3875 }
3876
3877 /* XXX: add locking support */
3878 static abi_long write_ldt(CPUX86State *env,
3879 abi_ulong ptr, unsigned long bytecount, int oldmode)
3880 {
3881 struct target_modify_ldt_ldt_s ldt_info;
3882 struct target_modify_ldt_ldt_s *target_ldt_info;
3883 int seg_32bit, contents, read_exec_only, limit_in_pages;
3884 int seg_not_present, useable, lm;
3885 uint32_t *lp, entry_1, entry_2;
3886
3887 if (bytecount != sizeof(ldt_info))
3888 return -TARGET_EINVAL;
3889 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3890 return -TARGET_EFAULT;
3891 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3892 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3893 ldt_info.limit = tswap32(target_ldt_info->limit);
3894 ldt_info.flags = tswap32(target_ldt_info->flags);
3895 unlock_user_struct(target_ldt_info, ptr, 0);
3896
3897 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3898 return -TARGET_EINVAL;
3899 seg_32bit = ldt_info.flags & 1;
3900 contents = (ldt_info.flags >> 1) & 3;
3901 read_exec_only = (ldt_info.flags >> 3) & 1;
3902 limit_in_pages = (ldt_info.flags >> 4) & 1;
3903 seg_not_present = (ldt_info.flags >> 5) & 1;
3904 useable = (ldt_info.flags >> 6) & 1;
3905 #ifdef TARGET_ABI32
3906 lm = 0;
3907 #else
3908 lm = (ldt_info.flags >> 7) & 1;
3909 #endif
3910 if (contents == 3) {
3911 if (oldmode)
3912 return -TARGET_EINVAL;
3913 if (seg_not_present == 0)
3914 return -TARGET_EINVAL;
3915 }
3916 /* allocate the LDT */
3917 if (!ldt_table) {
3918 env->ldt.base = target_mmap(0,
3919 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3920 PROT_READ|PROT_WRITE,
3921 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3922 if (env->ldt.base == -1)
3923 return -TARGET_ENOMEM;
3924 memset(g2h(env->ldt.base), 0,
3925 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3926 env->ldt.limit = 0xffff;
3927 ldt_table = g2h(env->ldt.base);
3928 }
3929
3930 /* NOTE: same code as Linux kernel */
3931 /* Allow LDTs to be cleared by the user.
*/ 3932 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3933 if (oldmode || 3934 (contents == 0 && 3935 read_exec_only == 1 && 3936 seg_32bit == 0 && 3937 limit_in_pages == 0 && 3938 seg_not_present == 1 && 3939 useable == 0 )) { 3940 entry_1 = 0; 3941 entry_2 = 0; 3942 goto install; 3943 } 3944 } 3945 3946 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3947 (ldt_info.limit & 0x0ffff); 3948 entry_2 = (ldt_info.base_addr & 0xff000000) | 3949 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3950 (ldt_info.limit & 0xf0000) | 3951 ((read_exec_only ^ 1) << 9) | 3952 (contents << 10) | 3953 ((seg_not_present ^ 1) << 15) | 3954 (seg_32bit << 22) | 3955 (limit_in_pages << 23) | 3956 (lm << 21) | 3957 0x7000; 3958 if (!oldmode) 3959 entry_2 |= (useable << 20); 3960 3961 /* Install the new entry ... */ 3962 install: 3963 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 3964 lp[0] = tswap32(entry_1); 3965 lp[1] = tswap32(entry_2); 3966 return 0; 3967 } 3968 3969 /* specific and weird i386 syscalls */ 3970 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 3971 unsigned long bytecount) 3972 { 3973 abi_long ret; 3974 3975 switch (func) { 3976 case 0: 3977 ret = read_ldt(ptr, bytecount); 3978 break; 3979 case 1: 3980 ret = write_ldt(env, ptr, bytecount, 1); 3981 break; 3982 case 0x11: 3983 ret = write_ldt(env, ptr, bytecount, 0); 3984 break; 3985 default: 3986 ret = -TARGET_ENOSYS; 3987 break; 3988 } 3989 return ret; 3990 } 3991 3992 #if defined(TARGET_I386) && defined(TARGET_ABI32) 3993 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 3994 { 3995 uint64_t *gdt_table = g2h(env->gdt.base); 3996 struct target_modify_ldt_ldt_s ldt_info; 3997 struct target_modify_ldt_ldt_s *target_ldt_info; 3998 int seg_32bit, contents, read_exec_only, limit_in_pages; 3999 int seg_not_present, useable, lm; 4000 uint32_t *lp, entry_1, entry_2; 4001 int i; 4002 4003 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4004 if (!target_ldt_info) 4005 return -TARGET_EFAULT; 4006 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4007 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4008 ldt_info.limit = tswap32(target_ldt_info->limit); 4009 ldt_info.flags = tswap32(target_ldt_info->flags); 4010 if (ldt_info.entry_number == -1) { 4011 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4012 if (gdt_table[i] == 0) { 4013 ldt_info.entry_number = i; 4014 target_ldt_info->entry_number = tswap32(i); 4015 break; 4016 } 4017 } 4018 } 4019 unlock_user_struct(target_ldt_info, ptr, 1); 4020 4021 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4022 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4023 return -TARGET_EINVAL; 4024 seg_32bit = ldt_info.flags & 1; 4025 contents = (ldt_info.flags >> 1) & 3; 4026 read_exec_only = (ldt_info.flags >> 3) & 1; 4027 limit_in_pages = (ldt_info.flags >> 4) & 1; 4028 seg_not_present = (ldt_info.flags >> 5) & 1; 4029 useable = (ldt_info.flags >> 6) & 1; 4030 #ifdef TARGET_ABI32 4031 lm = 0; 4032 #else 4033 lm = (ldt_info.flags >> 7) & 1; 4034 #endif 4035 4036 if (contents == 3) { 4037 if (seg_not_present == 0) 4038 return -TARGET_EINVAL; 4039 } 4040 4041 /* NOTE: same code as Linux kernel */ 4042 /* Allow LDTs to be cleared by the user. 
*/ 4043 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4044 if ((contents == 0 && 4045 read_exec_only == 1 && 4046 seg_32bit == 0 && 4047 limit_in_pages == 0 && 4048 seg_not_present == 1 && 4049 useable == 0 )) { 4050 entry_1 = 0; 4051 entry_2 = 0; 4052 goto install; 4053 } 4054 } 4055 4056 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4057 (ldt_info.limit & 0x0ffff); 4058 entry_2 = (ldt_info.base_addr & 0xff000000) | 4059 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4060 (ldt_info.limit & 0xf0000) | 4061 ((read_exec_only ^ 1) << 9) | 4062 (contents << 10) | 4063 ((seg_not_present ^ 1) << 15) | 4064 (seg_32bit << 22) | 4065 (limit_in_pages << 23) | 4066 (useable << 20) | 4067 (lm << 21) | 4068 0x7000; 4069 4070 /* Install the new entry ... */ 4071 install: 4072 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4073 lp[0] = tswap32(entry_1); 4074 lp[1] = tswap32(entry_2); 4075 return 0; 4076 } 4077 4078 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4079 { 4080 struct target_modify_ldt_ldt_s *target_ldt_info; 4081 uint64_t *gdt_table = g2h(env->gdt.base); 4082 uint32_t base_addr, limit, flags; 4083 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4084 int seg_not_present, useable, lm; 4085 uint32_t *lp, entry_1, entry_2; 4086 4087 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4088 if (!target_ldt_info) 4089 return -TARGET_EFAULT; 4090 idx = tswap32(target_ldt_info->entry_number); 4091 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4092 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4093 unlock_user_struct(target_ldt_info, ptr, 1); 4094 return -TARGET_EINVAL; 4095 } 4096 lp = (uint32_t *)(gdt_table + idx); 4097 entry_1 = tswap32(lp[0]); 4098 entry_2 = tswap32(lp[1]); 4099 4100 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4101 contents = (entry_2 >> 10) & 3; 4102 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4103 seg_32bit = (entry_2 >> 22) & 1; 4104 limit_in_pages = (entry_2 >> 23) & 1; 4105 useable = (entry_2 >> 20) & 1; 4106 #ifdef TARGET_ABI32 4107 lm = 0; 4108 #else 4109 lm = (entry_2 >> 21) & 1; 4110 #endif 4111 flags = (seg_32bit << 0) | (contents << 1) | 4112 (read_exec_only << 3) | (limit_in_pages << 4) | 4113 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4114 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4115 base_addr = (entry_1 >> 16) | 4116 (entry_2 & 0xff000000) | 4117 ((entry_2 & 0xff) << 16); 4118 target_ldt_info->base_addr = tswapal(base_addr); 4119 target_ldt_info->limit = tswap32(limit); 4120 target_ldt_info->flags = tswap32(flags); 4121 unlock_user_struct(target_ldt_info, ptr, 1); 4122 return 0; 4123 } 4124 #endif /* TARGET_I386 && TARGET_ABI32 */ 4125 4126 #ifndef TARGET_ABI32 4127 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4128 { 4129 abi_long ret = 0; 4130 abi_ulong val; 4131 int idx; 4132 4133 switch(code) { 4134 case TARGET_ARCH_SET_GS: 4135 case TARGET_ARCH_SET_FS: 4136 if (code == TARGET_ARCH_SET_GS) 4137 idx = R_GS; 4138 else 4139 idx = R_FS; 4140 cpu_x86_load_seg(env, idx, 0); 4141 env->segs[idx].base = addr; 4142 break; 4143 case TARGET_ARCH_GET_GS: 4144 case TARGET_ARCH_GET_FS: 4145 if (code == TARGET_ARCH_GET_GS) 4146 idx = R_GS; 4147 else 4148 idx = R_FS; 4149 val = env->segs[idx].base; 4150 if (put_user(val, addr, abi_ulong)) 4151 ret = -TARGET_EFAULT; 4152 break; 4153 default: 4154 ret = -TARGET_EINVAL; 4155 break; 4156 } 4157 return ret; 4158 } 4159 #endif 4160 4161 #endif /* defined(TARGET_I386) */ 4162 4163 #define NEW_STACK_SIZE 0x40000 4164 4165 4166 static pthread_mutex_t 
clone_lock = PTHREAD_MUTEX_INITIALIZER;
4167 typedef struct {
4168 CPUArchState *env;
4169 pthread_mutex_t mutex;
4170 pthread_cond_t cond;
4171 pthread_t thread;
4172 uint32_t tid;
4173 abi_ulong child_tidptr;
4174 abi_ulong parent_tidptr;
4175 sigset_t sigmask;
4176 } new_thread_info;
4177
4178 static void *clone_func(void *arg)
4179 {
4180 new_thread_info *info = arg;
4181 CPUArchState *env;
4182 CPUState *cpu;
4183 TaskState *ts;
4184
4185 env = info->env;
4186 cpu = ENV_GET_CPU(env);
4187 thread_cpu = cpu;
4188 ts = (TaskState *)env->opaque;
4189 info->tid = gettid();
4190 cpu->host_tid = info->tid;
4191 task_settid(ts);
4192 if (info->child_tidptr)
4193 put_user_u32(info->tid, info->child_tidptr);
4194 if (info->parent_tidptr)
4195 put_user_u32(info->tid, info->parent_tidptr);
4196 /* Enable signals. */
4197 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4198 /* Signal to the parent that we're ready. */
4199 pthread_mutex_lock(&info->mutex);
4200 pthread_cond_broadcast(&info->cond);
4201 pthread_mutex_unlock(&info->mutex);
4202 /* Wait until the parent has finished initializing the TLS state. */
4203 pthread_mutex_lock(&clone_lock);
4204 pthread_mutex_unlock(&clone_lock);
4205 cpu_loop(env);
4206 /* never exits */
4207 return NULL;
4208 }
4209
4210 /* do_fork() must return host values and target errnos (unlike most
4211 do_*() functions). */
4212 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4213 abi_ulong parent_tidptr, target_ulong newtls,
4214 abi_ulong child_tidptr)
4215 {
4216 int ret;
4217 TaskState *ts;
4218 CPUArchState *new_env;
4219 unsigned int nptl_flags;
4220 sigset_t sigmask;
4221
4222 /* Emulate vfork() with fork() */
4223 if (flags & CLONE_VFORK)
4224 flags &= ~(CLONE_VFORK | CLONE_VM);
4225
4226 if (flags & CLONE_VM) {
4227 TaskState *parent_ts = (TaskState *)env->opaque;
4228 new_thread_info info;
4229 pthread_attr_t attr;
4230
4231 ts = g_malloc0(sizeof(TaskState));
4232 init_task_state(ts);
4233 /* we create a new CPU instance. */
4234 new_env = cpu_copy(env);
4235 /* Init regs that differ from the parent. */
4236 cpu_clone_regs(new_env, newsp);
4237 new_env->opaque = ts;
4238 ts->bprm = parent_ts->bprm;
4239 ts->info = parent_ts->info;
4240 nptl_flags = flags;
4241 flags &= ~CLONE_NPTL_FLAGS2;
4242
4243 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4244 ts->child_tidptr = child_tidptr;
4245 }
4246
4247 if (nptl_flags & CLONE_SETTLS)
4248 cpu_set_tls (new_env, newtls);
4249
4250 /* Grab a mutex so that thread setup appears atomic. */
4251 pthread_mutex_lock(&clone_lock);
4252
4253 memset(&info, 0, sizeof(info));
4254 pthread_mutex_init(&info.mutex, NULL);
4255 pthread_mutex_lock(&info.mutex);
4256 pthread_cond_init(&info.cond, NULL);
4257 info.env = new_env;
4258 if (nptl_flags & CLONE_CHILD_SETTID)
4259 info.child_tidptr = child_tidptr;
4260 if (nptl_flags & CLONE_PARENT_SETTID)
4261 info.parent_tidptr = parent_tidptr;
4262
4263 ret = pthread_attr_init(&attr);
4264 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4265 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4266 /* It is not safe to deliver signals until the child has finished
4267 initializing, so temporarily block all signals. */
4268 sigfillset(&sigmask);
4269 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4270
4271 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4272 /* TODO: Free new CPU state if thread creation failed.
*/ 4273 4274 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4275 pthread_attr_destroy(&attr); 4276 if (ret == 0) { 4277 /* Wait for the child to initialize. */ 4278 pthread_cond_wait(&info.cond, &info.mutex); 4279 ret = info.tid; 4280 if (flags & CLONE_PARENT_SETTID) 4281 put_user_u32(ret, parent_tidptr); 4282 } else { 4283 ret = -1; 4284 } 4285 pthread_mutex_unlock(&info.mutex); 4286 pthread_cond_destroy(&info.cond); 4287 pthread_mutex_destroy(&info.mutex); 4288 pthread_mutex_unlock(&clone_lock); 4289 } else { 4290 /* if no CLONE_VM, we consider it is a fork */ 4291 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4292 return -EINVAL; 4293 fork_start(); 4294 ret = fork(); 4295 if (ret == 0) { 4296 /* Child Process. */ 4297 cpu_clone_regs(env, newsp); 4298 fork_end(1); 4299 /* There is a race condition here. The parent process could 4300 theoretically read the TID in the child process before the child 4301 tid is set. This would require using either ptrace 4302 (not implemented) or having *_tidptr to point at a shared memory 4303 mapping. We can't repeat the spinlock hack used above because 4304 the child process gets its own copy of the lock. */ 4305 if (flags & CLONE_CHILD_SETTID) 4306 put_user_u32(gettid(), child_tidptr); 4307 if (flags & CLONE_PARENT_SETTID) 4308 put_user_u32(gettid(), parent_tidptr); 4309 ts = (TaskState *)env->opaque; 4310 if (flags & CLONE_SETTLS) 4311 cpu_set_tls (env, newtls); 4312 if (flags & CLONE_CHILD_CLEARTID) 4313 ts->child_tidptr = child_tidptr; 4314 } else { 4315 fork_end(0); 4316 } 4317 } 4318 return ret; 4319 } 4320 4321 /* warning : doesn't handle linux specific flags... */ 4322 static int target_to_host_fcntl_cmd(int cmd) 4323 { 4324 switch(cmd) { 4325 case TARGET_F_DUPFD: 4326 case TARGET_F_GETFD: 4327 case TARGET_F_SETFD: 4328 case TARGET_F_GETFL: 4329 case TARGET_F_SETFL: 4330 return cmd; 4331 case TARGET_F_GETLK: 4332 return F_GETLK; 4333 case TARGET_F_SETLK: 4334 return F_SETLK; 4335 case TARGET_F_SETLKW: 4336 return F_SETLKW; 4337 case TARGET_F_GETOWN: 4338 return F_GETOWN; 4339 case TARGET_F_SETOWN: 4340 return F_SETOWN; 4341 case TARGET_F_GETSIG: 4342 return F_GETSIG; 4343 case TARGET_F_SETSIG: 4344 return F_SETSIG; 4345 #if TARGET_ABI_BITS == 32 4346 case TARGET_F_GETLK64: 4347 return F_GETLK64; 4348 case TARGET_F_SETLK64: 4349 return F_SETLK64; 4350 case TARGET_F_SETLKW64: 4351 return F_SETLKW64; 4352 #endif 4353 case TARGET_F_SETLEASE: 4354 return F_SETLEASE; 4355 case TARGET_F_GETLEASE: 4356 return F_GETLEASE; 4357 #ifdef F_DUPFD_CLOEXEC 4358 case TARGET_F_DUPFD_CLOEXEC: 4359 return F_DUPFD_CLOEXEC; 4360 #endif 4361 case TARGET_F_NOTIFY: 4362 return F_NOTIFY; 4363 default: 4364 return -TARGET_EINVAL; 4365 } 4366 return -TARGET_EINVAL; 4367 } 4368 4369 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4370 static const bitmask_transtbl flock_tbl[] = { 4371 TRANSTBL_CONVERT(F_RDLCK), 4372 TRANSTBL_CONVERT(F_WRLCK), 4373 TRANSTBL_CONVERT(F_UNLCK), 4374 TRANSTBL_CONVERT(F_EXLCK), 4375 TRANSTBL_CONVERT(F_SHLCK), 4376 { 0, 0, 0, 0 } 4377 }; 4378 4379 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4380 { 4381 struct flock fl; 4382 struct target_flock *target_fl; 4383 struct flock64 fl64; 4384 struct target_flock64 *target_fl64; 4385 abi_long ret; 4386 int host_cmd = target_to_host_fcntl_cmd(cmd); 4387 4388 if (host_cmd == -TARGET_EINVAL) 4389 return host_cmd; 4390 4391 switch(cmd) { 4392 case TARGET_F_GETLK: 4393 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4394 return -TARGET_EFAULT; 4395 fl.l_type = 4396 
target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4397 fl.l_whence = tswap16(target_fl->l_whence); 4398 fl.l_start = tswapal(target_fl->l_start); 4399 fl.l_len = tswapal(target_fl->l_len); 4400 fl.l_pid = tswap32(target_fl->l_pid); 4401 unlock_user_struct(target_fl, arg, 0); 4402 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4403 if (ret == 0) { 4404 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4405 return -TARGET_EFAULT; 4406 target_fl->l_type = 4407 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4408 target_fl->l_whence = tswap16(fl.l_whence); 4409 target_fl->l_start = tswapal(fl.l_start); 4410 target_fl->l_len = tswapal(fl.l_len); 4411 target_fl->l_pid = tswap32(fl.l_pid); 4412 unlock_user_struct(target_fl, arg, 1); 4413 } 4414 break; 4415 4416 case TARGET_F_SETLK: 4417 case TARGET_F_SETLKW: 4418 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4419 return -TARGET_EFAULT; 4420 fl.l_type = 4421 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4422 fl.l_whence = tswap16(target_fl->l_whence); 4423 fl.l_start = tswapal(target_fl->l_start); 4424 fl.l_len = tswapal(target_fl->l_len); 4425 fl.l_pid = tswap32(target_fl->l_pid); 4426 unlock_user_struct(target_fl, arg, 0); 4427 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4428 break; 4429 4430 case TARGET_F_GETLK64: 4431 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4432 return -TARGET_EFAULT; 4433 fl64.l_type = 4434 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4435 fl64.l_whence = tswap16(target_fl64->l_whence); 4436 fl64.l_start = tswap64(target_fl64->l_start); 4437 fl64.l_len = tswap64(target_fl64->l_len); 4438 fl64.l_pid = tswap32(target_fl64->l_pid); 4439 unlock_user_struct(target_fl64, arg, 0); 4440 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4441 if (ret == 0) { 4442 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4443 return -TARGET_EFAULT; 4444 target_fl64->l_type = 4445 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4446 target_fl64->l_whence = tswap16(fl64.l_whence); 4447 target_fl64->l_start = tswap64(fl64.l_start); 4448 target_fl64->l_len = tswap64(fl64.l_len); 4449 target_fl64->l_pid = tswap32(fl64.l_pid); 4450 unlock_user_struct(target_fl64, arg, 1); 4451 } 4452 break; 4453 case TARGET_F_SETLK64: 4454 case TARGET_F_SETLKW64: 4455 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4456 return -TARGET_EFAULT; 4457 fl64.l_type = 4458 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4459 fl64.l_whence = tswap16(target_fl64->l_whence); 4460 fl64.l_start = tswap64(target_fl64->l_start); 4461 fl64.l_len = tswap64(target_fl64->l_len); 4462 fl64.l_pid = tswap32(target_fl64->l_pid); 4463 unlock_user_struct(target_fl64, arg, 0); 4464 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4465 break; 4466 4467 case TARGET_F_GETFL: 4468 ret = get_errno(fcntl(fd, host_cmd, arg)); 4469 if (ret >= 0) { 4470 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4471 } 4472 break; 4473 4474 case TARGET_F_SETFL: 4475 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4476 break; 4477 4478 case TARGET_F_SETOWN: 4479 case TARGET_F_GETOWN: 4480 case TARGET_F_SETSIG: 4481 case TARGET_F_GETSIG: 4482 case TARGET_F_SETLEASE: 4483 case TARGET_F_GETLEASE: 4484 ret = get_errno(fcntl(fd, host_cmd, arg)); 4485 break; 4486 4487 default: 4488 ret = get_errno(fcntl(fd, cmd, arg)); 4489 break; 4490 } 4491 return ret; 4492 } 4493 4494 #ifdef USE_UID16 4495 4496 static inline int high2lowuid(int uid) 4497 { 4498 
if (uid > 65535) 4499 return 65534; 4500 else 4501 return uid; 4502 } 4503 4504 static inline int high2lowgid(int gid) 4505 { 4506 if (gid > 65535) 4507 return 65534; 4508 else 4509 return gid; 4510 } 4511 4512 static inline int low2highuid(int uid) 4513 { 4514 if ((int16_t)uid == -1) 4515 return -1; 4516 else 4517 return uid; 4518 } 4519 4520 static inline int low2highgid(int gid) 4521 { 4522 if ((int16_t)gid == -1) 4523 return -1; 4524 else 4525 return gid; 4526 } 4527 static inline int tswapid(int id) 4528 { 4529 return tswap16(id); 4530 } 4531 #else /* !USE_UID16 */ 4532 static inline int high2lowuid(int uid) 4533 { 4534 return uid; 4535 } 4536 static inline int high2lowgid(int gid) 4537 { 4538 return gid; 4539 } 4540 static inline int low2highuid(int uid) 4541 { 4542 return uid; 4543 } 4544 static inline int low2highgid(int gid) 4545 { 4546 return gid; 4547 } 4548 static inline int tswapid(int id) 4549 { 4550 return tswap32(id); 4551 } 4552 #endif /* USE_UID16 */ 4553 4554 void syscall_init(void) 4555 { 4556 IOCTLEntry *ie; 4557 const argtype *arg_type; 4558 int size; 4559 int i; 4560 4561 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4562 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4563 #include "syscall_types.h" 4564 #undef STRUCT 4565 #undef STRUCT_SPECIAL 4566 4567 /* Build target_to_host_errno_table[] table from 4568 * host_to_target_errno_table[]. */ 4569 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4570 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4571 } 4572 4573 /* we patch the ioctl size if necessary. We rely on the fact that 4574 no ioctl has all the bits at '1' in the size field */ 4575 ie = ioctl_entries; 4576 while (ie->target_cmd != 0) { 4577 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4578 TARGET_IOC_SIZEMASK) { 4579 arg_type = ie->arg_type; 4580 if (arg_type[0] != TYPE_PTR) { 4581 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4582 ie->target_cmd); 4583 exit(1); 4584 } 4585 arg_type++; 4586 size = thunk_type_size(arg_type, 0); 4587 ie->target_cmd = (ie->target_cmd & 4588 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4589 (size << TARGET_IOC_SIZESHIFT); 4590 } 4591 4592 /* automatic consistency check if same arch */ 4593 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4594 (defined(__x86_64__) && defined(TARGET_X86_64)) 4595 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4596 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4597 ie->name, ie->target_cmd, ie->host_cmd); 4598 } 4599 #endif 4600 ie++; 4601 } 4602 } 4603 4604 #if TARGET_ABI_BITS == 32 4605 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4606 { 4607 #ifdef TARGET_WORDS_BIGENDIAN 4608 return ((uint64_t)word0 << 32) | word1; 4609 #else 4610 return ((uint64_t)word1 << 32) | word0; 4611 #endif 4612 } 4613 #else /* TARGET_ABI_BITS == 32 */ 4614 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4615 { 4616 return word0; 4617 } 4618 #endif /* TARGET_ABI_BITS != 32 */ 4619 4620 #ifdef TARGET_NR_truncate64 4621 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4622 abi_long arg2, 4623 abi_long arg3, 4624 abi_long arg4) 4625 { 4626 if (regpairs_aligned(cpu_env)) { 4627 arg2 = arg3; 4628 arg3 = arg4; 4629 } 4630 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4631 } 4632 #endif 4633 4634 #ifdef TARGET_NR_ftruncate64 4635 
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4636 abi_long arg2,
4637 abi_long arg3,
4638 abi_long arg4)
4639 {
4640 if (regpairs_aligned(cpu_env)) {
4641 arg2 = arg3;
4642 arg3 = arg4;
4643 }
4644 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4645 }
4646 #endif
4647
4648 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4649 abi_ulong target_addr)
4650 {
4651 struct target_timespec *target_ts;
4652
4653 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4654 return -TARGET_EFAULT;
4655 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4656 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4657 unlock_user_struct(target_ts, target_addr, 0);
4658 return 0;
4659 }
4660
4661 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4662 struct timespec *host_ts)
4663 {
4664 struct target_timespec *target_ts;
4665
4666 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4667 return -TARGET_EFAULT;
4668 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4669 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4670 unlock_user_struct(target_ts, target_addr, 1);
4671 return 0;
4672 }
4673
4674 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4675 abi_ulong target_addr)
4676 {
4677 struct target_itimerspec *target_itspec;
4678
4679 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
4680 return -TARGET_EFAULT;
4681 }
4682
4683 host_itspec->it_interval.tv_sec =
4684 tswapal(target_itspec->it_interval.tv_sec);
4685 host_itspec->it_interval.tv_nsec =
4686 tswapal(target_itspec->it_interval.tv_nsec);
4687 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
4688 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
4689
4690 unlock_user_struct(target_itspec, target_addr, 0);
4691 return 0;
4692 }
4693
4694 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
4695 struct itimerspec *host_its)
4696 {
4697 struct target_itimerspec *target_itspec;
4698
4699 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
4700 return -TARGET_EFAULT;
4701 }
4702
4703 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
4704 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
4705
4706 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
4707 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
4708
4709 unlock_user_struct(target_itspec, target_addr, 1);
4710 return 0;
4711 }
4712
4713 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4714 static inline abi_long host_to_target_stat64(void *cpu_env,
4715 abi_ulong target_addr,
4716 struct stat *host_st)
4717 {
4718 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4719 if (((CPUARMState *)cpu_env)->eabi) {
4720 struct target_eabi_stat64 *target_st;
4721
4722 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4723 return -TARGET_EFAULT;
4724 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4725 __put_user(host_st->st_dev, &target_st->st_dev);
4726 __put_user(host_st->st_ino, &target_st->st_ino);
4727 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4728 __put_user(host_st->st_ino, &target_st->__st_ino);
4729 #endif
4730 __put_user(host_st->st_mode, &target_st->st_mode);
4731 __put_user(host_st->st_nlink, &target_st->st_nlink);
4732 __put_user(host_st->st_uid, &target_st->st_uid);
4733 __put_user(host_st->st_gid, &target_st->st_gid); 4734
__put_user(host_st->st_rdev, &target_st->st_rdev);
4735 __put_user(host_st->st_size, &target_st->st_size);
4736 __put_user(host_st->st_blksize, &target_st->st_blksize);
4737 __put_user(host_st->st_blocks, &target_st->st_blocks);
4738 __put_user(host_st->st_atime, &target_st->target_st_atime);
4739 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4740 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4741 unlock_user_struct(target_st, target_addr, 1);
4742 } else
4743 #endif
4744 {
4745 #if defined(TARGET_HAS_STRUCT_STAT64)
4746 struct target_stat64 *target_st;
4747 #else
4748 struct target_stat *target_st;
4749 #endif
4750
4751 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4752 return -TARGET_EFAULT;
4753 memset(target_st, 0, sizeof(*target_st));
4754 __put_user(host_st->st_dev, &target_st->st_dev);
4755 __put_user(host_st->st_ino, &target_st->st_ino);
4756 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4757 __put_user(host_st->st_ino, &target_st->__st_ino);
4758 #endif
4759 __put_user(host_st->st_mode, &target_st->st_mode);
4760 __put_user(host_st->st_nlink, &target_st->st_nlink);
4761 __put_user(host_st->st_uid, &target_st->st_uid);
4762 __put_user(host_st->st_gid, &target_st->st_gid);
4763 __put_user(host_st->st_rdev, &target_st->st_rdev);
4764 /* XXX: better use of kernel struct */
4765 __put_user(host_st->st_size, &target_st->st_size);
4766 __put_user(host_st->st_blksize, &target_st->st_blksize);
4767 __put_user(host_st->st_blocks, &target_st->st_blocks);
4768 __put_user(host_st->st_atime, &target_st->target_st_atime);
4769 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4770 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4771 unlock_user_struct(target_st, target_addr, 1);
4772 }
4773
4774 return 0;
4775 }
4776 #endif
4777
4778 /* ??? Using host futex calls even when target atomic operations
4779 are not really atomic probably breaks things. However, implementing
4780 futexes locally would make futexes shared between multiple processes
4781 tricky. In any case they're probably useless, because guest atomic
4782 operations won't work either. */
4783 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4784 target_ulong uaddr2, int val3)
4785 {
4786 struct timespec ts, *pts;
4787 int base_op;
4788
4789 /* ??? We assume FUTEX_* constants are the same on both host
4790 and target. */
4791 #ifdef FUTEX_CMD_MASK
4792 base_op = op & FUTEX_CMD_MASK;
4793 #else
4794 base_op = op;
4795 #endif
4796 switch (base_op) {
4797 case FUTEX_WAIT:
4798 case FUTEX_WAIT_BITSET:
4799 if (timeout) {
4800 pts = &ts;
4801 target_to_host_timespec(pts, timeout);
4802 } else {
4803 pts = NULL;
4804 }
4805 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4806 pts, NULL, val3));
4807 case FUTEX_WAKE:
4808 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4809 case FUTEX_FD:
4810 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4811 case FUTEX_REQUEUE:
4812 case FUTEX_CMP_REQUEUE:
4813 case FUTEX_WAKE_OP:
4814 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4815 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4816 But the prototype takes a `struct timespec *'; insert casts
4817 to satisfy the compiler. We do not need to tswap TIMEOUT
4818 since it's not compared to guest memory. */
4819 pts = (struct timespec *)(uintptr_t) timeout;
4820 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4821 g2h(uaddr2),
4822 (base_op == FUTEX_CMP_REQUEUE
4823 ?
tswap32(val3) 4824 : val3))); 4825 default: 4826 return -TARGET_ENOSYS; 4827 } 4828 } 4829 4830 /* Map host to target signal numbers for the wait family of syscalls. 4831 Assume all other status bits are the same. */ 4832 int host_to_target_waitstatus(int status) 4833 { 4834 if (WIFSIGNALED(status)) { 4835 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4836 } 4837 if (WIFSTOPPED(status)) { 4838 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4839 | (status & 0xff); 4840 } 4841 return status; 4842 } 4843 4844 static int relstr_to_int(const char *s) 4845 { 4846 /* Convert a uname release string like "2.6.18" to an integer 4847 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.) 4848 */ 4849 int i, n, tmp; 4850 4851 tmp = 0; 4852 for (i = 0; i < 3; i++) { 4853 n = 0; 4854 while (*s >= '0' && *s <= '9') { 4855 n *= 10; 4856 n += *s - '0'; 4857 s++; 4858 } 4859 tmp = (tmp << 8) + n; 4860 if (*s == '.') { 4861 s++; 4862 } 4863 } 4864 return tmp; 4865 } 4866 4867 int get_osversion(void) 4868 { 4869 static int osversion; 4870 struct new_utsname buf; 4871 const char *s; 4872 4873 if (osversion) 4874 return osversion; 4875 if (qemu_uname_release && *qemu_uname_release) { 4876 s = qemu_uname_release; 4877 } else { 4878 if (sys_uname(&buf)) 4879 return 0; 4880 s = buf.release; 4881 } 4882 osversion = relstr_to_int(s); 4883 return osversion; 4884 } 4885 4886 void init_qemu_uname_release(void) 4887 { 4888 /* Initialize qemu_uname_release for later use. 4889 * If the host kernel is too old and the user hasn't asked for 4890 * a specific fake version number, we might want to fake a minimum 4891 * target kernel version. 4892 */ 4893 #ifdef UNAME_MINIMUM_RELEASE 4894 struct new_utsname buf; 4895 4896 if (qemu_uname_release && *qemu_uname_release) { 4897 return; 4898 } 4899 4900 if (sys_uname(&buf)) { 4901 return; 4902 } 4903 4904 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) { 4905 qemu_uname_release = UNAME_MINIMUM_RELEASE; 4906 } 4907 #endif 4908 } 4909 4910 static int open_self_maps(void *cpu_env, int fd) 4911 { 4912 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4913 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4914 #endif 4915 FILE *fp; 4916 char *line = NULL; 4917 size_t len = 0; 4918 ssize_t read; 4919 4920 fp = fopen("/proc/self/maps", "r"); 4921 if (fp == NULL) { 4922 return -EACCES; 4923 } 4924 4925 while ((read = getline(&line, &len, fp)) != -1) { 4926 int fields, dev_maj, dev_min, inode; 4927 uint64_t min, max, offset; 4928 char flag_r, flag_w, flag_x, flag_p; 4929 char path[512] = ""; 4930 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 4931 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 4932 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 4933 4934 if ((fields < 10) || (fields > 11)) { 4935 continue; 4936 } 4937 if (!strncmp(path, "[stack]", 7)) { 4938 continue; 4939 } 4940 if (h2g_valid(min) && h2g_valid(max)) { 4941 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 4942 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 4943 h2g(min), h2g(max), flag_r, flag_w, 4944 flag_x, flag_p, offset, dev_maj, dev_min, inode, 4945 path[0] ? 
" " : "", path); 4946 } 4947 } 4948 4949 free(line); 4950 fclose(fp); 4951 4952 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4953 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 4954 (unsigned long long)ts->info->stack_limit, 4955 (unsigned long long)(ts->info->start_stack + 4956 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 4957 (unsigned long long)0); 4958 #endif 4959 4960 return 0; 4961 } 4962 4963 static int open_self_stat(void *cpu_env, int fd) 4964 { 4965 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4966 abi_ulong start_stack = ts->info->start_stack; 4967 int i; 4968 4969 for (i = 0; i < 44; i++) { 4970 char buf[128]; 4971 int len; 4972 uint64_t val = 0; 4973 4974 if (i == 0) { 4975 /* pid */ 4976 val = getpid(); 4977 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4978 } else if (i == 1) { 4979 /* app name */ 4980 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 4981 } else if (i == 27) { 4982 /* stack bottom */ 4983 val = start_stack; 4984 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4985 } else { 4986 /* for the rest, there is MasterCard */ 4987 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 4988 } 4989 4990 len = strlen(buf); 4991 if (write(fd, buf, len) != len) { 4992 return -1; 4993 } 4994 } 4995 4996 return 0; 4997 } 4998 4999 static int open_self_auxv(void *cpu_env, int fd) 5000 { 5001 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5002 abi_ulong auxv = ts->info->saved_auxv; 5003 abi_ulong len = ts->info->auxv_len; 5004 char *ptr; 5005 5006 /* 5007 * Auxiliary vector is stored in target process stack. 5008 * read in whole auxv vector and copy it to file 5009 */ 5010 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5011 if (ptr != NULL) { 5012 while (len > 0) { 5013 ssize_t r; 5014 r = write(fd, ptr, len); 5015 if (r <= 0) { 5016 break; 5017 } 5018 len -= r; 5019 ptr += r; 5020 } 5021 lseek(fd, 0, SEEK_SET); 5022 unlock_user(ptr, auxv, len); 5023 } 5024 5025 return 0; 5026 } 5027 5028 static int is_proc_myself(const char *filename, const char *entry) 5029 { 5030 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 5031 filename += strlen("/proc/"); 5032 if (!strncmp(filename, "self/", strlen("self/"))) { 5033 filename += strlen("self/"); 5034 } else if (*filename >= '1' && *filename <= '9') { 5035 char myself[80]; 5036 snprintf(myself, sizeof(myself), "%d/", getpid()); 5037 if (!strncmp(filename, myself, strlen(myself))) { 5038 filename += strlen(myself); 5039 } else { 5040 return 0; 5041 } 5042 } else { 5043 return 0; 5044 } 5045 if (!strcmp(filename, entry)) { 5046 return 1; 5047 } 5048 } 5049 return 0; 5050 } 5051 5052 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5053 static int is_proc(const char *filename, const char *entry) 5054 { 5055 return strcmp(filename, entry) == 0; 5056 } 5057 5058 static int open_net_route(void *cpu_env, int fd) 5059 { 5060 FILE *fp; 5061 char *line = NULL; 5062 size_t len = 0; 5063 ssize_t read; 5064 5065 fp = fopen("/proc/net/route", "r"); 5066 if (fp == NULL) { 5067 return -EACCES; 5068 } 5069 5070 /* read header */ 5071 5072 read = getline(&line, &len, fp); 5073 dprintf(fd, "%s", line); 5074 5075 /* read routes */ 5076 5077 while ((read = getline(&line, &len, fp)) != -1) { 5078 char iface[16]; 5079 uint32_t dest, gw, mask; 5080 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 5081 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5082 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 5083 &mask, &mtu, &window, &irtt); 5084 
dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5085 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 5086 metric, tswap32(mask), mtu, window, irtt); 5087 } 5088 5089 free(line); 5090 fclose(fp); 5091 5092 return 0; 5093 } 5094 #endif 5095 5096 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 5097 { 5098 struct fake_open { 5099 const char *filename; 5100 int (*fill)(void *cpu_env, int fd); 5101 int (*cmp)(const char *s1, const char *s2); 5102 }; 5103 const struct fake_open *fake_open; 5104 static const struct fake_open fakes[] = { 5105 { "maps", open_self_maps, is_proc_myself }, 5106 { "stat", open_self_stat, is_proc_myself }, 5107 { "auxv", open_self_auxv, is_proc_myself }, 5108 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5109 { "/proc/net/route", open_net_route, is_proc }, 5110 #endif 5111 { NULL, NULL, NULL } 5112 }; 5113 5114 for (fake_open = fakes; fake_open->filename; fake_open++) { 5115 if (fake_open->cmp(pathname, fake_open->filename)) { 5116 break; 5117 } 5118 } 5119 5120 if (fake_open->filename) { 5121 const char *tmpdir; 5122 char filename[PATH_MAX]; 5123 int fd, r; 5124 5125 /* create temporary file to map stat to */ 5126 tmpdir = getenv("TMPDIR"); 5127 if (!tmpdir) 5128 tmpdir = "/tmp"; 5129 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5130 fd = mkstemp(filename); 5131 if (fd < 0) { 5132 return fd; 5133 } 5134 unlink(filename); 5135 5136 if ((r = fake_open->fill(cpu_env, fd))) { 5137 close(fd); 5138 return r; 5139 } 5140 lseek(fd, 0, SEEK_SET); 5141 5142 return fd; 5143 } 5144 5145 return get_errno(open(path(pathname), flags, mode)); 5146 } 5147 5148 /* do_syscall() should always have a single exit point at the end so 5149 that actions, such as logging of syscall results, can be performed. 5150 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 5151 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5152 abi_long arg2, abi_long arg3, abi_long arg4, 5153 abi_long arg5, abi_long arg6, abi_long arg7, 5154 abi_long arg8) 5155 { 5156 CPUState *cpu = ENV_GET_CPU(cpu_env); 5157 abi_long ret; 5158 struct stat st; 5159 struct statfs stfs; 5160 void *p; 5161 5162 #ifdef DEBUG 5163 gemu_log("syscall %d", num); 5164 #endif 5165 if(do_strace) 5166 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5167 5168 switch(num) { 5169 case TARGET_NR_exit: 5170 /* In old applications this may be used to implement _exit(2). 5171 However in threaded applictions it is used for thread termination, 5172 and _exit_group is used for application termination. 5173 Do thread termination if we have more then one thread. */ 5174 /* FIXME: This probably breaks if a signal arrives. We should probably 5175 be disabling signals. */ 5176 if (CPU_NEXT(first_cpu)) { 5177 TaskState *ts; 5178 5179 cpu_list_lock(); 5180 /* Remove the CPU from the list. 
*/ 5181 QTAILQ_REMOVE(&cpus, cpu, node); 5182 cpu_list_unlock(); 5183 ts = ((CPUArchState *)cpu_env)->opaque; 5184 if (ts->child_tidptr) { 5185 put_user_u32(0, ts->child_tidptr); 5186 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5187 NULL, NULL, 0); 5188 } 5189 thread_cpu = NULL; 5190 object_unref(OBJECT(ENV_GET_CPU(cpu_env))); 5191 g_free(ts); 5192 pthread_exit(NULL); 5193 } 5194 #ifdef TARGET_GPROF 5195 _mcleanup(); 5196 #endif 5197 gdb_exit(cpu_env, arg1); 5198 _exit(arg1); 5199 ret = 0; /* avoid warning */ 5200 break; 5201 case TARGET_NR_read: 5202 if (arg3 == 0) 5203 ret = 0; 5204 else { 5205 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5206 goto efault; 5207 ret = get_errno(read(arg1, p, arg3)); 5208 unlock_user(p, arg2, ret); 5209 } 5210 break; 5211 case TARGET_NR_write: 5212 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5213 goto efault; 5214 ret = get_errno(write(arg1, p, arg3)); 5215 unlock_user(p, arg2, 0); 5216 break; 5217 case TARGET_NR_open: 5218 if (!(p = lock_user_string(arg1))) 5219 goto efault; 5220 ret = get_errno(do_open(cpu_env, p, 5221 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5222 arg3)); 5223 unlock_user(p, arg1, 0); 5224 break; 5225 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5226 case TARGET_NR_openat: 5227 if (!(p = lock_user_string(arg2))) 5228 goto efault; 5229 ret = get_errno(sys_openat(arg1, 5230 path(p), 5231 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5232 arg4)); 5233 unlock_user(p, arg2, 0); 5234 break; 5235 #endif 5236 case TARGET_NR_close: 5237 ret = get_errno(close(arg1)); 5238 break; 5239 case TARGET_NR_brk: 5240 ret = do_brk(arg1); 5241 break; 5242 case TARGET_NR_fork: 5243 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5244 break; 5245 #ifdef TARGET_NR_waitpid 5246 case TARGET_NR_waitpid: 5247 { 5248 int status; 5249 ret = get_errno(waitpid(arg1, &status, arg3)); 5250 if (!is_error(ret) && arg2 && ret 5251 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5252 goto efault; 5253 } 5254 break; 5255 #endif 5256 #ifdef TARGET_NR_waitid 5257 case TARGET_NR_waitid: 5258 { 5259 siginfo_t info; 5260 info.si_pid = 0; 5261 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5262 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5263 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5264 goto efault; 5265 host_to_target_siginfo(p, &info); 5266 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5267 } 5268 } 5269 break; 5270 #endif 5271 #ifdef TARGET_NR_creat /* not on alpha */ 5272 case TARGET_NR_creat: 5273 if (!(p = lock_user_string(arg1))) 5274 goto efault; 5275 ret = get_errno(creat(p, arg2)); 5276 unlock_user(p, arg1, 0); 5277 break; 5278 #endif 5279 case TARGET_NR_link: 5280 { 5281 void * p2; 5282 p = lock_user_string(arg1); 5283 p2 = lock_user_string(arg2); 5284 if (!p || !p2) 5285 ret = -TARGET_EFAULT; 5286 else 5287 ret = get_errno(link(p, p2)); 5288 unlock_user(p2, arg2, 0); 5289 unlock_user(p, arg1, 0); 5290 } 5291 break; 5292 #if defined(TARGET_NR_linkat) 5293 case TARGET_NR_linkat: 5294 { 5295 void * p2 = NULL; 5296 if (!arg2 || !arg4) 5297 goto efault; 5298 p = lock_user_string(arg2); 5299 p2 = lock_user_string(arg4); 5300 if (!p || !p2) 5301 ret = -TARGET_EFAULT; 5302 else 5303 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5304 unlock_user(p, arg2, 0); 5305 unlock_user(p2, arg4, 0); 5306 } 5307 break; 5308 #endif 5309 case TARGET_NR_unlink: 5310 if (!(p = lock_user_string(arg1))) 5311 goto efault; 5312 ret = get_errno(unlink(p)); 5313 unlock_user(p, arg1, 0); 5314 break; 
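/* The *at() syscall variants that follow take a directory file descriptor as
   their first argument, and are only compiled in when the corresponding
   TARGET_NR_* (and, for some of them, the host __NR_*) numbers are defined.
   They follow the same pattern as the plain path-based cases above: lock the
   guest path string, call the host syscall through get_errno(), then unlock
   the guest buffer without copying it back. */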
5315 #if defined(TARGET_NR_unlinkat) 5316 case TARGET_NR_unlinkat: 5317 if (!(p = lock_user_string(arg2))) 5318 goto efault; 5319 ret = get_errno(unlinkat(arg1, p, arg3)); 5320 unlock_user(p, arg2, 0); 5321 break; 5322 #endif 5323 case TARGET_NR_execve: 5324 { 5325 char **argp, **envp; 5326 int argc, envc; 5327 abi_ulong gp; 5328 abi_ulong guest_argp; 5329 abi_ulong guest_envp; 5330 abi_ulong addr; 5331 char **q; 5332 int total_size = 0; 5333 5334 argc = 0; 5335 guest_argp = arg2; 5336 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5337 if (get_user_ual(addr, gp)) 5338 goto efault; 5339 if (!addr) 5340 break; 5341 argc++; 5342 } 5343 envc = 0; 5344 guest_envp = arg3; 5345 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5346 if (get_user_ual(addr, gp)) 5347 goto efault; 5348 if (!addr) 5349 break; 5350 envc++; 5351 } 5352 5353 argp = alloca((argc + 1) * sizeof(void *)); 5354 envp = alloca((envc + 1) * sizeof(void *)); 5355 5356 for (gp = guest_argp, q = argp; gp; 5357 gp += sizeof(abi_ulong), q++) { 5358 if (get_user_ual(addr, gp)) 5359 goto execve_efault; 5360 if (!addr) 5361 break; 5362 if (!(*q = lock_user_string(addr))) 5363 goto execve_efault; 5364 total_size += strlen(*q) + 1; 5365 } 5366 *q = NULL; 5367 5368 for (gp = guest_envp, q = envp; gp; 5369 gp += sizeof(abi_ulong), q++) { 5370 if (get_user_ual(addr, gp)) 5371 goto execve_efault; 5372 if (!addr) 5373 break; 5374 if (!(*q = lock_user_string(addr))) 5375 goto execve_efault; 5376 total_size += strlen(*q) + 1; 5377 } 5378 *q = NULL; 5379 5380 /* This case will not be caught by the host's execve() if its 5381 page size is bigger than the target's. */ 5382 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5383 ret = -TARGET_E2BIG; 5384 goto execve_end; 5385 } 5386 if (!(p = lock_user_string(arg1))) 5387 goto execve_efault; 5388 ret = get_errno(execve(p, argp, envp)); 5389 unlock_user(p, arg1, 0); 5390 5391 goto execve_end; 5392 5393 execve_efault: 5394 ret = -TARGET_EFAULT; 5395 5396 execve_end: 5397 for (gp = guest_argp, q = argp; *q; 5398 gp += sizeof(abi_ulong), q++) { 5399 if (get_user_ual(addr, gp) 5400 || !addr) 5401 break; 5402 unlock_user(*q, addr, 0); 5403 } 5404 for (gp = guest_envp, q = envp; *q; 5405 gp += sizeof(abi_ulong), q++) { 5406 if (get_user_ual(addr, gp) 5407 || !addr) 5408 break; 5409 unlock_user(*q, addr, 0); 5410 } 5411 } 5412 break; 5413 case TARGET_NR_chdir: 5414 if (!(p = lock_user_string(arg1))) 5415 goto efault; 5416 ret = get_errno(chdir(p)); 5417 unlock_user(p, arg1, 0); 5418 break; 5419 #ifdef TARGET_NR_time 5420 case TARGET_NR_time: 5421 { 5422 time_t host_time; 5423 ret = get_errno(time(&host_time)); 5424 if (!is_error(ret) 5425 && arg1 5426 && put_user_sal(host_time, arg1)) 5427 goto efault; 5428 } 5429 break; 5430 #endif 5431 case TARGET_NR_mknod: 5432 if (!(p = lock_user_string(arg1))) 5433 goto efault; 5434 ret = get_errno(mknod(p, arg2, arg3)); 5435 unlock_user(p, arg1, 0); 5436 break; 5437 #if defined(TARGET_NR_mknodat) 5438 case TARGET_NR_mknodat: 5439 if (!(p = lock_user_string(arg2))) 5440 goto efault; 5441 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5442 unlock_user(p, arg2, 0); 5443 break; 5444 #endif 5445 case TARGET_NR_chmod: 5446 if (!(p = lock_user_string(arg1))) 5447 goto efault; 5448 ret = get_errno(chmod(p, arg2)); 5449 unlock_user(p, arg1, 0); 5450 break; 5451 #ifdef TARGET_NR_break 5452 case TARGET_NR_break: 5453 goto unimplemented; 5454 #endif 5455 #ifdef TARGET_NR_oldstat 5456 case TARGET_NR_oldstat: 5457 goto unimplemented; 5458 #endif 5459 case 
TARGET_NR_lseek: 5460 ret = get_errno(lseek(arg1, arg2, arg3)); 5461 break; 5462 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5463 /* Alpha specific */ 5464 case TARGET_NR_getxpid: 5465 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5466 ret = get_errno(getpid()); 5467 break; 5468 #endif 5469 #ifdef TARGET_NR_getpid 5470 case TARGET_NR_getpid: 5471 ret = get_errno(getpid()); 5472 break; 5473 #endif 5474 case TARGET_NR_mount: 5475 { 5476 /* need to look at the data field */ 5477 void *p2, *p3; 5478 p = lock_user_string(arg1); 5479 p2 = lock_user_string(arg2); 5480 p3 = lock_user_string(arg3); 5481 if (!p || !p2 || !p3) 5482 ret = -TARGET_EFAULT; 5483 else { 5484 /* FIXME - arg5 should be locked, but it isn't clear how to 5485 * do that since it's not guaranteed to be a NULL-terminated 5486 * string. 5487 */ 5488 if ( ! arg5 ) 5489 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5490 else 5491 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5492 } 5493 unlock_user(p, arg1, 0); 5494 unlock_user(p2, arg2, 0); 5495 unlock_user(p3, arg3, 0); 5496 break; 5497 } 5498 #ifdef TARGET_NR_umount 5499 case TARGET_NR_umount: 5500 if (!(p = lock_user_string(arg1))) 5501 goto efault; 5502 ret = get_errno(umount(p)); 5503 unlock_user(p, arg1, 0); 5504 break; 5505 #endif 5506 #ifdef TARGET_NR_stime /* not on alpha */ 5507 case TARGET_NR_stime: 5508 { 5509 time_t host_time; 5510 if (get_user_sal(host_time, arg1)) 5511 goto efault; 5512 ret = get_errno(stime(&host_time)); 5513 } 5514 break; 5515 #endif 5516 case TARGET_NR_ptrace: 5517 goto unimplemented; 5518 #ifdef TARGET_NR_alarm /* not on alpha */ 5519 case TARGET_NR_alarm: 5520 ret = alarm(arg1); 5521 break; 5522 #endif 5523 #ifdef TARGET_NR_oldfstat 5524 case TARGET_NR_oldfstat: 5525 goto unimplemented; 5526 #endif 5527 #ifdef TARGET_NR_pause /* not on alpha */ 5528 case TARGET_NR_pause: 5529 ret = get_errno(pause()); 5530 break; 5531 #endif 5532 #ifdef TARGET_NR_utime 5533 case TARGET_NR_utime: 5534 { 5535 struct utimbuf tbuf, *host_tbuf; 5536 struct target_utimbuf *target_tbuf; 5537 if (arg2) { 5538 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5539 goto efault; 5540 tbuf.actime = tswapal(target_tbuf->actime); 5541 tbuf.modtime = tswapal(target_tbuf->modtime); 5542 unlock_user_struct(target_tbuf, arg2, 0); 5543 host_tbuf = &tbuf; 5544 } else { 5545 host_tbuf = NULL; 5546 } 5547 if (!(p = lock_user_string(arg1))) 5548 goto efault; 5549 ret = get_errno(utime(p, host_tbuf)); 5550 unlock_user(p, arg1, 0); 5551 } 5552 break; 5553 #endif 5554 case TARGET_NR_utimes: 5555 { 5556 struct timeval *tvp, tv[2]; 5557 if (arg2) { 5558 if (copy_from_user_timeval(&tv[0], arg2) 5559 || copy_from_user_timeval(&tv[1], 5560 arg2 + sizeof(struct target_timeval))) 5561 goto efault; 5562 tvp = tv; 5563 } else { 5564 tvp = NULL; 5565 } 5566 if (!(p = lock_user_string(arg1))) 5567 goto efault; 5568 ret = get_errno(utimes(p, tvp)); 5569 unlock_user(p, arg1, 0); 5570 } 5571 break; 5572 #if defined(TARGET_NR_futimesat) 5573 case TARGET_NR_futimesat: 5574 { 5575 struct timeval *tvp, tv[2]; 5576 if (arg3) { 5577 if (copy_from_user_timeval(&tv[0], arg3) 5578 || copy_from_user_timeval(&tv[1], 5579 arg3 + sizeof(struct target_timeval))) 5580 goto efault; 5581 tvp = tv; 5582 } else { 5583 tvp = NULL; 5584 } 5585 if (!(p = lock_user_string(arg2))) 5586 goto efault; 5587 ret = get_errno(futimesat(arg1, path(p), tvp)); 5588 unlock_user(p, arg2, 0); 5589 } 5590 break; 5591 #endif 5592 #ifdef TARGET_NR_stty 5593 case TARGET_NR_stty: 
5594 goto unimplemented; 5595 #endif 5596 #ifdef TARGET_NR_gtty 5597 case TARGET_NR_gtty: 5598 goto unimplemented; 5599 #endif 5600 case TARGET_NR_access: 5601 if (!(p = lock_user_string(arg1))) 5602 goto efault; 5603 ret = get_errno(access(path(p), arg2)); 5604 unlock_user(p, arg1, 0); 5605 break; 5606 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5607 case TARGET_NR_faccessat: 5608 if (!(p = lock_user_string(arg2))) 5609 goto efault; 5610 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5611 unlock_user(p, arg2, 0); 5612 break; 5613 #endif 5614 #ifdef TARGET_NR_nice /* not on alpha */ 5615 case TARGET_NR_nice: 5616 ret = get_errno(nice(arg1)); 5617 break; 5618 #endif 5619 #ifdef TARGET_NR_ftime 5620 case TARGET_NR_ftime: 5621 goto unimplemented; 5622 #endif 5623 case TARGET_NR_sync: 5624 sync(); 5625 ret = 0; 5626 break; 5627 case TARGET_NR_kill: 5628 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5629 break; 5630 case TARGET_NR_rename: 5631 { 5632 void *p2; 5633 p = lock_user_string(arg1); 5634 p2 = lock_user_string(arg2); 5635 if (!p || !p2) 5636 ret = -TARGET_EFAULT; 5637 else 5638 ret = get_errno(rename(p, p2)); 5639 unlock_user(p2, arg2, 0); 5640 unlock_user(p, arg1, 0); 5641 } 5642 break; 5643 #if defined(TARGET_NR_renameat) 5644 case TARGET_NR_renameat: 5645 { 5646 void *p2; 5647 p = lock_user_string(arg2); 5648 p2 = lock_user_string(arg4); 5649 if (!p || !p2) 5650 ret = -TARGET_EFAULT; 5651 else 5652 ret = get_errno(renameat(arg1, p, arg3, p2)); 5653 unlock_user(p2, arg4, 0); 5654 unlock_user(p, arg2, 0); 5655 } 5656 break; 5657 #endif 5658 case TARGET_NR_mkdir: 5659 if (!(p = lock_user_string(arg1))) 5660 goto efault; 5661 ret = get_errno(mkdir(p, arg2)); 5662 unlock_user(p, arg1, 0); 5663 break; 5664 #if defined(TARGET_NR_mkdirat) 5665 case TARGET_NR_mkdirat: 5666 if (!(p = lock_user_string(arg2))) 5667 goto efault; 5668 ret = get_errno(mkdirat(arg1, p, arg3)); 5669 unlock_user(p, arg2, 0); 5670 break; 5671 #endif 5672 case TARGET_NR_rmdir: 5673 if (!(p = lock_user_string(arg1))) 5674 goto efault; 5675 ret = get_errno(rmdir(p)); 5676 unlock_user(p, arg1, 0); 5677 break; 5678 case TARGET_NR_dup: 5679 ret = get_errno(dup(arg1)); 5680 break; 5681 case TARGET_NR_pipe: 5682 ret = do_pipe(cpu_env, arg1, 0, 0); 5683 break; 5684 #ifdef TARGET_NR_pipe2 5685 case TARGET_NR_pipe2: 5686 ret = do_pipe(cpu_env, arg1, 5687 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5688 break; 5689 #endif 5690 case TARGET_NR_times: 5691 { 5692 struct target_tms *tmsp; 5693 struct tms tms; 5694 ret = get_errno(times(&tms)); 5695 if (arg1) { 5696 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5697 if (!tmsp) 5698 goto efault; 5699 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5700 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5701 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5702 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5703 } 5704 if (!is_error(ret)) 5705 ret = host_to_target_clock_t(ret); 5706 } 5707 break; 5708 #ifdef TARGET_NR_prof 5709 case TARGET_NR_prof: 5710 goto unimplemented; 5711 #endif 5712 #ifdef TARGET_NR_signal 5713 case TARGET_NR_signal: 5714 goto unimplemented; 5715 #endif 5716 case TARGET_NR_acct: 5717 if (arg1 == 0) { 5718 ret = get_errno(acct(NULL)); 5719 } else { 5720 if (!(p = lock_user_string(arg1))) 5721 goto efault; 5722 ret = get_errno(acct(path(p))); 5723 unlock_user(p, arg1, 0); 5724 } 5725 break; 5726 #ifdef TARGET_NR_umount2 5727 case 
TARGET_NR_umount2: 5728 if (!(p = lock_user_string(arg1))) 5729 goto efault; 5730 ret = get_errno(umount2(p, arg2)); 5731 unlock_user(p, arg1, 0); 5732 break; 5733 #endif 5734 #ifdef TARGET_NR_lock 5735 case TARGET_NR_lock: 5736 goto unimplemented; 5737 #endif 5738 case TARGET_NR_ioctl: 5739 ret = do_ioctl(arg1, arg2, arg3); 5740 break; 5741 case TARGET_NR_fcntl: 5742 ret = do_fcntl(arg1, arg2, arg3); 5743 break; 5744 #ifdef TARGET_NR_mpx 5745 case TARGET_NR_mpx: 5746 goto unimplemented; 5747 #endif 5748 case TARGET_NR_setpgid: 5749 ret = get_errno(setpgid(arg1, arg2)); 5750 break; 5751 #ifdef TARGET_NR_ulimit 5752 case TARGET_NR_ulimit: 5753 goto unimplemented; 5754 #endif 5755 #ifdef TARGET_NR_oldolduname 5756 case TARGET_NR_oldolduname: 5757 goto unimplemented; 5758 #endif 5759 case TARGET_NR_umask: 5760 ret = get_errno(umask(arg1)); 5761 break; 5762 case TARGET_NR_chroot: 5763 if (!(p = lock_user_string(arg1))) 5764 goto efault; 5765 ret = get_errno(chroot(p)); 5766 unlock_user(p, arg1, 0); 5767 break; 5768 case TARGET_NR_ustat: 5769 goto unimplemented; 5770 case TARGET_NR_dup2: 5771 ret = get_errno(dup2(arg1, arg2)); 5772 break; 5773 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5774 case TARGET_NR_dup3: 5775 ret = get_errno(dup3(arg1, arg2, arg3)); 5776 break; 5777 #endif 5778 #ifdef TARGET_NR_getppid /* not on alpha */ 5779 case TARGET_NR_getppid: 5780 ret = get_errno(getppid()); 5781 break; 5782 #endif 5783 case TARGET_NR_getpgrp: 5784 ret = get_errno(getpgrp()); 5785 break; 5786 case TARGET_NR_setsid: 5787 ret = get_errno(setsid()); 5788 break; 5789 #ifdef TARGET_NR_sigaction 5790 case TARGET_NR_sigaction: 5791 { 5792 #if defined(TARGET_ALPHA) 5793 struct target_sigaction act, oact, *pact = 0; 5794 struct target_old_sigaction *old_act; 5795 if (arg2) { 5796 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5797 goto efault; 5798 act._sa_handler = old_act->_sa_handler; 5799 target_siginitset(&act.sa_mask, old_act->sa_mask); 5800 act.sa_flags = old_act->sa_flags; 5801 act.sa_restorer = 0; 5802 unlock_user_struct(old_act, arg2, 0); 5803 pact = &act; 5804 } 5805 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5806 if (!is_error(ret) && arg3) { 5807 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5808 goto efault; 5809 old_act->_sa_handler = oact._sa_handler; 5810 old_act->sa_mask = oact.sa_mask.sig[0]; 5811 old_act->sa_flags = oact.sa_flags; 5812 unlock_user_struct(old_act, arg3, 1); 5813 } 5814 #elif defined(TARGET_MIPS) 5815 struct target_sigaction act, oact, *pact, *old_act; 5816 5817 if (arg2) { 5818 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5819 goto efault; 5820 act._sa_handler = old_act->_sa_handler; 5821 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5822 act.sa_flags = old_act->sa_flags; 5823 unlock_user_struct(old_act, arg2, 0); 5824 pact = &act; 5825 } else { 5826 pact = NULL; 5827 } 5828 5829 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5830 5831 if (!is_error(ret) && arg3) { 5832 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5833 goto efault; 5834 old_act->_sa_handler = oact._sa_handler; 5835 old_act->sa_flags = oact.sa_flags; 5836 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5837 old_act->sa_mask.sig[1] = 0; 5838 old_act->sa_mask.sig[2] = 0; 5839 old_act->sa_mask.sig[3] = 0; 5840 unlock_user_struct(old_act, arg3, 1); 5841 } 5842 #else 5843 struct target_old_sigaction *old_act; 5844 struct target_sigaction act, oact, *pact; 5845 if (arg2) { 5846 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5847 goto 
efault; 5848 act._sa_handler = old_act->_sa_handler; 5849 target_siginitset(&act.sa_mask, old_act->sa_mask); 5850 act.sa_flags = old_act->sa_flags; 5851 act.sa_restorer = old_act->sa_restorer; 5852 unlock_user_struct(old_act, arg2, 0); 5853 pact = &act; 5854 } else { 5855 pact = NULL; 5856 } 5857 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5858 if (!is_error(ret) && arg3) { 5859 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5860 goto efault; 5861 old_act->_sa_handler = oact._sa_handler; 5862 old_act->sa_mask = oact.sa_mask.sig[0]; 5863 old_act->sa_flags = oact.sa_flags; 5864 old_act->sa_restorer = oact.sa_restorer; 5865 unlock_user_struct(old_act, arg3, 1); 5866 } 5867 #endif 5868 } 5869 break; 5870 #endif 5871 case TARGET_NR_rt_sigaction: 5872 { 5873 #if defined(TARGET_ALPHA) 5874 struct target_sigaction act, oact, *pact = 0; 5875 struct target_rt_sigaction *rt_act; 5876 /* ??? arg4 == sizeof(sigset_t). */ 5877 if (arg2) { 5878 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5879 goto efault; 5880 act._sa_handler = rt_act->_sa_handler; 5881 act.sa_mask = rt_act->sa_mask; 5882 act.sa_flags = rt_act->sa_flags; 5883 act.sa_restorer = arg5; 5884 unlock_user_struct(rt_act, arg2, 0); 5885 pact = &act; 5886 } 5887 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5888 if (!is_error(ret) && arg3) { 5889 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5890 goto efault; 5891 rt_act->_sa_handler = oact._sa_handler; 5892 rt_act->sa_mask = oact.sa_mask; 5893 rt_act->sa_flags = oact.sa_flags; 5894 unlock_user_struct(rt_act, arg3, 1); 5895 } 5896 #else 5897 struct target_sigaction *act; 5898 struct target_sigaction *oact; 5899 5900 if (arg2) { 5901 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5902 goto efault; 5903 } else 5904 act = NULL; 5905 if (arg3) { 5906 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5907 ret = -TARGET_EFAULT; 5908 goto rt_sigaction_fail; 5909 } 5910 } else 5911 oact = NULL; 5912 ret = get_errno(do_sigaction(arg1, act, oact)); 5913 rt_sigaction_fail: 5914 if (act) 5915 unlock_user_struct(act, arg2, 0); 5916 if (oact) 5917 unlock_user_struct(oact, arg3, 1); 5918 #endif 5919 } 5920 break; 5921 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5922 case TARGET_NR_sgetmask: 5923 { 5924 sigset_t cur_set; 5925 abi_ulong target_set; 5926 sigprocmask(0, NULL, &cur_set); 5927 host_to_target_old_sigset(&target_set, &cur_set); 5928 ret = target_set; 5929 } 5930 break; 5931 #endif 5932 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5933 case TARGET_NR_ssetmask: 5934 { 5935 sigset_t set, oset, cur_set; 5936 abi_ulong target_set = arg1; 5937 sigprocmask(0, NULL, &cur_set); 5938 target_to_host_old_sigset(&set, &target_set); 5939 sigorset(&set, &set, &cur_set); 5940 sigprocmask(SIG_SETMASK, &set, &oset); 5941 host_to_target_old_sigset(&target_set, &oset); 5942 ret = target_set; 5943 } 5944 break; 5945 #endif 5946 #ifdef TARGET_NR_sigprocmask 5947 case TARGET_NR_sigprocmask: 5948 { 5949 #if defined(TARGET_ALPHA) 5950 sigset_t set, oldset; 5951 abi_ulong mask; 5952 int how; 5953 5954 switch (arg1) { 5955 case TARGET_SIG_BLOCK: 5956 how = SIG_BLOCK; 5957 break; 5958 case TARGET_SIG_UNBLOCK: 5959 how = SIG_UNBLOCK; 5960 break; 5961 case TARGET_SIG_SETMASK: 5962 how = SIG_SETMASK; 5963 break; 5964 default: 5965 ret = -TARGET_EINVAL; 5966 goto fail; 5967 } 5968 mask = arg2; 5969 target_to_host_old_sigset(&set, &mask); 5970 5971 ret = get_errno(sigprocmask(how, &set, &oldset)); 5972 if (!is_error(ret)) { 5973 host_to_target_old_sigset(&mask, &oldset); 5974 ret = mask; 5975 
((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 5976 } 5977 #else 5978 sigset_t set, oldset, *set_ptr; 5979 int how; 5980 5981 if (arg2) { 5982 switch (arg1) { 5983 case TARGET_SIG_BLOCK: 5984 how = SIG_BLOCK; 5985 break; 5986 case TARGET_SIG_UNBLOCK: 5987 how = SIG_UNBLOCK; 5988 break; 5989 case TARGET_SIG_SETMASK: 5990 how = SIG_SETMASK; 5991 break; 5992 default: 5993 ret = -TARGET_EINVAL; 5994 goto fail; 5995 } 5996 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5997 goto efault; 5998 target_to_host_old_sigset(&set, p); 5999 unlock_user(p, arg2, 0); 6000 set_ptr = &set; 6001 } else { 6002 how = 0; 6003 set_ptr = NULL; 6004 } 6005 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6006 if (!is_error(ret) && arg3) { 6007 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6008 goto efault; 6009 host_to_target_old_sigset(p, &oldset); 6010 unlock_user(p, arg3, sizeof(target_sigset_t)); 6011 } 6012 #endif 6013 } 6014 break; 6015 #endif 6016 case TARGET_NR_rt_sigprocmask: 6017 { 6018 int how = arg1; 6019 sigset_t set, oldset, *set_ptr; 6020 6021 if (arg2) { 6022 switch(how) { 6023 case TARGET_SIG_BLOCK: 6024 how = SIG_BLOCK; 6025 break; 6026 case TARGET_SIG_UNBLOCK: 6027 how = SIG_UNBLOCK; 6028 break; 6029 case TARGET_SIG_SETMASK: 6030 how = SIG_SETMASK; 6031 break; 6032 default: 6033 ret = -TARGET_EINVAL; 6034 goto fail; 6035 } 6036 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6037 goto efault; 6038 target_to_host_sigset(&set, p); 6039 unlock_user(p, arg2, 0); 6040 set_ptr = &set; 6041 } else { 6042 how = 0; 6043 set_ptr = NULL; 6044 } 6045 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6046 if (!is_error(ret) && arg3) { 6047 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6048 goto efault; 6049 host_to_target_sigset(p, &oldset); 6050 unlock_user(p, arg3, sizeof(target_sigset_t)); 6051 } 6052 } 6053 break; 6054 #ifdef TARGET_NR_sigpending 6055 case TARGET_NR_sigpending: 6056 { 6057 sigset_t set; 6058 ret = get_errno(sigpending(&set)); 6059 if (!is_error(ret)) { 6060 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6061 goto efault; 6062 host_to_target_old_sigset(p, &set); 6063 unlock_user(p, arg1, sizeof(target_sigset_t)); 6064 } 6065 } 6066 break; 6067 #endif 6068 case TARGET_NR_rt_sigpending: 6069 { 6070 sigset_t set; 6071 ret = get_errno(sigpending(&set)); 6072 if (!is_error(ret)) { 6073 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6074 goto efault; 6075 host_to_target_sigset(p, &set); 6076 unlock_user(p, arg1, sizeof(target_sigset_t)); 6077 } 6078 } 6079 break; 6080 #ifdef TARGET_NR_sigsuspend 6081 case TARGET_NR_sigsuspend: 6082 { 6083 sigset_t set; 6084 #if defined(TARGET_ALPHA) 6085 abi_ulong mask = arg1; 6086 target_to_host_old_sigset(&set, &mask); 6087 #else 6088 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6089 goto efault; 6090 target_to_host_old_sigset(&set, p); 6091 unlock_user(p, arg1, 0); 6092 #endif 6093 ret = get_errno(sigsuspend(&set)); 6094 } 6095 break; 6096 #endif 6097 case TARGET_NR_rt_sigsuspend: 6098 { 6099 sigset_t set; 6100 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6101 goto efault; 6102 target_to_host_sigset(&set, p); 6103 unlock_user(p, arg1, 0); 6104 ret = get_errno(sigsuspend(&set)); 6105 } 6106 break; 6107 case TARGET_NR_rt_sigtimedwait: 6108 { 6109 sigset_t set; 6110 struct timespec uts, *puts; 6111 siginfo_t uinfo; 6112 6113 if (!(p = 
lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                target_to_host_timespec(puts, arg3);
            } else {
                puts = NULL;
            }
            ret = get_errno(sigtimedwait(&set, &uinfo, puts));
            if (!is_error(ret) && arg2) {
                if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
                    goto efault;
                host_to_target_siginfo(p, &uinfo);
                unlock_user(p, arg2, sizeof(target_siginfo_t));
            }
        }
        break;
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;
            /* arg3 points to a target_siginfo_t */
            if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
                goto efault;
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        break;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
        break;
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                goto efault;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            ret = get_errno(setrlimit(resource, &rlim));
        }
        break;
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    goto efault;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        break;
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                host_to_target_rusage(arg2, &rusage);
            }
        }
        break;
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_settimeofday:
        {
            struct timeval tv;
            if (copy_from_user_timeval(&tv, arg1))
                goto efault;
            ret = get_errno(settimeofday(&tv, NULL));
        }
        break;
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_S390X) || defined(TARGET_ALPHA)
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#else
        {
            struct target_sel_arg_struct *sel;
            abi_ulong inp, outp, exp, tvp;
            long nsel;

            if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
                goto efault;
            nsel = tswapal(sel->n);
            inp = tswapal(sel->inp);
            outp = tswapal(sel->outp);
            exp = tswapal(sel->exp);
            tvp = tswapal(sel->tvp);
            unlock_user_struct(sel, arg1, 0);
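            /* On these targets the old-style select receives a single guest
             * pointer to a block holding all five arguments (nfds, the three
             * fd_set pointers and the timeval pointer); the fields unpacked
             * above are handed on to do_select() below.
             */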
6232 ret = do_select(nsel, inp, outp, exp, tvp); 6233 } 6234 #endif 6235 break; 6236 #endif 6237 #ifdef TARGET_NR_pselect6 6238 case TARGET_NR_pselect6: 6239 { 6240 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6241 fd_set rfds, wfds, efds; 6242 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6243 struct timespec ts, *ts_ptr; 6244 6245 /* 6246 * The 6th arg is actually two args smashed together, 6247 * so we cannot use the C library. 6248 */ 6249 sigset_t set; 6250 struct { 6251 sigset_t *set; 6252 size_t size; 6253 } sig, *sig_ptr; 6254 6255 abi_ulong arg_sigset, arg_sigsize, *arg7; 6256 target_sigset_t *target_sigset; 6257 6258 n = arg1; 6259 rfd_addr = arg2; 6260 wfd_addr = arg3; 6261 efd_addr = arg4; 6262 ts_addr = arg5; 6263 6264 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6265 if (ret) { 6266 goto fail; 6267 } 6268 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6269 if (ret) { 6270 goto fail; 6271 } 6272 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6273 if (ret) { 6274 goto fail; 6275 } 6276 6277 /* 6278 * This takes a timespec, and not a timeval, so we cannot 6279 * use the do_select() helper ... 6280 */ 6281 if (ts_addr) { 6282 if (target_to_host_timespec(&ts, ts_addr)) { 6283 goto efault; 6284 } 6285 ts_ptr = &ts; 6286 } else { 6287 ts_ptr = NULL; 6288 } 6289 6290 /* Extract the two packed args for the sigset */ 6291 if (arg6) { 6292 sig_ptr = &sig; 6293 sig.size = _NSIG / 8; 6294 6295 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6296 if (!arg7) { 6297 goto efault; 6298 } 6299 arg_sigset = tswapal(arg7[0]); 6300 arg_sigsize = tswapal(arg7[1]); 6301 unlock_user(arg7, arg6, 0); 6302 6303 if (arg_sigset) { 6304 sig.set = &set; 6305 if (arg_sigsize != sizeof(*target_sigset)) { 6306 /* Like the kernel, we enforce correct size sigsets */ 6307 ret = -TARGET_EINVAL; 6308 goto fail; 6309 } 6310 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6311 sizeof(*target_sigset), 1); 6312 if (!target_sigset) { 6313 goto efault; 6314 } 6315 target_to_host_sigset(&set, target_sigset); 6316 unlock_user(target_sigset, arg_sigset, 0); 6317 } else { 6318 sig.set = NULL; 6319 } 6320 } else { 6321 sig_ptr = NULL; 6322 } 6323 6324 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6325 ts_ptr, sig_ptr)); 6326 6327 if (!is_error(ret)) { 6328 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6329 goto efault; 6330 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6331 goto efault; 6332 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6333 goto efault; 6334 6335 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6336 goto efault; 6337 } 6338 } 6339 break; 6340 #endif 6341 case TARGET_NR_symlink: 6342 { 6343 void *p2; 6344 p = lock_user_string(arg1); 6345 p2 = lock_user_string(arg2); 6346 if (!p || !p2) 6347 ret = -TARGET_EFAULT; 6348 else 6349 ret = get_errno(symlink(p, p2)); 6350 unlock_user(p2, arg2, 0); 6351 unlock_user(p, arg1, 0); 6352 } 6353 break; 6354 #if defined(TARGET_NR_symlinkat) 6355 case TARGET_NR_symlinkat: 6356 { 6357 void *p2; 6358 p = lock_user_string(arg1); 6359 p2 = lock_user_string(arg3); 6360 if (!p || !p2) 6361 ret = -TARGET_EFAULT; 6362 else 6363 ret = get_errno(symlinkat(p, arg2, p2)); 6364 unlock_user(p2, arg3, 0); 6365 unlock_user(p, arg1, 0); 6366 } 6367 break; 6368 #endif 6369 #ifdef TARGET_NR_oldlstat 6370 case TARGET_NR_oldlstat: 6371 goto unimplemented; 6372 #endif 6373 case TARGET_NR_readlink: 6374 { 6375 void *p2; 6376 p = lock_user_string(arg1); 6377 p2 = lock_user(VERIFY_WRITE, 
arg2, arg3, 0); 6378 if (!p || !p2) { 6379 ret = -TARGET_EFAULT; 6380 } else if (is_proc_myself((const char *)p, "exe")) { 6381 char real[PATH_MAX], *temp; 6382 temp = realpath(exec_path, real); 6383 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6384 snprintf((char *)p2, arg3, "%s", real); 6385 } else { 6386 ret = get_errno(readlink(path(p), p2, arg3)); 6387 } 6388 unlock_user(p2, arg2, ret); 6389 unlock_user(p, arg1, 0); 6390 } 6391 break; 6392 #if defined(TARGET_NR_readlinkat) 6393 case TARGET_NR_readlinkat: 6394 { 6395 void *p2; 6396 p = lock_user_string(arg2); 6397 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6398 if (!p || !p2) { 6399 ret = -TARGET_EFAULT; 6400 } else if (is_proc_myself((const char *)p, "exe")) { 6401 char real[PATH_MAX], *temp; 6402 temp = realpath(exec_path, real); 6403 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6404 snprintf((char *)p2, arg4, "%s", real); 6405 } else { 6406 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 6407 } 6408 unlock_user(p2, arg3, ret); 6409 unlock_user(p, arg2, 0); 6410 } 6411 break; 6412 #endif 6413 #ifdef TARGET_NR_uselib 6414 case TARGET_NR_uselib: 6415 goto unimplemented; 6416 #endif 6417 #ifdef TARGET_NR_swapon 6418 case TARGET_NR_swapon: 6419 if (!(p = lock_user_string(arg1))) 6420 goto efault; 6421 ret = get_errno(swapon(p, arg2)); 6422 unlock_user(p, arg1, 0); 6423 break; 6424 #endif 6425 case TARGET_NR_reboot: 6426 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6427 /* arg4 must be ignored in all other cases */ 6428 p = lock_user_string(arg4); 6429 if (!p) { 6430 goto efault; 6431 } 6432 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6433 unlock_user(p, arg4, 0); 6434 } else { 6435 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6436 } 6437 break; 6438 #ifdef TARGET_NR_readdir 6439 case TARGET_NR_readdir: 6440 goto unimplemented; 6441 #endif 6442 #ifdef TARGET_NR_mmap 6443 case TARGET_NR_mmap: 6444 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6445 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 6446 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6447 || defined(TARGET_S390X) 6448 { 6449 abi_ulong *v; 6450 abi_ulong v1, v2, v3, v4, v5, v6; 6451 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6452 goto efault; 6453 v1 = tswapal(v[0]); 6454 v2 = tswapal(v[1]); 6455 v3 = tswapal(v[2]); 6456 v4 = tswapal(v[3]); 6457 v5 = tswapal(v[4]); 6458 v6 = tswapal(v[5]); 6459 unlock_user(v, arg1, 0); 6460 ret = get_errno(target_mmap(v1, v2, v3, 6461 target_to_host_bitmask(v4, mmap_flags_tbl), 6462 v5, v6)); 6463 } 6464 #else 6465 ret = get_errno(target_mmap(arg1, arg2, arg3, 6466 target_to_host_bitmask(arg4, mmap_flags_tbl), 6467 arg5, 6468 arg6)); 6469 #endif 6470 break; 6471 #endif 6472 #ifdef TARGET_NR_mmap2 6473 case TARGET_NR_mmap2: 6474 #ifndef MMAP_SHIFT 6475 #define MMAP_SHIFT 12 6476 #endif 6477 ret = get_errno(target_mmap(arg1, arg2, arg3, 6478 target_to_host_bitmask(arg4, mmap_flags_tbl), 6479 arg5, 6480 arg6 << MMAP_SHIFT)); 6481 break; 6482 #endif 6483 case TARGET_NR_munmap: 6484 ret = get_errno(target_munmap(arg1, arg2)); 6485 break; 6486 case TARGET_NR_mprotect: 6487 { 6488 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 6489 /* Special hack to detect libc making the stack executable. 
             */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(arg1));
        break;
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
        break;
#endif
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
        break;
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
        break;
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
            break;
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.
*/ 6565 ret = 20 - ret; 6566 #endif 6567 break; 6568 case TARGET_NR_setpriority: 6569 ret = get_errno(setpriority(arg1, arg2, arg3)); 6570 break; 6571 #ifdef TARGET_NR_profil 6572 case TARGET_NR_profil: 6573 goto unimplemented; 6574 #endif 6575 case TARGET_NR_statfs: 6576 if (!(p = lock_user_string(arg1))) 6577 goto efault; 6578 ret = get_errno(statfs(path(p), &stfs)); 6579 unlock_user(p, arg1, 0); 6580 convert_statfs: 6581 if (!is_error(ret)) { 6582 struct target_statfs *target_stfs; 6583 6584 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6585 goto efault; 6586 __put_user(stfs.f_type, &target_stfs->f_type); 6587 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6588 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6589 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6590 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6591 __put_user(stfs.f_files, &target_stfs->f_files); 6592 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6593 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6594 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6595 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6596 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6597 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6598 unlock_user_struct(target_stfs, arg2, 1); 6599 } 6600 break; 6601 case TARGET_NR_fstatfs: 6602 ret = get_errno(fstatfs(arg1, &stfs)); 6603 goto convert_statfs; 6604 #ifdef TARGET_NR_statfs64 6605 case TARGET_NR_statfs64: 6606 if (!(p = lock_user_string(arg1))) 6607 goto efault; 6608 ret = get_errno(statfs(path(p), &stfs)); 6609 unlock_user(p, arg1, 0); 6610 convert_statfs64: 6611 if (!is_error(ret)) { 6612 struct target_statfs64 *target_stfs; 6613 6614 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6615 goto efault; 6616 __put_user(stfs.f_type, &target_stfs->f_type); 6617 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6618 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6619 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6620 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6621 __put_user(stfs.f_files, &target_stfs->f_files); 6622 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6623 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6624 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6625 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6626 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6627 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6628 unlock_user_struct(target_stfs, arg3, 1); 6629 } 6630 break; 6631 case TARGET_NR_fstatfs64: 6632 ret = get_errno(fstatfs(arg1, &stfs)); 6633 goto convert_statfs64; 6634 #endif 6635 #ifdef TARGET_NR_ioperm 6636 case TARGET_NR_ioperm: 6637 goto unimplemented; 6638 #endif 6639 #ifdef TARGET_NR_socketcall 6640 case TARGET_NR_socketcall: 6641 ret = do_socketcall(arg1, arg2); 6642 break; 6643 #endif 6644 #ifdef TARGET_NR_accept 6645 case TARGET_NR_accept: 6646 ret = do_accept4(arg1, arg2, arg3, 0); 6647 break; 6648 #endif 6649 #ifdef TARGET_NR_accept4 6650 case TARGET_NR_accept4: 6651 #ifdef CONFIG_ACCEPT4 6652 ret = do_accept4(arg1, arg2, arg3, arg4); 6653 #else 6654 goto unimplemented; 6655 #endif 6656 break; 6657 #endif 6658 #ifdef TARGET_NR_bind 6659 case TARGET_NR_bind: 6660 ret = do_bind(arg1, arg2, arg3); 6661 break; 6662 #endif 6663 #ifdef TARGET_NR_connect 6664 case TARGET_NR_connect: 6665 ret = do_connect(arg1, arg2, arg3); 6666 break; 6667 #endif 6668 #ifdef TARGET_NR_getpeername 6669 case TARGET_NR_getpeername: 6670 
ret = do_getpeername(arg1, arg2, arg3); 6671 break; 6672 #endif 6673 #ifdef TARGET_NR_getsockname 6674 case TARGET_NR_getsockname: 6675 ret = do_getsockname(arg1, arg2, arg3); 6676 break; 6677 #endif 6678 #ifdef TARGET_NR_getsockopt 6679 case TARGET_NR_getsockopt: 6680 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6681 break; 6682 #endif 6683 #ifdef TARGET_NR_listen 6684 case TARGET_NR_listen: 6685 ret = get_errno(listen(arg1, arg2)); 6686 break; 6687 #endif 6688 #ifdef TARGET_NR_recv 6689 case TARGET_NR_recv: 6690 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6691 break; 6692 #endif 6693 #ifdef TARGET_NR_recvfrom 6694 case TARGET_NR_recvfrom: 6695 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6696 break; 6697 #endif 6698 #ifdef TARGET_NR_recvmsg 6699 case TARGET_NR_recvmsg: 6700 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6701 break; 6702 #endif 6703 #ifdef TARGET_NR_send 6704 case TARGET_NR_send: 6705 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6706 break; 6707 #endif 6708 #ifdef TARGET_NR_sendmsg 6709 case TARGET_NR_sendmsg: 6710 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6711 break; 6712 #endif 6713 #ifdef TARGET_NR_sendto 6714 case TARGET_NR_sendto: 6715 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6716 break; 6717 #endif 6718 #ifdef TARGET_NR_shutdown 6719 case TARGET_NR_shutdown: 6720 ret = get_errno(shutdown(arg1, arg2)); 6721 break; 6722 #endif 6723 #ifdef TARGET_NR_socket 6724 case TARGET_NR_socket: 6725 ret = do_socket(arg1, arg2, arg3); 6726 break; 6727 #endif 6728 #ifdef TARGET_NR_socketpair 6729 case TARGET_NR_socketpair: 6730 ret = do_socketpair(arg1, arg2, arg3, arg4); 6731 break; 6732 #endif 6733 #ifdef TARGET_NR_setsockopt 6734 case TARGET_NR_setsockopt: 6735 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6736 break; 6737 #endif 6738 6739 case TARGET_NR_syslog: 6740 if (!(p = lock_user_string(arg2))) 6741 goto efault; 6742 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6743 unlock_user(p, arg2, 0); 6744 break; 6745 6746 case TARGET_NR_setitimer: 6747 { 6748 struct itimerval value, ovalue, *pvalue; 6749 6750 if (arg2) { 6751 pvalue = &value; 6752 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6753 || copy_from_user_timeval(&pvalue->it_value, 6754 arg2 + sizeof(struct target_timeval))) 6755 goto efault; 6756 } else { 6757 pvalue = NULL; 6758 } 6759 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6760 if (!is_error(ret) && arg3) { 6761 if (copy_to_user_timeval(arg3, 6762 &ovalue.it_interval) 6763 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6764 &ovalue.it_value)) 6765 goto efault; 6766 } 6767 } 6768 break; 6769 case TARGET_NR_getitimer: 6770 { 6771 struct itimerval value; 6772 6773 ret = get_errno(getitimer(arg1, &value)); 6774 if (!is_error(ret) && arg2) { 6775 if (copy_to_user_timeval(arg2, 6776 &value.it_interval) 6777 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6778 &value.it_value)) 6779 goto efault; 6780 } 6781 } 6782 break; 6783 case TARGET_NR_stat: 6784 if (!(p = lock_user_string(arg1))) 6785 goto efault; 6786 ret = get_errno(stat(path(p), &st)); 6787 unlock_user(p, arg1, 0); 6788 goto do_stat; 6789 case TARGET_NR_lstat: 6790 if (!(p = lock_user_string(arg1))) 6791 goto efault; 6792 ret = get_errno(lstat(path(p), &st)); 6793 unlock_user(p, arg1, 0); 6794 goto do_stat; 6795 case TARGET_NR_fstat: 6796 { 6797 ret = get_errno(fstat(arg1, &st)); 6798 do_stat: 6799 if (!is_error(ret)) { 6800 struct target_stat *target_st; 6801 6802 if (!lock_user_struct(VERIFY_WRITE, target_st, 
arg2, 0)) 6803 goto efault; 6804 memset(target_st, 0, sizeof(*target_st)); 6805 __put_user(st.st_dev, &target_st->st_dev); 6806 __put_user(st.st_ino, &target_st->st_ino); 6807 __put_user(st.st_mode, &target_st->st_mode); 6808 __put_user(st.st_uid, &target_st->st_uid); 6809 __put_user(st.st_gid, &target_st->st_gid); 6810 __put_user(st.st_nlink, &target_st->st_nlink); 6811 __put_user(st.st_rdev, &target_st->st_rdev); 6812 __put_user(st.st_size, &target_st->st_size); 6813 __put_user(st.st_blksize, &target_st->st_blksize); 6814 __put_user(st.st_blocks, &target_st->st_blocks); 6815 __put_user(st.st_atime, &target_st->target_st_atime); 6816 __put_user(st.st_mtime, &target_st->target_st_mtime); 6817 __put_user(st.st_ctime, &target_st->target_st_ctime); 6818 unlock_user_struct(target_st, arg2, 1); 6819 } 6820 } 6821 break; 6822 #ifdef TARGET_NR_olduname 6823 case TARGET_NR_olduname: 6824 goto unimplemented; 6825 #endif 6826 #ifdef TARGET_NR_iopl 6827 case TARGET_NR_iopl: 6828 goto unimplemented; 6829 #endif 6830 case TARGET_NR_vhangup: 6831 ret = get_errno(vhangup()); 6832 break; 6833 #ifdef TARGET_NR_idle 6834 case TARGET_NR_idle: 6835 goto unimplemented; 6836 #endif 6837 #ifdef TARGET_NR_syscall 6838 case TARGET_NR_syscall: 6839 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6840 arg6, arg7, arg8, 0); 6841 break; 6842 #endif 6843 case TARGET_NR_wait4: 6844 { 6845 int status; 6846 abi_long status_ptr = arg2; 6847 struct rusage rusage, *rusage_ptr; 6848 abi_ulong target_rusage = arg4; 6849 if (target_rusage) 6850 rusage_ptr = &rusage; 6851 else 6852 rusage_ptr = NULL; 6853 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6854 if (!is_error(ret)) { 6855 if (status_ptr && ret) { 6856 status = host_to_target_waitstatus(status); 6857 if (put_user_s32(status, status_ptr)) 6858 goto efault; 6859 } 6860 if (target_rusage) 6861 host_to_target_rusage(target_rusage, &rusage); 6862 } 6863 } 6864 break; 6865 #ifdef TARGET_NR_swapoff 6866 case TARGET_NR_swapoff: 6867 if (!(p = lock_user_string(arg1))) 6868 goto efault; 6869 ret = get_errno(swapoff(p)); 6870 unlock_user(p, arg1, 0); 6871 break; 6872 #endif 6873 case TARGET_NR_sysinfo: 6874 { 6875 struct target_sysinfo *target_value; 6876 struct sysinfo value; 6877 ret = get_errno(sysinfo(&value)); 6878 if (!is_error(ret) && arg1) 6879 { 6880 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6881 goto efault; 6882 __put_user(value.uptime, &target_value->uptime); 6883 __put_user(value.loads[0], &target_value->loads[0]); 6884 __put_user(value.loads[1], &target_value->loads[1]); 6885 __put_user(value.loads[2], &target_value->loads[2]); 6886 __put_user(value.totalram, &target_value->totalram); 6887 __put_user(value.freeram, &target_value->freeram); 6888 __put_user(value.sharedram, &target_value->sharedram); 6889 __put_user(value.bufferram, &target_value->bufferram); 6890 __put_user(value.totalswap, &target_value->totalswap); 6891 __put_user(value.freeswap, &target_value->freeswap); 6892 __put_user(value.procs, &target_value->procs); 6893 __put_user(value.totalhigh, &target_value->totalhigh); 6894 __put_user(value.freehigh, &target_value->freehigh); 6895 __put_user(value.mem_unit, &target_value->mem_unit); 6896 unlock_user_struct(target_value, arg1, 1); 6897 } 6898 } 6899 break; 6900 #ifdef TARGET_NR_ipc 6901 case TARGET_NR_ipc: 6902 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6903 break; 6904 #endif 6905 #ifdef TARGET_NR_semget 6906 case TARGET_NR_semget: 6907 ret = get_errno(semget(arg1, arg2, arg3)); 6908 break; 6909 
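        /* Depending on the target ABI, System V IPC is exposed either through
         * the individual semget/semop/msg/shm syscalls handled in the cases
         * around here, or through the multiplexed ipc() syscall routed to
         * do_ipc() above.
         */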
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = do_semop(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        break;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
#ifdef TARGET_GPROF
        _mcleanup();
#endif
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release.
*/ 7009 if (qemu_uname_release && *qemu_uname_release) 7010 strcpy (buf->release, qemu_uname_release); 7011 } 7012 unlock_user_struct(buf, arg1, 1); 7013 } 7014 break; 7015 #ifdef TARGET_I386 7016 case TARGET_NR_modify_ldt: 7017 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7018 break; 7019 #if !defined(TARGET_X86_64) 7020 case TARGET_NR_vm86old: 7021 goto unimplemented; 7022 case TARGET_NR_vm86: 7023 ret = do_vm86(cpu_env, arg1, arg2); 7024 break; 7025 #endif 7026 #endif 7027 case TARGET_NR_adjtimex: 7028 goto unimplemented; 7029 #ifdef TARGET_NR_create_module 7030 case TARGET_NR_create_module: 7031 #endif 7032 case TARGET_NR_init_module: 7033 case TARGET_NR_delete_module: 7034 #ifdef TARGET_NR_get_kernel_syms 7035 case TARGET_NR_get_kernel_syms: 7036 #endif 7037 goto unimplemented; 7038 case TARGET_NR_quotactl: 7039 goto unimplemented; 7040 case TARGET_NR_getpgid: 7041 ret = get_errno(getpgid(arg1)); 7042 break; 7043 case TARGET_NR_fchdir: 7044 ret = get_errno(fchdir(arg1)); 7045 break; 7046 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7047 case TARGET_NR_bdflush: 7048 goto unimplemented; 7049 #endif 7050 #ifdef TARGET_NR_sysfs 7051 case TARGET_NR_sysfs: 7052 goto unimplemented; 7053 #endif 7054 case TARGET_NR_personality: 7055 ret = get_errno(personality(arg1)); 7056 break; 7057 #ifdef TARGET_NR_afs_syscall 7058 case TARGET_NR_afs_syscall: 7059 goto unimplemented; 7060 #endif 7061 #ifdef TARGET_NR__llseek /* Not on alpha */ 7062 case TARGET_NR__llseek: 7063 { 7064 int64_t res; 7065 #if !defined(__NR_llseek) 7066 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7067 if (res == -1) { 7068 ret = get_errno(res); 7069 } else { 7070 ret = 0; 7071 } 7072 #else 7073 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7074 #endif 7075 if ((ret == 0) && put_user_s64(res, arg4)) { 7076 goto efault; 7077 } 7078 } 7079 break; 7080 #endif 7081 case TARGET_NR_getdents: 7082 #ifdef __NR_getdents 7083 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7084 { 7085 struct target_dirent *target_dirp; 7086 struct linux_dirent *dirp; 7087 abi_long count = arg3; 7088 7089 dirp = malloc(count); 7090 if (!dirp) { 7091 ret = -TARGET_ENOMEM; 7092 goto fail; 7093 } 7094 7095 ret = get_errno(sys_getdents(arg1, dirp, count)); 7096 if (!is_error(ret)) { 7097 struct linux_dirent *de; 7098 struct target_dirent *tde; 7099 int len = ret; 7100 int reclen, treclen; 7101 int count1, tnamelen; 7102 7103 count1 = 0; 7104 de = dirp; 7105 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7106 goto efault; 7107 tde = target_dirp; 7108 while (len > 0) { 7109 reclen = de->d_reclen; 7110 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7111 assert(tnamelen >= 0); 7112 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7113 assert(count1 + treclen <= count); 7114 tde->d_reclen = tswap16(treclen); 7115 tde->d_ino = tswapal(de->d_ino); 7116 tde->d_off = tswapal(de->d_off); 7117 memcpy(tde->d_name, de->d_name, tnamelen); 7118 de = (struct linux_dirent *)((char *)de + reclen); 7119 len -= reclen; 7120 tde = (struct target_dirent *)((char *)tde + treclen); 7121 count1 += treclen; 7122 } 7123 ret = count1; 7124 unlock_user(target_dirp, arg2, ret); 7125 } 7126 free(dirp); 7127 } 7128 #else 7129 { 7130 struct linux_dirent *dirp; 7131 abi_long count = arg3; 7132 7133 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7134 goto efault; 7135 ret = get_errno(sys_getdents(arg1, dirp, count)); 7136 if (!is_error(ret)) { 7137 struct linux_dirent *de; 7138 int len = ret; 7139 int reclen; 7140 de = 
dirp; 7141 while (len > 0) { 7142 reclen = de->d_reclen; 7143 if (reclen > len) 7144 break; 7145 de->d_reclen = tswap16(reclen); 7146 tswapls(&de->d_ino); 7147 tswapls(&de->d_off); 7148 de = (struct linux_dirent *)((char *)de + reclen); 7149 len -= reclen; 7150 } 7151 } 7152 unlock_user(dirp, arg2, ret); 7153 } 7154 #endif 7155 #else 7156 /* Implement getdents in terms of getdents64 */ 7157 { 7158 struct linux_dirent64 *dirp; 7159 abi_long count = arg3; 7160 7161 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 7162 if (!dirp) { 7163 goto efault; 7164 } 7165 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7166 if (!is_error(ret)) { 7167 /* Convert the dirent64 structs to target dirent. We do this 7168 * in-place, since we can guarantee that a target_dirent is no 7169 * larger than a dirent64; however this means we have to be 7170 * careful to read everything before writing in the new format. 7171 */ 7172 struct linux_dirent64 *de; 7173 struct target_dirent *tde; 7174 int len = ret; 7175 int tlen = 0; 7176 7177 de = dirp; 7178 tde = (struct target_dirent *)dirp; 7179 while (len > 0) { 7180 int namelen, treclen; 7181 int reclen = de->d_reclen; 7182 uint64_t ino = de->d_ino; 7183 int64_t off = de->d_off; 7184 uint8_t type = de->d_type; 7185 7186 namelen = strlen(de->d_name); 7187 treclen = offsetof(struct target_dirent, d_name) 7188 + namelen + 2; 7189 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 7190 7191 memmove(tde->d_name, de->d_name, namelen + 1); 7192 tde->d_ino = tswapal(ino); 7193 tde->d_off = tswapal(off); 7194 tde->d_reclen = tswap16(treclen); 7195 /* The target_dirent type is in what was formerly a padding 7196 * byte at the end of the structure: 7197 */ 7198 *(((char *)tde) + treclen - 1) = type; 7199 7200 de = (struct linux_dirent64 *)((char *)de + reclen); 7201 tde = (struct target_dirent *)((char *)tde + treclen); 7202 len -= reclen; 7203 tlen += treclen; 7204 } 7205 ret = tlen; 7206 } 7207 unlock_user(dirp, arg2, ret); 7208 } 7209 #endif 7210 break; 7211 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7212 case TARGET_NR_getdents64: 7213 { 7214 struct linux_dirent64 *dirp; 7215 abi_long count = arg3; 7216 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7217 goto efault; 7218 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7219 if (!is_error(ret)) { 7220 struct linux_dirent64 *de; 7221 int len = ret; 7222 int reclen; 7223 de = dirp; 7224 while (len > 0) { 7225 reclen = de->d_reclen; 7226 if (reclen > len) 7227 break; 7228 de->d_reclen = tswap16(reclen); 7229 tswap64s((uint64_t *)&de->d_ino); 7230 tswap64s((uint64_t *)&de->d_off); 7231 de = (struct linux_dirent64 *)((char *)de + reclen); 7232 len -= reclen; 7233 } 7234 } 7235 unlock_user(dirp, arg2, ret); 7236 } 7237 break; 7238 #endif /* TARGET_NR_getdents64 */ 7239 #if defined(TARGET_NR__newselect) 7240 case TARGET_NR__newselect: 7241 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7242 break; 7243 #endif 7244 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7245 # ifdef TARGET_NR_poll 7246 case TARGET_NR_poll: 7247 # endif 7248 # ifdef TARGET_NR_ppoll 7249 case TARGET_NR_ppoll: 7250 # endif 7251 { 7252 struct target_pollfd *target_pfd; 7253 unsigned int nfds = arg2; 7254 int timeout = arg3; 7255 struct pollfd *pfd; 7256 unsigned int i; 7257 7258 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7259 if (!target_pfd) 7260 goto efault; 7261 7262 pfd = alloca(sizeof(struct pollfd) * nfds); 7263 for(i = 0; i < nfds; i++) { 7264 pfd[i].fd = 
tswap32(target_pfd[i].fd); 7265 pfd[i].events = tswap16(target_pfd[i].events); 7266 } 7267 7268 # ifdef TARGET_NR_ppoll 7269 if (num == TARGET_NR_ppoll) { 7270 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7271 target_sigset_t *target_set; 7272 sigset_t _set, *set = &_set; 7273 7274 if (arg3) { 7275 if (target_to_host_timespec(timeout_ts, arg3)) { 7276 unlock_user(target_pfd, arg1, 0); 7277 goto efault; 7278 } 7279 } else { 7280 timeout_ts = NULL; 7281 } 7282 7283 if (arg4) { 7284 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7285 if (!target_set) { 7286 unlock_user(target_pfd, arg1, 0); 7287 goto efault; 7288 } 7289 target_to_host_sigset(set, target_set); 7290 } else { 7291 set = NULL; 7292 } 7293 7294 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7295 7296 if (!is_error(ret) && arg3) { 7297 host_to_target_timespec(arg3, timeout_ts); 7298 } 7299 if (arg4) { 7300 unlock_user(target_set, arg4, 0); 7301 } 7302 } else 7303 # endif 7304 ret = get_errno(poll(pfd, nfds, timeout)); 7305 7306 if (!is_error(ret)) { 7307 for(i = 0; i < nfds; i++) { 7308 target_pfd[i].revents = tswap16(pfd[i].revents); 7309 } 7310 } 7311 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7312 } 7313 break; 7314 #endif 7315 case TARGET_NR_flock: 7316 /* NOTE: the flock constant seems to be the same for every 7317 Linux platform */ 7318 ret = get_errno(flock(arg1, arg2)); 7319 break; 7320 case TARGET_NR_readv: 7321 { 7322 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7323 if (vec != NULL) { 7324 ret = get_errno(readv(arg1, vec, arg3)); 7325 unlock_iovec(vec, arg2, arg3, 1); 7326 } else { 7327 ret = -host_to_target_errno(errno); 7328 } 7329 } 7330 break; 7331 case TARGET_NR_writev: 7332 { 7333 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7334 if (vec != NULL) { 7335 ret = get_errno(writev(arg1, vec, arg3)); 7336 unlock_iovec(vec, arg2, arg3, 0); 7337 } else { 7338 ret = -host_to_target_errno(errno); 7339 } 7340 } 7341 break; 7342 case TARGET_NR_getsid: 7343 ret = get_errno(getsid(arg1)); 7344 break; 7345 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7346 case TARGET_NR_fdatasync: 7347 ret = get_errno(fdatasync(arg1)); 7348 break; 7349 #endif 7350 case TARGET_NR__sysctl: 7351 /* We don't implement this, but ENOTDIR is always a safe 7352 return value. */ 7353 ret = -TARGET_ENOTDIR; 7354 break; 7355 case TARGET_NR_sched_getaffinity: 7356 { 7357 unsigned int mask_size; 7358 unsigned long *mask; 7359 7360 /* 7361 * sched_getaffinity needs multiples of ulong, so need to take 7362 * care of mismatches between target ulong and host ulong sizes. 7363 */ 7364 if (arg2 & (sizeof(abi_ulong) - 1)) { 7365 ret = -TARGET_EINVAL; 7366 break; 7367 } 7368 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7369 7370 mask = alloca(mask_size); 7371 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7372 7373 if (!is_error(ret)) { 7374 if (copy_to_user(arg3, mask, ret)) { 7375 goto efault; 7376 } 7377 } 7378 } 7379 break; 7380 case TARGET_NR_sched_setaffinity: 7381 { 7382 unsigned int mask_size; 7383 unsigned long *mask; 7384 7385 /* 7386 * sched_setaffinity needs multiples of ulong, so need to take 7387 * care of mismatches between target ulong and host ulong sizes. 
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
                goto efault;
            }
            memcpy(mask, p, arg2);
            unlock_user_struct(p, arg3, 0);

            ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
        break;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            ret = get_errno(sched_setparam(arg1, &schp));
        }
        break;
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    goto efault;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        break;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
        break;
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
        break;
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
        break;
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
        break;
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
        break;
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec(arg2, &ts);
            }
        }
        break;
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
        goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
        goto unimplemented;
#endif
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
            {
                int deathsig;
                ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
                if (!is_error(ret) && arg2
                    && put_user_ual(deathsig, arg2)) {
                    goto efault;
                }
                break;
            }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
            {
                void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
                if (!name) {
                    goto efault;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 16);
                break;
            }
        case PR_SET_NAME:
            {
                void *name = lock_user(VERIFY_READ, arg2, 16, 1);
                if (!name) {
                    goto efault;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 0);
                break;
            }
#endif
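        /* PR_GET_NAME and PR_SET_NAME above use a fixed 16-byte buffer
         * because the Linux task comm name is at most 16 bytes including
         * the terminating NUL (TASK_COMM_LEN).
         */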
default: 7517 /* Most prctl options have no pointer arguments */ 7518 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7519 break; 7520 } 7521 break; 7522 #ifdef TARGET_NR_arch_prctl 7523 case TARGET_NR_arch_prctl: 7524 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7525 ret = do_arch_prctl(cpu_env, arg1, arg2); 7526 break; 7527 #else 7528 goto unimplemented; 7529 #endif 7530 #endif 7531 #ifdef TARGET_NR_pread64 7532 case TARGET_NR_pread64: 7533 if (regpairs_aligned(cpu_env)) { 7534 arg4 = arg5; 7535 arg5 = arg6; 7536 } 7537 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7538 goto efault; 7539 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7540 unlock_user(p, arg2, ret); 7541 break; 7542 case TARGET_NR_pwrite64: 7543 if (regpairs_aligned(cpu_env)) { 7544 arg4 = arg5; 7545 arg5 = arg6; 7546 } 7547 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7548 goto efault; 7549 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7550 unlock_user(p, arg2, 0); 7551 break; 7552 #endif 7553 case TARGET_NR_getcwd: 7554 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7555 goto efault; 7556 ret = get_errno(sys_getcwd1(p, arg2)); 7557 unlock_user(p, arg1, ret); 7558 break; 7559 case TARGET_NR_capget: 7560 goto unimplemented; 7561 case TARGET_NR_capset: 7562 goto unimplemented; 7563 case TARGET_NR_sigaltstack: 7564 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7565 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7566 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7567 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7568 break; 7569 #else 7570 goto unimplemented; 7571 #endif 7572 7573 #ifdef CONFIG_SENDFILE 7574 case TARGET_NR_sendfile: 7575 { 7576 off_t *offp = NULL; 7577 off_t off; 7578 if (arg3) { 7579 ret = get_user_sal(off, arg3); 7580 if (is_error(ret)) { 7581 break; 7582 } 7583 offp = &off; 7584 } 7585 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7586 if (!is_error(ret) && arg3) { 7587 abi_long ret2 = put_user_sal(off, arg3); 7588 if (is_error(ret2)) { 7589 ret = ret2; 7590 } 7591 } 7592 break; 7593 } 7594 #ifdef TARGET_NR_sendfile64 7595 case TARGET_NR_sendfile64: 7596 { 7597 off_t *offp = NULL; 7598 off_t off; 7599 if (arg3) { 7600 ret = get_user_s64(off, arg3); 7601 if (is_error(ret)) { 7602 break; 7603 } 7604 offp = &off; 7605 } 7606 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7607 if (!is_error(ret) && arg3) { 7608 abi_long ret2 = put_user_s64(off, arg3); 7609 if (is_error(ret2)) { 7610 ret = ret2; 7611 } 7612 } 7613 break; 7614 } 7615 #endif 7616 #else 7617 case TARGET_NR_sendfile: 7618 #ifdef TARGET_NR_sendfile64 7619 case TARGET_NR_sendfile64: 7620 #endif 7621 goto unimplemented; 7622 #endif 7623 7624 #ifdef TARGET_NR_getpmsg 7625 case TARGET_NR_getpmsg: 7626 goto unimplemented; 7627 #endif 7628 #ifdef TARGET_NR_putpmsg 7629 case TARGET_NR_putpmsg: 7630 goto unimplemented; 7631 #endif 7632 #ifdef TARGET_NR_vfork 7633 case TARGET_NR_vfork: 7634 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7635 0, 0, 0, 0)); 7636 break; 7637 #endif 7638 #ifdef TARGET_NR_ugetrlimit 7639 case TARGET_NR_ugetrlimit: 7640 { 7641 struct rlimit rlim; 7642 int resource = target_to_host_resource(arg1); 7643 ret = get_errno(getrlimit(resource, &rlim)); 7644 if (!is_error(ret)) { 7645 struct target_rlimit *target_rlim; 7646 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7647 goto efault; 7648 
target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7649 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7650 unlock_user_struct(target_rlim, arg2, 1); 7651 } 7652 break; 7653 } 7654 #endif 7655 #ifdef TARGET_NR_truncate64 7656 case TARGET_NR_truncate64: 7657 if (!(p = lock_user_string(arg1))) 7658 goto efault; 7659 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7660 unlock_user(p, arg1, 0); 7661 break; 7662 #endif 7663 #ifdef TARGET_NR_ftruncate64 7664 case TARGET_NR_ftruncate64: 7665 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7666 break; 7667 #endif 7668 #ifdef TARGET_NR_stat64 7669 case TARGET_NR_stat64: 7670 if (!(p = lock_user_string(arg1))) 7671 goto efault; 7672 ret = get_errno(stat(path(p), &st)); 7673 unlock_user(p, arg1, 0); 7674 if (!is_error(ret)) 7675 ret = host_to_target_stat64(cpu_env, arg2, &st); 7676 break; 7677 #endif 7678 #ifdef TARGET_NR_lstat64 7679 case TARGET_NR_lstat64: 7680 if (!(p = lock_user_string(arg1))) 7681 goto efault; 7682 ret = get_errno(lstat(path(p), &st)); 7683 unlock_user(p, arg1, 0); 7684 if (!is_error(ret)) 7685 ret = host_to_target_stat64(cpu_env, arg2, &st); 7686 break; 7687 #endif 7688 #ifdef TARGET_NR_fstat64 7689 case TARGET_NR_fstat64: 7690 ret = get_errno(fstat(arg1, &st)); 7691 if (!is_error(ret)) 7692 ret = host_to_target_stat64(cpu_env, arg2, &st); 7693 break; 7694 #endif 7695 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 7696 #ifdef TARGET_NR_fstatat64 7697 case TARGET_NR_fstatat64: 7698 #endif 7699 #ifdef TARGET_NR_newfstatat 7700 case TARGET_NR_newfstatat: 7701 #endif 7702 if (!(p = lock_user_string(arg2))) 7703 goto efault; 7704 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 7705 if (!is_error(ret)) 7706 ret = host_to_target_stat64(cpu_env, arg3, &st); 7707 break; 7708 #endif 7709 case TARGET_NR_lchown: 7710 if (!(p = lock_user_string(arg1))) 7711 goto efault; 7712 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7713 unlock_user(p, arg1, 0); 7714 break; 7715 #ifdef TARGET_NR_getuid 7716 case TARGET_NR_getuid: 7717 ret = get_errno(high2lowuid(getuid())); 7718 break; 7719 #endif 7720 #ifdef TARGET_NR_getgid 7721 case TARGET_NR_getgid: 7722 ret = get_errno(high2lowgid(getgid())); 7723 break; 7724 #endif 7725 #ifdef TARGET_NR_geteuid 7726 case TARGET_NR_geteuid: 7727 ret = get_errno(high2lowuid(geteuid())); 7728 break; 7729 #endif 7730 #ifdef TARGET_NR_getegid 7731 case TARGET_NR_getegid: 7732 ret = get_errno(high2lowgid(getegid())); 7733 break; 7734 #endif 7735 case TARGET_NR_setreuid: 7736 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7737 break; 7738 case TARGET_NR_setregid: 7739 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7740 break; 7741 case TARGET_NR_getgroups: 7742 { 7743 int gidsetsize = arg1; 7744 target_id *target_grouplist; 7745 gid_t *grouplist; 7746 int i; 7747 7748 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7749 ret = get_errno(getgroups(gidsetsize, grouplist)); 7750 if (gidsetsize == 0) 7751 break; 7752 if (!is_error(ret)) { 7753 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 7754 if (!target_grouplist) 7755 goto efault; 7756 for(i = 0;i < ret; i++) 7757 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7758 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 7759 } 7760 } 7761 break; 7762 case TARGET_NR_setgroups: 7763 { 7764 int gidsetsize = arg1; 7765 target_id *target_grouplist; 7766 gid_t *grouplist = NULL; 7767 int i; 7768 if 
(gidsetsize) { 7769 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7770 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 7771 if (!target_grouplist) { 7772 ret = -TARGET_EFAULT; 7773 goto fail; 7774 } 7775 for (i = 0; i < gidsetsize; i++) { 7776 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7777 } 7778 unlock_user(target_grouplist, arg2, 0); 7779 } 7780 ret = get_errno(setgroups(gidsetsize, grouplist)); 7781 } 7782 break; 7783 case TARGET_NR_fchown: 7784 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7785 break; 7786 #if defined(TARGET_NR_fchownat) 7787 case TARGET_NR_fchownat: 7788 if (!(p = lock_user_string(arg2))) 7789 goto efault; 7790 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 7791 low2highgid(arg4), arg5)); 7792 unlock_user(p, arg2, 0); 7793 break; 7794 #endif 7795 #ifdef TARGET_NR_setresuid 7796 case TARGET_NR_setresuid: 7797 ret = get_errno(setresuid(low2highuid(arg1), 7798 low2highuid(arg2), 7799 low2highuid(arg3))); 7800 break; 7801 #endif 7802 #ifdef TARGET_NR_getresuid 7803 case TARGET_NR_getresuid: 7804 { 7805 uid_t ruid, euid, suid; 7806 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7807 if (!is_error(ret)) { 7808 if (put_user_u16(high2lowuid(ruid), arg1) 7809 || put_user_u16(high2lowuid(euid), arg2) 7810 || put_user_u16(high2lowuid(suid), arg3)) 7811 goto efault; 7812 } 7813 } 7814 break; 7815 #endif 7816 #ifdef TARGET_NR_getresgid 7817 case TARGET_NR_setresgid: 7818 ret = get_errno(setresgid(low2highgid(arg1), 7819 low2highgid(arg2), 7820 low2highgid(arg3))); 7821 break; 7822 #endif 7823 #ifdef TARGET_NR_getresgid 7824 case TARGET_NR_getresgid: 7825 { 7826 gid_t rgid, egid, sgid; 7827 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7828 if (!is_error(ret)) { 7829 if (put_user_u16(high2lowgid(rgid), arg1) 7830 || put_user_u16(high2lowgid(egid), arg2) 7831 || put_user_u16(high2lowgid(sgid), arg3)) 7832 goto efault; 7833 } 7834 } 7835 break; 7836 #endif 7837 case TARGET_NR_chown: 7838 if (!(p = lock_user_string(arg1))) 7839 goto efault; 7840 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7841 unlock_user(p, arg1, 0); 7842 break; 7843 case TARGET_NR_setuid: 7844 ret = get_errno(setuid(low2highuid(arg1))); 7845 break; 7846 case TARGET_NR_setgid: 7847 ret = get_errno(setgid(low2highgid(arg1))); 7848 break; 7849 case TARGET_NR_setfsuid: 7850 ret = get_errno(setfsuid(arg1)); 7851 break; 7852 case TARGET_NR_setfsgid: 7853 ret = get_errno(setfsgid(arg1)); 7854 break; 7855 7856 #ifdef TARGET_NR_lchown32 7857 case TARGET_NR_lchown32: 7858 if (!(p = lock_user_string(arg1))) 7859 goto efault; 7860 ret = get_errno(lchown(p, arg2, arg3)); 7861 unlock_user(p, arg1, 0); 7862 break; 7863 #endif 7864 #ifdef TARGET_NR_getuid32 7865 case TARGET_NR_getuid32: 7866 ret = get_errno(getuid()); 7867 break; 7868 #endif 7869 7870 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 7871 /* Alpha specific */ 7872 case TARGET_NR_getxuid: 7873 { 7874 uid_t euid; 7875 euid=geteuid(); 7876 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 7877 } 7878 ret = get_errno(getuid()); 7879 break; 7880 #endif 7881 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 7882 /* Alpha specific */ 7883 case TARGET_NR_getxgid: 7884 { 7885 uid_t egid; 7886 egid=getegid(); 7887 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 7888 } 7889 ret = get_errno(getgid()); 7890 break; 7891 #endif 7892 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 7893 /* Alpha specific */ 7894 case TARGET_NR_osf_getsysinfo: 7895 ret = 
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr.  */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                         | SWCR_TRAP_ENABLE_DZE
                                         | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                         | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64(swcr, arg2))
                    goto efault;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64(swcr, arg2)) {
                    goto efault;
                }
                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr.  */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, orig_fpcr;
                int si_code;

                if (get_user_u64(exc, arg2)) {
                    goto efault;
                }

                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* We only add to the exception status here.  */
                fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;

                /* Old exceptions are not signaled.  */
                fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                /* If any exceptions set by this call,
                   and are unmasked, send a signal.  */
                si_code = 0;
                if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
                    si_code = TARGET_FPE_FLTRES;
                }
                if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
                    si_code = TARGET_FPE_FLTUND;
                }
                if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
                    si_code = TARGET_FPE_FLTOVF;
                }
                if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
                    si_code = TARGET_FPE_FLTDIV;
                }
                if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
                    si_code = TARGET_FPE_FLTINV;
                }
                if (si_code != 0) {
                    target_siginfo_t info;
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
                }
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        break;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
        break;
#endif

#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
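    /* As with the other *32 variants above, the cases below take full
     * 32-bit IDs from the guest, so they are passed straight through to
     * the host without the high2low/low2high conversion used by the
     * legacy 16-bit calls.
     */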
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif

    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
        mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
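    /* fcntl64 on 32-bit targets: the guest's struct flock64 has to be
     * converted field by field because the host's struct flock64 layout
     * (and, on ARM, the EABI variant with different padding) does not
     * match the target's.  The F_GETLK64 result is converted back the
     * same way.
     */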
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
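    /* Extended attribute calls: the name is a NUL-terminated guest string
     * and the value is an opaque byte buffer of the length the guest
     * supplied, so the wrappers below just lock the guest buffers,
     * forward the call, and return -TARGET_EFAULT if any pointer is bad.
     */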
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        break;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        break;
#endif
#endif /* CONFIG_ATTR */
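    /* set_thread_area/get_thread_area: there is no portable host
     * equivalent, so each target stores the TLS pointer wherever its
     * kernel ABI keeps it (a CPU register, a cpu_env field, or the
     * TaskState), and other targets fall through to ENOSYS.
     */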
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
        {
            TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
            ts->tp_value = arg1;
            ret = 0;
            break;
        }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
        {
            TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
            ret = ts->tp_value;
            break;
        }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif

#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
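    /* clock_nanosleep: arg3 is the requested interval and arg4, when
     * non-NULL, receives the remaining time.  The timespec is converted
     * to host format for the call and converted back for the guest
     * afterwards.
     */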
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                                   target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        goto unimplemented_nowarn;
#endif

#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif

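    /* POSIX message queues are forwarded to the host's mq_* calls; the
     * struct mq_attr used by mq_open and mq_getsetattr is converted with
     * copy_from_user_mq_attr()/copy_to_user_mq_attr(), and the timed
     * send/receive variants convert the timespec in both directions.
     */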
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr(&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            }
            unlock_user(p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
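    /* sync_file_range passes two 64-bit offsets; on 32-bit ABIs these
     * arrive as register pairs that are reassembled with
     * target_offset64(), and MIPS shifts everything along by one slot
     * because its 32-bit ABI aligns 64-bit arguments to register pairs.
     * sync_file_range2 is the same call with the flags argument moved
     * ahead of the offsets.
     */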
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

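    /* epoll_wait/epoll_pwait: the guest's event array is locked for
     * writing, a host-side array of the same maxevents length is
     * allocated on the stack, and each returned event is byte-swapped
     * back into guest format before the buffer is unlocked.
     */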
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        break;
    }
#endif

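    /* POSIX timers: host timer_t handles are kept in the g_posix_timers
     * table and the guest is handed back a small index (tagged with
     * 0xcafe0000 by timer_create), so the timer_* cases below mask the
     * incoming id down to 16 bits and range-check it against the table.
     */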
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
        struct target_sigevent *ptarget_sevp;
        struct target_timer_t *ptarget_timer;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
                    goto efault;
                }

                host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
                host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);

                phost_sevp = &host_sevp;
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
                    goto efault;
                }
                ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
                unlock_user_struct(ptarget_timer, arg3, 1);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        arg1 &= 0xffff;
        if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                      timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4) {
                host_to_target_itimerspec(arg4, &hspec_old);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        arg1 &= 0xffff;
        if (!arg2) {
            return -TARGET_EFAULT;
        } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_getoverrun(htimer));
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[arg1] = 0;
        }
        break;
    }
#endif

    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
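    /* All syscall cases funnel through here: "fail" logs the (possibly
     * negative errno) return value for strace, and "efault" converts a
     * bad guest pointer into -TARGET_EFAULT before taking the same path.
     */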
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}