1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <stdarg.h> 23 #include <string.h> 24 #include <elf.h> 25 #include <endian.h> 26 #include <errno.h> 27 #include <unistd.h> 28 #include <fcntl.h> 29 #include <time.h> 30 #include <limits.h> 31 #include <grp.h> 32 #include <sys/types.h> 33 #include <sys/ipc.h> 34 #include <sys/msg.h> 35 #include <sys/wait.h> 36 #include <sys/time.h> 37 #include <sys/stat.h> 38 #include <sys/mount.h> 39 #include <sys/file.h> 40 #include <sys/fsuid.h> 41 #include <sys/personality.h> 42 #include <sys/prctl.h> 43 #include <sys/resource.h> 44 #include <sys/mman.h> 45 #include <sys/swap.h> 46 #include <signal.h> 47 #include <sched.h> 48 #ifdef __ia64__ 49 int __clone2(int (*fn)(void *), void *child_stack_base, 50 size_t stack_size, int flags, void *arg, ...); 51 #endif 52 #include <sys/socket.h> 53 #include <sys/un.h> 54 #include <sys/uio.h> 55 #include <sys/poll.h> 56 #include <sys/times.h> 57 #include <sys/shm.h> 58 #include <sys/sem.h> 59 #include <sys/statfs.h> 60 #include <utime.h> 61 #include <sys/sysinfo.h> 62 #include <sys/utsname.h> 63 //#include <sys/user.h> 64 #include <netinet/ip.h> 65 #include <netinet/tcp.h> 66 #include <linux/wireless.h> 67 #include <linux/icmp.h> 68 #include "qemu-common.h" 69 #ifdef TARGET_GPROF 70 #include <sys/gmon.h> 71 #endif 72 #ifdef CONFIG_EVENTFD 73 #include <sys/eventfd.h> 74 #endif 75 #ifdef CONFIG_EPOLL 76 #include <sys/epoll.h> 77 #endif 78 #ifdef CONFIG_ATTR 79 #include "qemu/xattr.h" 80 #endif 81 #ifdef CONFIG_SENDFILE 82 #include <sys/sendfile.h> 83 #endif 84 85 #define termios host_termios 86 #define winsize host_winsize 87 #define termio host_termio 88 #define sgttyb host_sgttyb /* same as target */ 89 #define tchars host_tchars /* same as target */ 90 #define ltchars host_ltchars /* same as target */ 91 92 #include <linux/termios.h> 93 #include <linux/unistd.h> 94 #include <linux/utsname.h> 95 #include <linux/cdrom.h> 96 #include <linux/hdreg.h> 97 #include <linux/soundcard.h> 98 #include <linux/kd.h> 99 #include <linux/mtio.h> 100 #include <linux/fs.h> 101 #if defined(CONFIG_FIEMAP) 102 #include <linux/fiemap.h> 103 #endif 104 #include <linux/fb.h> 105 #include <linux/vt.h> 106 #include <linux/dm-ioctl.h> 107 #include <linux/reboot.h> 108 #include <linux/route.h> 109 #include <linux/filter.h> 110 #include "linux_loop.h" 111 #include "cpu-uname.h" 112 113 #include "qemu.h" 114 115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ 116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) 117 118 //#define DEBUG 119 120 //#include <linux/msdos_fs.h> 121 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 122 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 123 124 125 #undef _syscall0 126 #undef _syscall1 127 
#undef _syscall2 128 #undef _syscall3 129 #undef _syscall4 130 #undef _syscall5 131 #undef _syscall6 132 133 #define _syscall0(type,name) \ 134 static type name (void) \ 135 { \ 136 return syscall(__NR_##name); \ 137 } 138 139 #define _syscall1(type,name,type1,arg1) \ 140 static type name (type1 arg1) \ 141 { \ 142 return syscall(__NR_##name, arg1); \ 143 } 144 145 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 146 static type name (type1 arg1,type2 arg2) \ 147 { \ 148 return syscall(__NR_##name, arg1, arg2); \ 149 } 150 151 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 152 static type name (type1 arg1,type2 arg2,type3 arg3) \ 153 { \ 154 return syscall(__NR_##name, arg1, arg2, arg3); \ 155 } 156 157 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 159 { \ 160 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 161 } 162 163 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 164 type5,arg5) \ 165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 166 { \ 167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 168 } 169 170 171 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 172 type5,arg5,type6,arg6) \ 173 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 174 type6 arg6) \ 175 { \ 176 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 177 } 178 179 180 #define __NR_sys_uname __NR_uname 181 #define __NR_sys_getcwd1 __NR_getcwd 182 #define __NR_sys_getdents __NR_getdents 183 #define __NR_sys_getdents64 __NR_getdents64 184 #define __NR_sys_getpriority __NR_getpriority 185 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 186 #define __NR_sys_syslog __NR_syslog 187 #define __NR_sys_tgkill __NR_tgkill 188 #define __NR_sys_tkill __NR_tkill 189 #define __NR_sys_futex __NR_futex 190 #define __NR_sys_inotify_init __NR_inotify_init 191 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 192 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 193 194 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 195 defined(__s390x__) 196 #define __NR__llseek __NR_lseek 197 #endif 198 199 #ifdef __NR_gettid 200 _syscall0(int, gettid) 201 #else 202 /* This is a replacement for the host gettid() and must return a host 203 errno. 
*/ 204 static int gettid(void) { 205 return -ENOSYS; 206 } 207 #endif 208 #ifdef __NR_getdents 209 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 210 #endif 211 #if !defined(__NR_getdents) || \ 212 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 213 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 214 #endif 215 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 216 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 217 loff_t *, res, uint, wh); 218 #endif 219 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo) 220 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 221 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 222 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig) 223 #endif 224 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 225 _syscall2(int,sys_tkill,int,tid,int,sig) 226 #endif 227 #ifdef __NR_exit_group 228 _syscall1(int,exit_group,int,error_code) 229 #endif 230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 231 _syscall1(int,set_tid_address,int *,tidptr) 232 #endif 233 #if defined(TARGET_NR_futex) && defined(__NR_futex) 234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 235 const struct timespec *,timeout,int *,uaddr2,int,val3) 236 #endif 237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 239 unsigned long *, user_mask_ptr); 240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 242 unsigned long *, user_mask_ptr); 243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 244 void *, arg); 245 246 static bitmask_transtbl fcntl_flags_tbl[] = { 247 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 248 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 249 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 250 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 251 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 252 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 253 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 254 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 255 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 256 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 257 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 258 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 259 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 260 #if defined(O_DIRECT) 261 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 262 #endif 263 #if defined(O_NOATIME) 264 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 265 #endif 266 #if defined(O_CLOEXEC) 267 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 268 #endif 269 #if defined(O_PATH) 270 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 271 #endif 272 /* Don't terminate the list prematurely on 64-bit host+guest. 
*/ 273 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 274 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 275 #endif 276 { 0, 0, 0, 0 } 277 }; 278 279 #define COPY_UTSNAME_FIELD(dest, src) \ 280 do { \ 281 /* __NEW_UTS_LEN doesn't include terminating null */ \ 282 (void) strncpy((dest), (src), __NEW_UTS_LEN); \ 283 (dest)[__NEW_UTS_LEN] = '\0'; \ 284 } while (0) 285 286 static int sys_uname(struct new_utsname *buf) 287 { 288 struct utsname uts_buf; 289 290 if (uname(&uts_buf) < 0) 291 return (-1); 292 293 /* 294 * Just in case these have some differences, we 295 * translate utsname to new_utsname (which is the 296 * struct linux kernel uses). 297 */ 298 299 memset(buf, 0, sizeof(*buf)); 300 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname); 301 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename); 302 COPY_UTSNAME_FIELD(buf->release, uts_buf.release); 303 COPY_UTSNAME_FIELD(buf->version, uts_buf.version); 304 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine); 305 #ifdef _GNU_SOURCE 306 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname); 307 #endif 308 return (0); 309 310 #undef COPY_UTSNAME_FIELD 311 } 312 313 static int sys_getcwd1(char *buf, size_t size) 314 { 315 if (getcwd(buf, size) == NULL) { 316 /* getcwd() sets errno */ 317 return (-1); 318 } 319 return strlen(buf)+1; 320 } 321 322 #ifdef TARGET_NR_openat 323 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode) 324 { 325 /* 326 * open(2) has extra parameter 'mode' when called with 327 * flag O_CREAT. 328 */ 329 if ((flags & O_CREAT) != 0) { 330 return (openat(dirfd, pathname, flags, mode)); 331 } 332 return (openat(dirfd, pathname, flags)); 333 } 334 #endif 335 336 #ifdef TARGET_NR_utimensat 337 #ifdef CONFIG_UTIMENSAT 338 static int sys_utimensat(int dirfd, const char *pathname, 339 const struct timespec times[2], int flags) 340 { 341 if (pathname == NULL) 342 return futimens(dirfd, times); 343 else 344 return utimensat(dirfd, pathname, times, flags); 345 } 346 #elif defined(__NR_utimensat) 347 #define __NR_sys_utimensat __NR_utimensat 348 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 349 const struct timespec *,tsp,int,flags) 350 #else 351 static int sys_utimensat(int dirfd, const char *pathname, 352 const struct timespec times[2], int flags) 353 { 354 errno = ENOSYS; 355 return -1; 356 } 357 #endif 358 #endif /* TARGET_NR_utimensat */ 359 360 #ifdef CONFIG_INOTIFY 361 #include <sys/inotify.h> 362 363 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 364 static int sys_inotify_init(void) 365 { 366 return (inotify_init()); 367 } 368 #endif 369 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 370 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 371 { 372 return (inotify_add_watch(fd, pathname, mask)); 373 } 374 #endif 375 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 376 static int sys_inotify_rm_watch(int fd, int32_t wd) 377 { 378 return (inotify_rm_watch(fd, wd)); 379 } 380 #endif 381 #ifdef CONFIG_INOTIFY1 382 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 383 static int sys_inotify_init1(int flags) 384 { 385 return (inotify_init1(flags)); 386 } 387 #endif 388 #endif 389 #else 390 /* Userspace can usually survive runtime without inotify */ 391 #undef TARGET_NR_inotify_init 392 #undef TARGET_NR_inotify_init1 393 #undef TARGET_NR_inotify_add_watch 394 #undef TARGET_NR_inotify_rm_watch 395 #endif /* CONFIG_INOTIFY */ 396 397 #if 
defined(TARGET_NR_ppoll) 398 #ifndef __NR_ppoll 399 # define __NR_ppoll -1 400 #endif 401 #define __NR_sys_ppoll __NR_ppoll 402 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds, 403 struct timespec *, timeout, const __sigset_t *, sigmask, 404 size_t, sigsetsize) 405 #endif 406 407 #if defined(TARGET_NR_pselect6) 408 #ifndef __NR_pselect6 409 # define __NR_pselect6 -1 410 #endif 411 #define __NR_sys_pselect6 __NR_pselect6 412 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, 413 fd_set *, exceptfds, struct timespec *, timeout, void *, sig); 414 #endif 415 416 #if defined(TARGET_NR_prlimit64) 417 #ifndef __NR_prlimit64 418 # define __NR_prlimit64 -1 419 #endif 420 #define __NR_sys_prlimit64 __NR_prlimit64 421 /* The glibc rlimit structure may not be that used by the underlying syscall */ 422 struct host_rlimit64 { 423 uint64_t rlim_cur; 424 uint64_t rlim_max; 425 }; 426 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 427 const struct host_rlimit64 *, new_limit, 428 struct host_rlimit64 *, old_limit) 429 #endif 430 431 432 #if defined(TARGET_NR_timer_create) 433 /* Maxiumum of 32 active POSIX timers allowed at any one time. */ 434 static timer_t g_posix_timers[32] = { 0, } ; 435 436 static inline int next_free_host_timer(void) 437 { 438 int k ; 439 /* FIXME: Does finding the next free slot require a lock? */ 440 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) { 441 if (g_posix_timers[k] == 0) { 442 g_posix_timers[k] = (timer_t) 1; 443 return k; 444 } 445 } 446 return -1; 447 } 448 #endif 449 450 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ 451 #ifdef TARGET_ARM 452 static inline int regpairs_aligned(void *cpu_env) { 453 return ((((CPUARMState *)cpu_env)->eabi) == 1) ; 454 } 455 #elif defined(TARGET_MIPS) 456 static inline int regpairs_aligned(void *cpu_env) { return 1; } 457 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64) 458 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs 459 * of registers which translates to the same as ARM/MIPS, because we start with 460 * r3 as arg1 */ 461 static inline int regpairs_aligned(void *cpu_env) { return 1; } 462 #else 463 static inline int regpairs_aligned(void *cpu_env) { return 0; } 464 #endif 465 466 #define ERRNO_TABLE_SIZE 1200 467 468 /* target_to_host_errno_table[] is initialized from 469 * host_to_target_errno_table[] in syscall_init(). */ 470 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 471 }; 472 473 /* 474 * This list is the union of errno values overridden in asm-<arch>/errno.h 475 * minus the errnos that are not actually generic to all archs. 
476 */ 477 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 478 [EIDRM] = TARGET_EIDRM, 479 [ECHRNG] = TARGET_ECHRNG, 480 [EL2NSYNC] = TARGET_EL2NSYNC, 481 [EL3HLT] = TARGET_EL3HLT, 482 [EL3RST] = TARGET_EL3RST, 483 [ELNRNG] = TARGET_ELNRNG, 484 [EUNATCH] = TARGET_EUNATCH, 485 [ENOCSI] = TARGET_ENOCSI, 486 [EL2HLT] = TARGET_EL2HLT, 487 [EDEADLK] = TARGET_EDEADLK, 488 [ENOLCK] = TARGET_ENOLCK, 489 [EBADE] = TARGET_EBADE, 490 [EBADR] = TARGET_EBADR, 491 [EXFULL] = TARGET_EXFULL, 492 [ENOANO] = TARGET_ENOANO, 493 [EBADRQC] = TARGET_EBADRQC, 494 [EBADSLT] = TARGET_EBADSLT, 495 [EBFONT] = TARGET_EBFONT, 496 [ENOSTR] = TARGET_ENOSTR, 497 [ENODATA] = TARGET_ENODATA, 498 [ETIME] = TARGET_ETIME, 499 [ENOSR] = TARGET_ENOSR, 500 [ENONET] = TARGET_ENONET, 501 [ENOPKG] = TARGET_ENOPKG, 502 [EREMOTE] = TARGET_EREMOTE, 503 [ENOLINK] = TARGET_ENOLINK, 504 [EADV] = TARGET_EADV, 505 [ESRMNT] = TARGET_ESRMNT, 506 [ECOMM] = TARGET_ECOMM, 507 [EPROTO] = TARGET_EPROTO, 508 [EDOTDOT] = TARGET_EDOTDOT, 509 [EMULTIHOP] = TARGET_EMULTIHOP, 510 [EBADMSG] = TARGET_EBADMSG, 511 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 512 [EOVERFLOW] = TARGET_EOVERFLOW, 513 [ENOTUNIQ] = TARGET_ENOTUNIQ, 514 [EBADFD] = TARGET_EBADFD, 515 [EREMCHG] = TARGET_EREMCHG, 516 [ELIBACC] = TARGET_ELIBACC, 517 [ELIBBAD] = TARGET_ELIBBAD, 518 [ELIBSCN] = TARGET_ELIBSCN, 519 [ELIBMAX] = TARGET_ELIBMAX, 520 [ELIBEXEC] = TARGET_ELIBEXEC, 521 [EILSEQ] = TARGET_EILSEQ, 522 [ENOSYS] = TARGET_ENOSYS, 523 [ELOOP] = TARGET_ELOOP, 524 [ERESTART] = TARGET_ERESTART, 525 [ESTRPIPE] = TARGET_ESTRPIPE, 526 [ENOTEMPTY] = TARGET_ENOTEMPTY, 527 [EUSERS] = TARGET_EUSERS, 528 [ENOTSOCK] = TARGET_ENOTSOCK, 529 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 530 [EMSGSIZE] = TARGET_EMSGSIZE, 531 [EPROTOTYPE] = TARGET_EPROTOTYPE, 532 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 533 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 534 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 535 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 536 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 537 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 538 [EADDRINUSE] = TARGET_EADDRINUSE, 539 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 540 [ENETDOWN] = TARGET_ENETDOWN, 541 [ENETUNREACH] = TARGET_ENETUNREACH, 542 [ENETRESET] = TARGET_ENETRESET, 543 [ECONNABORTED] = TARGET_ECONNABORTED, 544 [ECONNRESET] = TARGET_ECONNRESET, 545 [ENOBUFS] = TARGET_ENOBUFS, 546 [EISCONN] = TARGET_EISCONN, 547 [ENOTCONN] = TARGET_ENOTCONN, 548 [EUCLEAN] = TARGET_EUCLEAN, 549 [ENOTNAM] = TARGET_ENOTNAM, 550 [ENAVAIL] = TARGET_ENAVAIL, 551 [EISNAM] = TARGET_EISNAM, 552 [EREMOTEIO] = TARGET_EREMOTEIO, 553 [ESHUTDOWN] = TARGET_ESHUTDOWN, 554 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 555 [ETIMEDOUT] = TARGET_ETIMEDOUT, 556 [ECONNREFUSED] = TARGET_ECONNREFUSED, 557 [EHOSTDOWN] = TARGET_EHOSTDOWN, 558 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 559 [EALREADY] = TARGET_EALREADY, 560 [EINPROGRESS] = TARGET_EINPROGRESS, 561 [ESTALE] = TARGET_ESTALE, 562 [ECANCELED] = TARGET_ECANCELED, 563 [ENOMEDIUM] = TARGET_ENOMEDIUM, 564 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 565 #ifdef ENOKEY 566 [ENOKEY] = TARGET_ENOKEY, 567 #endif 568 #ifdef EKEYEXPIRED 569 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 570 #endif 571 #ifdef EKEYREVOKED 572 [EKEYREVOKED] = TARGET_EKEYREVOKED, 573 #endif 574 #ifdef EKEYREJECTED 575 [EKEYREJECTED] = TARGET_EKEYREJECTED, 576 #endif 577 #ifdef EOWNERDEAD 578 [EOWNERDEAD] = TARGET_EOWNERDEAD, 579 #endif 580 #ifdef ENOTRECOVERABLE 581 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 582 #endif 583 }; 584 585 static inline int host_to_target_errno(int err) 586 { 587 
if(host_to_target_errno_table[err]) 588 return host_to_target_errno_table[err]; 589 return err; 590 } 591 592 static inline int target_to_host_errno(int err) 593 { 594 if (target_to_host_errno_table[err]) 595 return target_to_host_errno_table[err]; 596 return err; 597 } 598 599 static inline abi_long get_errno(abi_long ret) 600 { 601 if (ret == -1) 602 return -host_to_target_errno(errno); 603 else 604 return ret; 605 } 606 607 static inline int is_error(abi_long ret) 608 { 609 return (abi_ulong)ret >= (abi_ulong)(-4096); 610 } 611 612 char *target_strerror(int err) 613 { 614 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) { 615 return NULL; 616 } 617 return strerror(target_to_host_errno(err)); 618 } 619 620 static abi_ulong target_brk; 621 static abi_ulong target_original_brk; 622 static abi_ulong brk_page; 623 624 void target_set_brk(abi_ulong new_brk) 625 { 626 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); 627 brk_page = HOST_PAGE_ALIGN(target_brk); 628 } 629 630 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) 631 #define DEBUGF_BRK(message, args...) 632 633 /* do_brk() must return target values and target errnos. */ 634 abi_long do_brk(abi_ulong new_brk) 635 { 636 abi_long mapped_addr; 637 int new_alloc_size; 638 639 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk); 640 641 if (!new_brk) { 642 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk); 643 return target_brk; 644 } 645 if (new_brk < target_original_brk) { 646 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n", 647 target_brk); 648 return target_brk; 649 } 650 651 /* If the new brk is less than the highest page reserved to the 652 * target heap allocation, set it and we're almost done... */ 653 if (new_brk <= brk_page) { 654 /* Heap contents are initialized to zero, as for anonymous 655 * mapped pages. */ 656 if (new_brk > target_brk) { 657 memset(g2h(target_brk), 0, new_brk - target_brk); 658 } 659 target_brk = new_brk; 660 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk); 661 return target_brk; 662 } 663 664 /* We need to allocate more memory after the brk... Note that 665 * we don't use MAP_FIXED because that will map over the top of 666 * any existing mapping (like the one with the host libc or qemu 667 * itself); instead we treat "mapped but at wrong address" as 668 * a failure and unmap again. 669 */ 670 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page); 671 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, 672 PROT_READ|PROT_WRITE, 673 MAP_ANON|MAP_PRIVATE, 0, 0)); 674 675 if (mapped_addr == brk_page) { 676 /* Heap contents are initialized to zero, as for anonymous 677 * mapped pages. Technically the new pages are already 678 * initialized to zero since they *are* anonymous mapped 679 * pages, however we have to take care with the contents that 680 * come from the remaining part of the previous page: it may 681 * contains garbage data due to a previous heap usage (grown 682 * then shrunken). */ 683 memset(g2h(target_brk), 0, brk_page - target_brk); 684 685 target_brk = new_brk; 686 brk_page = HOST_PAGE_ALIGN(target_brk); 687 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n", 688 target_brk); 689 return target_brk; 690 } else if (mapped_addr != -1) { 691 /* Mapped but at wrong address, meaning there wasn't actually 692 * enough space for this brk. 
693 */ 694 target_munmap(mapped_addr, new_alloc_size); 695 mapped_addr = -1; 696 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk); 697 } 698 else { 699 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk); 700 } 701 702 #if defined(TARGET_ALPHA) 703 /* We (partially) emulate OSF/1 on Alpha, which requires we 704 return a proper errno, not an unchanged brk value. */ 705 return -TARGET_ENOMEM; 706 #endif 707 /* For everything else, return the previous break. */ 708 return target_brk; 709 } 710 711 static inline abi_long copy_from_user_fdset(fd_set *fds, 712 abi_ulong target_fds_addr, 713 int n) 714 { 715 int i, nw, j, k; 716 abi_ulong b, *target_fds; 717 718 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 719 if (!(target_fds = lock_user(VERIFY_READ, 720 target_fds_addr, 721 sizeof(abi_ulong) * nw, 722 1))) 723 return -TARGET_EFAULT; 724 725 FD_ZERO(fds); 726 k = 0; 727 for (i = 0; i < nw; i++) { 728 /* grab the abi_ulong */ 729 __get_user(b, &target_fds[i]); 730 for (j = 0; j < TARGET_ABI_BITS; j++) { 731 /* check the bit inside the abi_ulong */ 732 if ((b >> j) & 1) 733 FD_SET(k, fds); 734 k++; 735 } 736 } 737 738 unlock_user(target_fds, target_fds_addr, 0); 739 740 return 0; 741 } 742 743 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr, 744 abi_ulong target_fds_addr, 745 int n) 746 { 747 if (target_fds_addr) { 748 if (copy_from_user_fdset(fds, target_fds_addr, n)) 749 return -TARGET_EFAULT; 750 *fds_ptr = fds; 751 } else { 752 *fds_ptr = NULL; 753 } 754 return 0; 755 } 756 757 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr, 758 const fd_set *fds, 759 int n) 760 { 761 int i, nw, j, k; 762 abi_long v; 763 abi_ulong *target_fds; 764 765 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 766 if (!(target_fds = lock_user(VERIFY_WRITE, 767 target_fds_addr, 768 sizeof(abi_ulong) * nw, 769 0))) 770 return -TARGET_EFAULT; 771 772 k = 0; 773 for (i = 0; i < nw; i++) { 774 v = 0; 775 for (j = 0; j < TARGET_ABI_BITS; j++) { 776 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j); 777 k++; 778 } 779 __put_user(v, &target_fds[i]); 780 } 781 782 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw); 783 784 return 0; 785 } 786 787 #if defined(__alpha__) 788 #define HOST_HZ 1024 789 #else 790 #define HOST_HZ 100 791 #endif 792 793 static inline abi_long host_to_target_clock_t(long ticks) 794 { 795 #if HOST_HZ == TARGET_HZ 796 return ticks; 797 #else 798 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ; 799 #endif 800 } 801 802 static inline abi_long host_to_target_rusage(abi_ulong target_addr, 803 const struct rusage *rusage) 804 { 805 struct target_rusage *target_rusage; 806 807 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) 808 return -TARGET_EFAULT; 809 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec); 810 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec); 811 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec); 812 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec); 813 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss); 814 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss); 815 target_rusage->ru_idrss = tswapal(rusage->ru_idrss); 816 target_rusage->ru_isrss = tswapal(rusage->ru_isrss); 817 target_rusage->ru_minflt = tswapal(rusage->ru_minflt); 818 target_rusage->ru_majflt = tswapal(rusage->ru_majflt); 819 target_rusage->ru_nswap = tswapal(rusage->ru_nswap); 820 target_rusage->ru_inblock = tswapal(rusage->ru_inblock); 821 
target_rusage->ru_oublock = tswapal(rusage->ru_oublock); 822 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd); 823 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv); 824 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals); 825 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw); 826 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw); 827 unlock_user_struct(target_rusage, target_addr, 1); 828 829 return 0; 830 } 831 832 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim) 833 { 834 abi_ulong target_rlim_swap; 835 rlim_t result; 836 837 target_rlim_swap = tswapal(target_rlim); 838 if (target_rlim_swap == TARGET_RLIM_INFINITY) 839 return RLIM_INFINITY; 840 841 result = target_rlim_swap; 842 if (target_rlim_swap != (rlim_t)result) 843 return RLIM_INFINITY; 844 845 return result; 846 } 847 848 static inline abi_ulong host_to_target_rlim(rlim_t rlim) 849 { 850 abi_ulong target_rlim_swap; 851 abi_ulong result; 852 853 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) 854 target_rlim_swap = TARGET_RLIM_INFINITY; 855 else 856 target_rlim_swap = rlim; 857 result = tswapal(target_rlim_swap); 858 859 return result; 860 } 861 862 static inline int target_to_host_resource(int code) 863 { 864 switch (code) { 865 case TARGET_RLIMIT_AS: 866 return RLIMIT_AS; 867 case TARGET_RLIMIT_CORE: 868 return RLIMIT_CORE; 869 case TARGET_RLIMIT_CPU: 870 return RLIMIT_CPU; 871 case TARGET_RLIMIT_DATA: 872 return RLIMIT_DATA; 873 case TARGET_RLIMIT_FSIZE: 874 return RLIMIT_FSIZE; 875 case TARGET_RLIMIT_LOCKS: 876 return RLIMIT_LOCKS; 877 case TARGET_RLIMIT_MEMLOCK: 878 return RLIMIT_MEMLOCK; 879 case TARGET_RLIMIT_MSGQUEUE: 880 return RLIMIT_MSGQUEUE; 881 case TARGET_RLIMIT_NICE: 882 return RLIMIT_NICE; 883 case TARGET_RLIMIT_NOFILE: 884 return RLIMIT_NOFILE; 885 case TARGET_RLIMIT_NPROC: 886 return RLIMIT_NPROC; 887 case TARGET_RLIMIT_RSS: 888 return RLIMIT_RSS; 889 case TARGET_RLIMIT_RTPRIO: 890 return RLIMIT_RTPRIO; 891 case TARGET_RLIMIT_SIGPENDING: 892 return RLIMIT_SIGPENDING; 893 case TARGET_RLIMIT_STACK: 894 return RLIMIT_STACK; 895 default: 896 return code; 897 } 898 } 899 900 static inline abi_long copy_from_user_timeval(struct timeval *tv, 901 abi_ulong target_tv_addr) 902 { 903 struct target_timeval *target_tv; 904 905 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) 906 return -TARGET_EFAULT; 907 908 __get_user(tv->tv_sec, &target_tv->tv_sec); 909 __get_user(tv->tv_usec, &target_tv->tv_usec); 910 911 unlock_user_struct(target_tv, target_tv_addr, 0); 912 913 return 0; 914 } 915 916 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 917 const struct timeval *tv) 918 { 919 struct target_timeval *target_tv; 920 921 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) 922 return -TARGET_EFAULT; 923 924 __put_user(tv->tv_sec, &target_tv->tv_sec); 925 __put_user(tv->tv_usec, &target_tv->tv_usec); 926 927 unlock_user_struct(target_tv, target_tv_addr, 1); 928 929 return 0; 930 } 931 932 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 933 #include <mqueue.h> 934 935 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr, 936 abi_ulong target_mq_attr_addr) 937 { 938 struct target_mq_attr *target_mq_attr; 939 940 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 941 target_mq_attr_addr, 1)) 942 return -TARGET_EFAULT; 943 944 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 945 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 946 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 947 
__get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 948 949 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 950 951 return 0; 952 } 953 954 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 955 const struct mq_attr *attr) 956 { 957 struct target_mq_attr *target_mq_attr; 958 959 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 960 target_mq_attr_addr, 0)) 961 return -TARGET_EFAULT; 962 963 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 964 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 965 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 966 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 967 968 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 969 970 return 0; 971 } 972 #endif 973 974 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 975 /* do_select() must return target values and target errnos. */ 976 static abi_long do_select(int n, 977 abi_ulong rfd_addr, abi_ulong wfd_addr, 978 abi_ulong efd_addr, abi_ulong target_tv_addr) 979 { 980 fd_set rfds, wfds, efds; 981 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 982 struct timeval tv, *tv_ptr; 983 abi_long ret; 984 985 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 986 if (ret) { 987 return ret; 988 } 989 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 990 if (ret) { 991 return ret; 992 } 993 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 994 if (ret) { 995 return ret; 996 } 997 998 if (target_tv_addr) { 999 if (copy_from_user_timeval(&tv, target_tv_addr)) 1000 return -TARGET_EFAULT; 1001 tv_ptr = &tv; 1002 } else { 1003 tv_ptr = NULL; 1004 } 1005 1006 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr)); 1007 1008 if (!is_error(ret)) { 1009 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1010 return -TARGET_EFAULT; 1011 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1012 return -TARGET_EFAULT; 1013 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1014 return -TARGET_EFAULT; 1015 1016 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv)) 1017 return -TARGET_EFAULT; 1018 } 1019 1020 return ret; 1021 } 1022 #endif 1023 1024 static abi_long do_pipe2(int host_pipe[], int flags) 1025 { 1026 #ifdef CONFIG_PIPE2 1027 return pipe2(host_pipe, flags); 1028 #else 1029 return -ENOSYS; 1030 #endif 1031 } 1032 1033 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, 1034 int flags, int is_pipe2) 1035 { 1036 int host_pipe[2]; 1037 abi_long ret; 1038 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); 1039 1040 if (is_error(ret)) 1041 return get_errno(ret); 1042 1043 /* Several targets have special calling conventions for the original 1044 pipe syscall, but didn't replicate this into the pipe2 syscall. 
*/ 1045 if (!is_pipe2) { 1046 #if defined(TARGET_ALPHA) 1047 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1]; 1048 return host_pipe[0]; 1049 #elif defined(TARGET_MIPS) 1050 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; 1051 return host_pipe[0]; 1052 #elif defined(TARGET_SH4) 1053 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; 1054 return host_pipe[0]; 1055 #elif defined(TARGET_SPARC) 1056 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1]; 1057 return host_pipe[0]; 1058 #endif 1059 } 1060 1061 if (put_user_s32(host_pipe[0], pipedes) 1062 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0]))) 1063 return -TARGET_EFAULT; 1064 return get_errno(ret); 1065 } 1066 1067 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn, 1068 abi_ulong target_addr, 1069 socklen_t len) 1070 { 1071 struct target_ip_mreqn *target_smreqn; 1072 1073 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1); 1074 if (!target_smreqn) 1075 return -TARGET_EFAULT; 1076 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr; 1077 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr; 1078 if (len == sizeof(struct target_ip_mreqn)) 1079 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex); 1080 unlock_user(target_smreqn, target_addr, 0); 1081 1082 return 0; 1083 } 1084 1085 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr, 1086 abi_ulong target_addr, 1087 socklen_t len) 1088 { 1089 const socklen_t unix_maxlen = sizeof (struct sockaddr_un); 1090 sa_family_t sa_family; 1091 struct target_sockaddr *target_saddr; 1092 1093 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 1094 if (!target_saddr) 1095 return -TARGET_EFAULT; 1096 1097 sa_family = tswap16(target_saddr->sa_family); 1098 1099 /* Oops. The caller might send a incomplete sun_path; sun_path 1100 * must be terminated by \0 (see the manual page), but 1101 * unfortunately it is quite common to specify sockaddr_un 1102 * length as "strlen(x->sun_path)" while it should be 1103 * "strlen(...) + 1". We'll fix that here if needed. 1104 * Linux kernel has a similar feature. 
1105 */ 1106 1107 if (sa_family == AF_UNIX) { 1108 if (len < unix_maxlen && len > 0) { 1109 char *cp = (char*)target_saddr; 1110 1111 if ( cp[len-1] && !cp[len] ) 1112 len++; 1113 } 1114 if (len > unix_maxlen) 1115 len = unix_maxlen; 1116 } 1117 1118 memcpy(addr, target_saddr, len); 1119 addr->sa_family = sa_family; 1120 unlock_user(target_saddr, target_addr, 0); 1121 1122 return 0; 1123 } 1124 1125 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1126 struct sockaddr *addr, 1127 socklen_t len) 1128 { 1129 struct target_sockaddr *target_saddr; 1130 1131 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1132 if (!target_saddr) 1133 return -TARGET_EFAULT; 1134 memcpy(target_saddr, addr, len); 1135 target_saddr->sa_family = tswap16(addr->sa_family); 1136 unlock_user(target_saddr, target_addr, len); 1137 1138 return 0; 1139 } 1140 1141 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1142 struct target_msghdr *target_msgh) 1143 { 1144 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1145 abi_long msg_controllen; 1146 abi_ulong target_cmsg_addr; 1147 struct target_cmsghdr *target_cmsg; 1148 socklen_t space = 0; 1149 1150 msg_controllen = tswapal(target_msgh->msg_controllen); 1151 if (msg_controllen < sizeof (struct target_cmsghdr)) 1152 goto the_end; 1153 target_cmsg_addr = tswapal(target_msgh->msg_control); 1154 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1155 if (!target_cmsg) 1156 return -TARGET_EFAULT; 1157 1158 while (cmsg && target_cmsg) { 1159 void *data = CMSG_DATA(cmsg); 1160 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1161 1162 int len = tswapal(target_cmsg->cmsg_len) 1163 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr)); 1164 1165 space += CMSG_SPACE(len); 1166 if (space > msgh->msg_controllen) { 1167 space -= CMSG_SPACE(len); 1168 gemu_log("Host cmsg overflow\n"); 1169 break; 1170 } 1171 1172 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1173 cmsg->cmsg_level = SOL_SOCKET; 1174 } else { 1175 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1176 } 1177 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1178 cmsg->cmsg_len = CMSG_LEN(len); 1179 1180 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1181 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1182 memcpy(data, target_data, len); 1183 } else { 1184 int *fd = (int *)data; 1185 int *target_fd = (int *)target_data; 1186 int i, numfds = len / sizeof(int); 1187 1188 for (i = 0; i < numfds; i++) 1189 fd[i] = tswap32(target_fd[i]); 1190 } 1191 1192 cmsg = CMSG_NXTHDR(msgh, cmsg); 1193 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1194 } 1195 unlock_user(target_cmsg, target_cmsg_addr, 0); 1196 the_end: 1197 msgh->msg_controllen = space; 1198 return 0; 1199 } 1200 1201 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1202 struct msghdr *msgh) 1203 { 1204 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1205 abi_long msg_controllen; 1206 abi_ulong target_cmsg_addr; 1207 struct target_cmsghdr *target_cmsg; 1208 socklen_t space = 0; 1209 1210 msg_controllen = tswapal(target_msgh->msg_controllen); 1211 if (msg_controllen < sizeof (struct target_cmsghdr)) 1212 goto the_end; 1213 target_cmsg_addr = tswapal(target_msgh->msg_control); 1214 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1215 if (!target_cmsg) 1216 return -TARGET_EFAULT; 1217 1218 while (cmsg && target_cmsg) { 1219 void *data = CMSG_DATA(cmsg); 1220 void 
*target_data = TARGET_CMSG_DATA(target_cmsg); 1221 1222 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr)); 1223 1224 space += TARGET_CMSG_SPACE(len); 1225 if (space > msg_controllen) { 1226 space -= TARGET_CMSG_SPACE(len); 1227 gemu_log("Target cmsg overflow\n"); 1228 break; 1229 } 1230 1231 if (cmsg->cmsg_level == SOL_SOCKET) { 1232 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1233 } else { 1234 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1235 } 1236 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1237 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len)); 1238 1239 if ((cmsg->cmsg_level == SOL_SOCKET) && 1240 (cmsg->cmsg_type == SCM_RIGHTS)) { 1241 int *fd = (int *)data; 1242 int *target_fd = (int *)target_data; 1243 int i, numfds = len / sizeof(int); 1244 1245 for (i = 0; i < numfds; i++) 1246 target_fd[i] = tswap32(fd[i]); 1247 } else if ((cmsg->cmsg_level == SOL_SOCKET) && 1248 (cmsg->cmsg_type == SO_TIMESTAMP) && 1249 (len == sizeof(struct timeval))) { 1250 /* copy struct timeval to target */ 1251 struct timeval *tv = (struct timeval *)data; 1252 struct target_timeval *target_tv = 1253 (struct target_timeval *)target_data; 1254 1255 target_tv->tv_sec = tswapal(tv->tv_sec); 1256 target_tv->tv_usec = tswapal(tv->tv_usec); 1257 } else { 1258 gemu_log("Unsupported ancillary data: %d/%d\n", 1259 cmsg->cmsg_level, cmsg->cmsg_type); 1260 memcpy(target_data, data, len); 1261 } 1262 1263 cmsg = CMSG_NXTHDR(msgh, cmsg); 1264 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1265 } 1266 unlock_user(target_cmsg, target_cmsg_addr, space); 1267 the_end: 1268 target_msgh->msg_controllen = tswapal(space); 1269 return 0; 1270 } 1271 1272 /* do_setsockopt() Must return target values and target errnos. */ 1273 static abi_long do_setsockopt(int sockfd, int level, int optname, 1274 abi_ulong optval_addr, socklen_t optlen) 1275 { 1276 abi_long ret; 1277 int val; 1278 struct ip_mreqn *ip_mreq; 1279 struct ip_mreq_source *ip_mreq_source; 1280 1281 switch(level) { 1282 case SOL_TCP: 1283 /* TCP options all take an 'int' value. 
*/ 1284 if (optlen < sizeof(uint32_t)) 1285 return -TARGET_EINVAL; 1286 1287 if (get_user_u32(val, optval_addr)) 1288 return -TARGET_EFAULT; 1289 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1290 break; 1291 case SOL_IP: 1292 switch(optname) { 1293 case IP_TOS: 1294 case IP_TTL: 1295 case IP_HDRINCL: 1296 case IP_ROUTER_ALERT: 1297 case IP_RECVOPTS: 1298 case IP_RETOPTS: 1299 case IP_PKTINFO: 1300 case IP_MTU_DISCOVER: 1301 case IP_RECVERR: 1302 case IP_RECVTOS: 1303 #ifdef IP_FREEBIND 1304 case IP_FREEBIND: 1305 #endif 1306 case IP_MULTICAST_TTL: 1307 case IP_MULTICAST_LOOP: 1308 val = 0; 1309 if (optlen >= sizeof(uint32_t)) { 1310 if (get_user_u32(val, optval_addr)) 1311 return -TARGET_EFAULT; 1312 } else if (optlen >= 1) { 1313 if (get_user_u8(val, optval_addr)) 1314 return -TARGET_EFAULT; 1315 } 1316 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1317 break; 1318 case IP_ADD_MEMBERSHIP: 1319 case IP_DROP_MEMBERSHIP: 1320 if (optlen < sizeof (struct target_ip_mreq) || 1321 optlen > sizeof (struct target_ip_mreqn)) 1322 return -TARGET_EINVAL; 1323 1324 ip_mreq = (struct ip_mreqn *) alloca(optlen); 1325 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1326 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1327 break; 1328 1329 case IP_BLOCK_SOURCE: 1330 case IP_UNBLOCK_SOURCE: 1331 case IP_ADD_SOURCE_MEMBERSHIP: 1332 case IP_DROP_SOURCE_MEMBERSHIP: 1333 if (optlen != sizeof (struct target_ip_mreq_source)) 1334 return -TARGET_EINVAL; 1335 1336 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1337 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1338 unlock_user (ip_mreq_source, optval_addr, 0); 1339 break; 1340 1341 default: 1342 goto unimplemented; 1343 } 1344 break; 1345 case SOL_IPV6: 1346 switch (optname) { 1347 case IPV6_MTU_DISCOVER: 1348 case IPV6_MTU: 1349 case IPV6_V6ONLY: 1350 case IPV6_RECVPKTINFO: 1351 val = 0; 1352 if (optlen < sizeof(uint32_t)) { 1353 return -TARGET_EINVAL; 1354 } 1355 if (get_user_u32(val, optval_addr)) { 1356 return -TARGET_EFAULT; 1357 } 1358 ret = get_errno(setsockopt(sockfd, level, optname, 1359 &val, sizeof(val))); 1360 break; 1361 default: 1362 goto unimplemented; 1363 } 1364 break; 1365 case SOL_RAW: 1366 switch (optname) { 1367 case ICMP_FILTER: 1368 /* struct icmp_filter takes an u32 value */ 1369 if (optlen < sizeof(uint32_t)) { 1370 return -TARGET_EINVAL; 1371 } 1372 1373 if (get_user_u32(val, optval_addr)) { 1374 return -TARGET_EFAULT; 1375 } 1376 ret = get_errno(setsockopt(sockfd, level, optname, 1377 &val, sizeof(val))); 1378 break; 1379 1380 default: 1381 goto unimplemented; 1382 } 1383 break; 1384 case TARGET_SOL_SOCKET: 1385 switch (optname) { 1386 case TARGET_SO_RCVTIMEO: 1387 { 1388 struct timeval tv; 1389 1390 optname = SO_RCVTIMEO; 1391 1392 set_timeout: 1393 if (optlen != sizeof(struct target_timeval)) { 1394 return -TARGET_EINVAL; 1395 } 1396 1397 if (copy_from_user_timeval(&tv, optval_addr)) { 1398 return -TARGET_EFAULT; 1399 } 1400 1401 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 1402 &tv, sizeof(tv))); 1403 return ret; 1404 } 1405 case TARGET_SO_SNDTIMEO: 1406 optname = SO_SNDTIMEO; 1407 goto set_timeout; 1408 case TARGET_SO_ATTACH_FILTER: 1409 { 1410 struct target_sock_fprog *tfprog; 1411 struct target_sock_filter *tfilter; 1412 struct sock_fprog fprog; 1413 struct sock_filter *filter; 1414 int i; 1415 1416 if (optlen != sizeof(*tfprog)) { 1417 return -TARGET_EINVAL; 1418 } 1419 if 
(!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 1420 return -TARGET_EFAULT; 1421 } 1422 if (!lock_user_struct(VERIFY_READ, tfilter, 1423 tswapal(tfprog->filter), 0)) { 1424 unlock_user_struct(tfprog, optval_addr, 1); 1425 return -TARGET_EFAULT; 1426 } 1427 1428 fprog.len = tswap16(tfprog->len); 1429 filter = malloc(fprog.len * sizeof(*filter)); 1430 if (filter == NULL) { 1431 unlock_user_struct(tfilter, tfprog->filter, 1); 1432 unlock_user_struct(tfprog, optval_addr, 1); 1433 return -TARGET_ENOMEM; 1434 } 1435 for (i = 0; i < fprog.len; i++) { 1436 filter[i].code = tswap16(tfilter[i].code); 1437 filter[i].jt = tfilter[i].jt; 1438 filter[i].jf = tfilter[i].jf; 1439 filter[i].k = tswap32(tfilter[i].k); 1440 } 1441 fprog.filter = filter; 1442 1443 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 1444 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 1445 free(filter); 1446 1447 unlock_user_struct(tfilter, tfprog->filter, 1); 1448 unlock_user_struct(tfprog, optval_addr, 1); 1449 return ret; 1450 } 1451 /* Options with 'int' argument. */ 1452 case TARGET_SO_DEBUG: 1453 optname = SO_DEBUG; 1454 break; 1455 case TARGET_SO_REUSEADDR: 1456 optname = SO_REUSEADDR; 1457 break; 1458 case TARGET_SO_TYPE: 1459 optname = SO_TYPE; 1460 break; 1461 case TARGET_SO_ERROR: 1462 optname = SO_ERROR; 1463 break; 1464 case TARGET_SO_DONTROUTE: 1465 optname = SO_DONTROUTE; 1466 break; 1467 case TARGET_SO_BROADCAST: 1468 optname = SO_BROADCAST; 1469 break; 1470 case TARGET_SO_SNDBUF: 1471 optname = SO_SNDBUF; 1472 break; 1473 case TARGET_SO_RCVBUF: 1474 optname = SO_RCVBUF; 1475 break; 1476 case TARGET_SO_KEEPALIVE: 1477 optname = SO_KEEPALIVE; 1478 break; 1479 case TARGET_SO_OOBINLINE: 1480 optname = SO_OOBINLINE; 1481 break; 1482 case TARGET_SO_NO_CHECK: 1483 optname = SO_NO_CHECK; 1484 break; 1485 case TARGET_SO_PRIORITY: 1486 optname = SO_PRIORITY; 1487 break; 1488 #ifdef SO_BSDCOMPAT 1489 case TARGET_SO_BSDCOMPAT: 1490 optname = SO_BSDCOMPAT; 1491 break; 1492 #endif 1493 case TARGET_SO_PASSCRED: 1494 optname = SO_PASSCRED; 1495 break; 1496 case TARGET_SO_TIMESTAMP: 1497 optname = SO_TIMESTAMP; 1498 break; 1499 case TARGET_SO_RCVLOWAT: 1500 optname = SO_RCVLOWAT; 1501 break; 1502 break; 1503 default: 1504 goto unimplemented; 1505 } 1506 if (optlen < sizeof(uint32_t)) 1507 return -TARGET_EINVAL; 1508 1509 if (get_user_u32(val, optval_addr)) 1510 return -TARGET_EFAULT; 1511 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 1512 break; 1513 default: 1514 unimplemented: 1515 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname); 1516 ret = -TARGET_ENOPROTOOPT; 1517 } 1518 return ret; 1519 } 1520 1521 /* do_getsockopt() Must return target values and target errnos. 
*/ 1522 static abi_long do_getsockopt(int sockfd, int level, int optname, 1523 abi_ulong optval_addr, abi_ulong optlen) 1524 { 1525 abi_long ret; 1526 int len, val; 1527 socklen_t lv; 1528 1529 switch(level) { 1530 case TARGET_SOL_SOCKET: 1531 level = SOL_SOCKET; 1532 switch (optname) { 1533 /* These don't just return a single integer */ 1534 case TARGET_SO_LINGER: 1535 case TARGET_SO_RCVTIMEO: 1536 case TARGET_SO_SNDTIMEO: 1537 case TARGET_SO_PEERNAME: 1538 goto unimplemented; 1539 case TARGET_SO_PEERCRED: { 1540 struct ucred cr; 1541 socklen_t crlen; 1542 struct target_ucred *tcr; 1543 1544 if (get_user_u32(len, optlen)) { 1545 return -TARGET_EFAULT; 1546 } 1547 if (len < 0) { 1548 return -TARGET_EINVAL; 1549 } 1550 1551 crlen = sizeof(cr); 1552 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 1553 &cr, &crlen)); 1554 if (ret < 0) { 1555 return ret; 1556 } 1557 if (len > crlen) { 1558 len = crlen; 1559 } 1560 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 1561 return -TARGET_EFAULT; 1562 } 1563 __put_user(cr.pid, &tcr->pid); 1564 __put_user(cr.uid, &tcr->uid); 1565 __put_user(cr.gid, &tcr->gid); 1566 unlock_user_struct(tcr, optval_addr, 1); 1567 if (put_user_u32(len, optlen)) { 1568 return -TARGET_EFAULT; 1569 } 1570 break; 1571 } 1572 /* Options with 'int' argument. */ 1573 case TARGET_SO_DEBUG: 1574 optname = SO_DEBUG; 1575 goto int_case; 1576 case TARGET_SO_REUSEADDR: 1577 optname = SO_REUSEADDR; 1578 goto int_case; 1579 case TARGET_SO_TYPE: 1580 optname = SO_TYPE; 1581 goto int_case; 1582 case TARGET_SO_ERROR: 1583 optname = SO_ERROR; 1584 goto int_case; 1585 case TARGET_SO_DONTROUTE: 1586 optname = SO_DONTROUTE; 1587 goto int_case; 1588 case TARGET_SO_BROADCAST: 1589 optname = SO_BROADCAST; 1590 goto int_case; 1591 case TARGET_SO_SNDBUF: 1592 optname = SO_SNDBUF; 1593 goto int_case; 1594 case TARGET_SO_RCVBUF: 1595 optname = SO_RCVBUF; 1596 goto int_case; 1597 case TARGET_SO_KEEPALIVE: 1598 optname = SO_KEEPALIVE; 1599 goto int_case; 1600 case TARGET_SO_OOBINLINE: 1601 optname = SO_OOBINLINE; 1602 goto int_case; 1603 case TARGET_SO_NO_CHECK: 1604 optname = SO_NO_CHECK; 1605 goto int_case; 1606 case TARGET_SO_PRIORITY: 1607 optname = SO_PRIORITY; 1608 goto int_case; 1609 #ifdef SO_BSDCOMPAT 1610 case TARGET_SO_BSDCOMPAT: 1611 optname = SO_BSDCOMPAT; 1612 goto int_case; 1613 #endif 1614 case TARGET_SO_PASSCRED: 1615 optname = SO_PASSCRED; 1616 goto int_case; 1617 case TARGET_SO_TIMESTAMP: 1618 optname = SO_TIMESTAMP; 1619 goto int_case; 1620 case TARGET_SO_RCVLOWAT: 1621 optname = SO_RCVLOWAT; 1622 goto int_case; 1623 default: 1624 goto int_case; 1625 } 1626 break; 1627 case SOL_TCP: 1628 /* TCP options all take an 'int' value. 
*/ 1629 int_case: 1630 if (get_user_u32(len, optlen)) 1631 return -TARGET_EFAULT; 1632 if (len < 0) 1633 return -TARGET_EINVAL; 1634 lv = sizeof(lv); 1635 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1636 if (ret < 0) 1637 return ret; 1638 if (len > lv) 1639 len = lv; 1640 if (len == 4) { 1641 if (put_user_u32(val, optval_addr)) 1642 return -TARGET_EFAULT; 1643 } else { 1644 if (put_user_u8(val, optval_addr)) 1645 return -TARGET_EFAULT; 1646 } 1647 if (put_user_u32(len, optlen)) 1648 return -TARGET_EFAULT; 1649 break; 1650 case SOL_IP: 1651 switch(optname) { 1652 case IP_TOS: 1653 case IP_TTL: 1654 case IP_HDRINCL: 1655 case IP_ROUTER_ALERT: 1656 case IP_RECVOPTS: 1657 case IP_RETOPTS: 1658 case IP_PKTINFO: 1659 case IP_MTU_DISCOVER: 1660 case IP_RECVERR: 1661 case IP_RECVTOS: 1662 #ifdef IP_FREEBIND 1663 case IP_FREEBIND: 1664 #endif 1665 case IP_MULTICAST_TTL: 1666 case IP_MULTICAST_LOOP: 1667 if (get_user_u32(len, optlen)) 1668 return -TARGET_EFAULT; 1669 if (len < 0) 1670 return -TARGET_EINVAL; 1671 lv = sizeof(lv); 1672 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1673 if (ret < 0) 1674 return ret; 1675 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 1676 len = 1; 1677 if (put_user_u32(len, optlen) 1678 || put_user_u8(val, optval_addr)) 1679 return -TARGET_EFAULT; 1680 } else { 1681 if (len > sizeof(int)) 1682 len = sizeof(int); 1683 if (put_user_u32(len, optlen) 1684 || put_user_u32(val, optval_addr)) 1685 return -TARGET_EFAULT; 1686 } 1687 break; 1688 default: 1689 ret = -TARGET_ENOPROTOOPT; 1690 break; 1691 } 1692 break; 1693 default: 1694 unimplemented: 1695 gemu_log("getsockopt level=%d optname=%d not yet supported\n", 1696 level, optname); 1697 ret = -TARGET_EOPNOTSUPP; 1698 break; 1699 } 1700 return ret; 1701 } 1702 1703 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 1704 int count, int copy) 1705 { 1706 struct target_iovec *target_vec; 1707 struct iovec *vec; 1708 abi_ulong total_len, max_len; 1709 int i; 1710 1711 if (count == 0) { 1712 errno = 0; 1713 return NULL; 1714 } 1715 if (count < 0 || count > IOV_MAX) { 1716 errno = EINVAL; 1717 return NULL; 1718 } 1719 1720 vec = calloc(count, sizeof(struct iovec)); 1721 if (vec == NULL) { 1722 errno = ENOMEM; 1723 return NULL; 1724 } 1725 1726 target_vec = lock_user(VERIFY_READ, target_addr, 1727 count * sizeof(struct target_iovec), 1); 1728 if (target_vec == NULL) { 1729 errno = EFAULT; 1730 goto fail2; 1731 } 1732 1733 /* ??? If host page size > target page size, this will result in a 1734 value larger than what we can actually support. */ 1735 max_len = 0x7fffffff & TARGET_PAGE_MASK; 1736 total_len = 0; 1737 1738 for (i = 0; i < count; i++) { 1739 abi_ulong base = tswapal(target_vec[i].iov_base); 1740 abi_long len = tswapal(target_vec[i].iov_len); 1741 1742 if (len < 0) { 1743 errno = EINVAL; 1744 goto fail; 1745 } else if (len == 0) { 1746 /* Zero length pointer is ignored. 
*/ 1747 vec[i].iov_base = 0; 1748 } else { 1749 vec[i].iov_base = lock_user(type, base, len, copy); 1750 if (!vec[i].iov_base) { 1751 errno = EFAULT; 1752 goto fail; 1753 } 1754 if (len > max_len - total_len) { 1755 len = max_len - total_len; 1756 } 1757 } 1758 vec[i].iov_len = len; 1759 total_len += len; 1760 } 1761 1762 unlock_user(target_vec, target_addr, 0); 1763 return vec; 1764 1765 fail: 1766 free(vec); 1767 fail2: 1768 unlock_user(target_vec, target_addr, 0); 1769 return NULL; 1770 } 1771 1772 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 1773 int count, int copy) 1774 { 1775 struct target_iovec *target_vec; 1776 int i; 1777 1778 target_vec = lock_user(VERIFY_READ, target_addr, 1779 count * sizeof(struct target_iovec), 1); 1780 if (target_vec) { 1781 for (i = 0; i < count; i++) { 1782 abi_ulong base = tswapal(target_vec[i].iov_base); 1783 abi_long len = tswapal(target_vec[i].iov_base); 1784 if (len < 0) { 1785 break; 1786 } 1787 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); 1788 } 1789 unlock_user(target_vec, target_addr, 0); 1790 } 1791 1792 free(vec); 1793 } 1794 1795 static inline int target_to_host_sock_type(int *type) 1796 { 1797 int host_type = 0; 1798 int target_type = *type; 1799 1800 switch (target_type & TARGET_SOCK_TYPE_MASK) { 1801 case TARGET_SOCK_DGRAM: 1802 host_type = SOCK_DGRAM; 1803 break; 1804 case TARGET_SOCK_STREAM: 1805 host_type = SOCK_STREAM; 1806 break; 1807 default: 1808 host_type = target_type & TARGET_SOCK_TYPE_MASK; 1809 break; 1810 } 1811 if (target_type & TARGET_SOCK_CLOEXEC) { 1812 #if defined(SOCK_CLOEXEC) 1813 host_type |= SOCK_CLOEXEC; 1814 #else 1815 return -TARGET_EINVAL; 1816 #endif 1817 } 1818 if (target_type & TARGET_SOCK_NONBLOCK) { 1819 #if defined(SOCK_NONBLOCK) 1820 host_type |= SOCK_NONBLOCK; 1821 #elif !defined(O_NONBLOCK) 1822 return -TARGET_EINVAL; 1823 #endif 1824 } 1825 *type = host_type; 1826 return 0; 1827 } 1828 1829 /* Try to emulate socket type flags after socket creation. */ 1830 static int sock_flags_fixup(int fd, int target_type) 1831 { 1832 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 1833 if (target_type & TARGET_SOCK_NONBLOCK) { 1834 int flags = fcntl(fd, F_GETFL); 1835 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 1836 close(fd); 1837 return -TARGET_EINVAL; 1838 } 1839 } 1840 #endif 1841 return fd; 1842 } 1843 1844 /* do_socket() Must return target values and target errnos. */ 1845 static abi_long do_socket(int domain, int type, int protocol) 1846 { 1847 int target_type = type; 1848 int ret; 1849 1850 ret = target_to_host_sock_type(&type); 1851 if (ret) { 1852 return ret; 1853 } 1854 1855 if (domain == PF_NETLINK) 1856 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */ 1857 ret = get_errno(socket(domain, type, protocol)); 1858 if (ret >= 0) { 1859 ret = sock_flags_fixup(ret, target_type); 1860 } 1861 return ret; 1862 } 1863 1864 /* do_bind() Must return target values and target errnos. */ 1865 static abi_long do_bind(int sockfd, abi_ulong target_addr, 1866 socklen_t addrlen) 1867 { 1868 void *addr; 1869 abi_long ret; 1870 1871 if ((int)addrlen < 0) { 1872 return -TARGET_EINVAL; 1873 } 1874 1875 addr = alloca(addrlen+1); 1876 1877 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1878 if (ret) 1879 return ret; 1880 1881 return get_errno(bind(sockfd, addr, addrlen)); 1882 } 1883 1884 /* do_connect() Must return target values and target errnos. 
*/ 1885 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1886 socklen_t addrlen) 1887 { 1888 void *addr; 1889 abi_long ret; 1890 1891 if ((int)addrlen < 0) { 1892 return -TARGET_EINVAL; 1893 } 1894 1895 addr = alloca(addrlen); 1896 1897 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1898 if (ret) 1899 return ret; 1900 1901 return get_errno(connect(sockfd, addr, addrlen)); 1902 } 1903 1904 /* do_sendrecvmsg() Must return target values and target errnos. */ 1905 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1906 int flags, int send) 1907 { 1908 abi_long ret, len; 1909 struct target_msghdr *msgp; 1910 struct msghdr msg; 1911 int count; 1912 struct iovec *vec; 1913 abi_ulong target_vec; 1914 1915 /* FIXME */ 1916 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1917 msgp, 1918 target_msg, 1919 send ? 1 : 0)) 1920 return -TARGET_EFAULT; 1921 if (msgp->msg_name) { 1922 msg.msg_namelen = tswap32(msgp->msg_namelen); 1923 msg.msg_name = alloca(msg.msg_namelen); 1924 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1925 msg.msg_namelen); 1926 if (ret) { 1927 goto out2; 1928 } 1929 } else { 1930 msg.msg_name = NULL; 1931 msg.msg_namelen = 0; 1932 } 1933 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1934 msg.msg_control = alloca(msg.msg_controllen); 1935 msg.msg_flags = tswap32(msgp->msg_flags); 1936 1937 count = tswapal(msgp->msg_iovlen); 1938 target_vec = tswapal(msgp->msg_iov); 1939 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 1940 target_vec, count, send); 1941 if (vec == NULL) { 1942 ret = -host_to_target_errno(errno); 1943 goto out2; 1944 } 1945 msg.msg_iovlen = count; 1946 msg.msg_iov = vec; 1947 1948 if (send) { 1949 ret = target_to_host_cmsg(&msg, msgp); 1950 if (ret == 0) 1951 ret = get_errno(sendmsg(fd, &msg, flags)); 1952 } else { 1953 ret = get_errno(recvmsg(fd, &msg, flags)); 1954 if (!is_error(ret)) { 1955 len = ret; 1956 ret = host_to_target_cmsg(msgp, &msg); 1957 if (!is_error(ret)) { 1958 msgp->msg_namelen = tswap32(msg.msg_namelen); 1959 if (msg.msg_name != NULL) { 1960 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 1961 msg.msg_name, msg.msg_namelen); 1962 if (ret) { 1963 goto out; 1964 } 1965 } 1966 1967 ret = len; 1968 } 1969 } 1970 } 1971 1972 out: 1973 unlock_iovec(vec, target_vec, count, !send); 1974 out2: 1975 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 1976 return ret; 1977 } 1978 1979 /* If we don't have a system accept4() then just call accept. 1980 * The callsites to do_accept4() will ensure that they don't 1981 * pass a non-zero flags argument in this config. 1982 */ 1983 #ifndef CONFIG_ACCEPT4 1984 static inline int accept4(int sockfd, struct sockaddr *addr, 1985 socklen_t *addrlen, int flags) 1986 { 1987 assert(flags == 0); 1988 return accept(sockfd, addr, addrlen); 1989 } 1990 #endif 1991 1992 /* do_accept4() Must return target values and target errnos. 
*/ 1993 static abi_long do_accept4(int fd, abi_ulong target_addr, 1994 abi_ulong target_addrlen_addr, int flags) 1995 { 1996 socklen_t addrlen; 1997 void *addr; 1998 abi_long ret; 1999 2000 if (target_addr == 0) { 2001 return get_errno(accept4(fd, NULL, NULL, flags)); 2002 } 2003 2004 /* linux returns EINVAL if addrlen pointer is invalid */ 2005 if (get_user_u32(addrlen, target_addrlen_addr)) 2006 return -TARGET_EINVAL; 2007 2008 if ((int)addrlen < 0) { 2009 return -TARGET_EINVAL; 2010 } 2011 2012 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2013 return -TARGET_EINVAL; 2014 2015 addr = alloca(addrlen); 2016 2017 ret = get_errno(accept4(fd, addr, &addrlen, flags)); 2018 if (!is_error(ret)) { 2019 host_to_target_sockaddr(target_addr, addr, addrlen); 2020 if (put_user_u32(addrlen, target_addrlen_addr)) 2021 ret = -TARGET_EFAULT; 2022 } 2023 return ret; 2024 } 2025 2026 /* do_getpeername() Must return target values and target errnos. */ 2027 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2028 abi_ulong target_addrlen_addr) 2029 { 2030 socklen_t addrlen; 2031 void *addr; 2032 abi_long ret; 2033 2034 if (get_user_u32(addrlen, target_addrlen_addr)) 2035 return -TARGET_EFAULT; 2036 2037 if ((int)addrlen < 0) { 2038 return -TARGET_EINVAL; 2039 } 2040 2041 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2042 return -TARGET_EFAULT; 2043 2044 addr = alloca(addrlen); 2045 2046 ret = get_errno(getpeername(fd, addr, &addrlen)); 2047 if (!is_error(ret)) { 2048 host_to_target_sockaddr(target_addr, addr, addrlen); 2049 if (put_user_u32(addrlen, target_addrlen_addr)) 2050 ret = -TARGET_EFAULT; 2051 } 2052 return ret; 2053 } 2054 2055 /* do_getsockname() Must return target values and target errnos. */ 2056 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2057 abi_ulong target_addrlen_addr) 2058 { 2059 socklen_t addrlen; 2060 void *addr; 2061 abi_long ret; 2062 2063 if (get_user_u32(addrlen, target_addrlen_addr)) 2064 return -TARGET_EFAULT; 2065 2066 if ((int)addrlen < 0) { 2067 return -TARGET_EINVAL; 2068 } 2069 2070 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2071 return -TARGET_EFAULT; 2072 2073 addr = alloca(addrlen); 2074 2075 ret = get_errno(getsockname(fd, addr, &addrlen)); 2076 if (!is_error(ret)) { 2077 host_to_target_sockaddr(target_addr, addr, addrlen); 2078 if (put_user_u32(addrlen, target_addrlen_addr)) 2079 ret = -TARGET_EFAULT; 2080 } 2081 return ret; 2082 } 2083 2084 /* do_socketpair() Must return target values and target errnos. */ 2085 static abi_long do_socketpair(int domain, int type, int protocol, 2086 abi_ulong target_tab_addr) 2087 { 2088 int tab[2]; 2089 abi_long ret; 2090 2091 target_to_host_sock_type(&type); 2092 2093 ret = get_errno(socketpair(domain, type, protocol, tab)); 2094 if (!is_error(ret)) { 2095 if (put_user_s32(tab[0], target_tab_addr) 2096 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2097 ret = -TARGET_EFAULT; 2098 } 2099 return ret; 2100 } 2101 2102 /* do_sendto() Must return target values and target errnos. 
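 * (Illustrative only: a guest sendto(fd, buf, len, 0, &sa, salen) locks the
 *  guest buffer for reading, converts the destination sockaddr, and forwards
 *  to the host sendto(); when no destination address is supplied the plain
 *  send() path below is used instead.)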
*/ 2103 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2104 abi_ulong target_addr, socklen_t addrlen) 2105 { 2106 void *addr; 2107 void *host_msg; 2108 abi_long ret; 2109 2110 if ((int)addrlen < 0) { 2111 return -TARGET_EINVAL; 2112 } 2113 2114 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2115 if (!host_msg) 2116 return -TARGET_EFAULT; 2117 if (target_addr) { 2118 addr = alloca(addrlen); 2119 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2120 if (ret) { 2121 unlock_user(host_msg, msg, 0); 2122 return ret; 2123 } 2124 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2125 } else { 2126 ret = get_errno(send(fd, host_msg, len, flags)); 2127 } 2128 unlock_user(host_msg, msg, 0); 2129 return ret; 2130 } 2131 2132 /* do_recvfrom() Must return target values and target errnos. */ 2133 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2134 abi_ulong target_addr, 2135 abi_ulong target_addrlen) 2136 { 2137 socklen_t addrlen; 2138 void *addr; 2139 void *host_msg; 2140 abi_long ret; 2141 2142 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2143 if (!host_msg) 2144 return -TARGET_EFAULT; 2145 if (target_addr) { 2146 if (get_user_u32(addrlen, target_addrlen)) { 2147 ret = -TARGET_EFAULT; 2148 goto fail; 2149 } 2150 if ((int)addrlen < 0) { 2151 ret = -TARGET_EINVAL; 2152 goto fail; 2153 } 2154 addr = alloca(addrlen); 2155 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2156 } else { 2157 addr = NULL; /* To keep compiler quiet. */ 2158 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2159 } 2160 if (!is_error(ret)) { 2161 if (target_addr) { 2162 host_to_target_sockaddr(target_addr, addr, addrlen); 2163 if (put_user_u32(addrlen, target_addrlen)) { 2164 ret = -TARGET_EFAULT; 2165 goto fail; 2166 } 2167 } 2168 unlock_user(host_msg, msg, len); 2169 } else { 2170 fail: 2171 unlock_user(host_msg, msg, 0); 2172 } 2173 return ret; 2174 } 2175 2176 #ifdef TARGET_NR_socketcall 2177 /* do_socketcall() Must return target values and target errnos. 
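 * (Argument layout sketch, matching the unpacking code below: the guest
 *  passes a call number plus a pointer to an array of abi_ulong slots, so
 *  for SOCKOP_bind the slots are
 *      vptr + 0 * n -> sockfd,  vptr + 1 * n -> sockaddr,  vptr + 2 * n -> addrlen
 *  with n == sizeof(abi_ulong); each slot is fetched with get_user_ual()
 *  before being handed to the corresponding do_*() helper.)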
*/ 2178 static abi_long do_socketcall(int num, abi_ulong vptr) 2179 { 2180 abi_long ret; 2181 const int n = sizeof(abi_ulong); 2182 2183 switch(num) { 2184 case SOCKOP_socket: 2185 { 2186 abi_ulong domain, type, protocol; 2187 2188 if (get_user_ual(domain, vptr) 2189 || get_user_ual(type, vptr + n) 2190 || get_user_ual(protocol, vptr + 2 * n)) 2191 return -TARGET_EFAULT; 2192 2193 ret = do_socket(domain, type, protocol); 2194 } 2195 break; 2196 case SOCKOP_bind: 2197 { 2198 abi_ulong sockfd; 2199 abi_ulong target_addr; 2200 socklen_t addrlen; 2201 2202 if (get_user_ual(sockfd, vptr) 2203 || get_user_ual(target_addr, vptr + n) 2204 || get_user_ual(addrlen, vptr + 2 * n)) 2205 return -TARGET_EFAULT; 2206 2207 ret = do_bind(sockfd, target_addr, addrlen); 2208 } 2209 break; 2210 case SOCKOP_connect: 2211 { 2212 abi_ulong sockfd; 2213 abi_ulong target_addr; 2214 socklen_t addrlen; 2215 2216 if (get_user_ual(sockfd, vptr) 2217 || get_user_ual(target_addr, vptr + n) 2218 || get_user_ual(addrlen, vptr + 2 * n)) 2219 return -TARGET_EFAULT; 2220 2221 ret = do_connect(sockfd, target_addr, addrlen); 2222 } 2223 break; 2224 case SOCKOP_listen: 2225 { 2226 abi_ulong sockfd, backlog; 2227 2228 if (get_user_ual(sockfd, vptr) 2229 || get_user_ual(backlog, vptr + n)) 2230 return -TARGET_EFAULT; 2231 2232 ret = get_errno(listen(sockfd, backlog)); 2233 } 2234 break; 2235 case SOCKOP_accept: 2236 { 2237 abi_ulong sockfd; 2238 abi_ulong target_addr, target_addrlen; 2239 2240 if (get_user_ual(sockfd, vptr) 2241 || get_user_ual(target_addr, vptr + n) 2242 || get_user_ual(target_addrlen, vptr + 2 * n)) 2243 return -TARGET_EFAULT; 2244 2245 ret = do_accept4(sockfd, target_addr, target_addrlen, 0); 2246 } 2247 break; 2248 case SOCKOP_accept4: 2249 { 2250 abi_ulong sockfd; 2251 abi_ulong target_addr, target_addrlen; 2252 abi_ulong flags; 2253 2254 if (get_user_ual(sockfd, vptr) 2255 || get_user_ual(target_addr, vptr + n) 2256 || get_user_ual(target_addrlen, vptr + 2 * n) 2257 || get_user_ual(flags, vptr + 3 * n)) { 2258 return -TARGET_EFAULT; 2259 } 2260 2261 ret = do_accept4(sockfd, target_addr, target_addrlen, flags); 2262 } 2263 break; 2264 case SOCKOP_getsockname: 2265 { 2266 abi_ulong sockfd; 2267 abi_ulong target_addr, target_addrlen; 2268 2269 if (get_user_ual(sockfd, vptr) 2270 || get_user_ual(target_addr, vptr + n) 2271 || get_user_ual(target_addrlen, vptr + 2 * n)) 2272 return -TARGET_EFAULT; 2273 2274 ret = do_getsockname(sockfd, target_addr, target_addrlen); 2275 } 2276 break; 2277 case SOCKOP_getpeername: 2278 { 2279 abi_ulong sockfd; 2280 abi_ulong target_addr, target_addrlen; 2281 2282 if (get_user_ual(sockfd, vptr) 2283 || get_user_ual(target_addr, vptr + n) 2284 || get_user_ual(target_addrlen, vptr + 2 * n)) 2285 return -TARGET_EFAULT; 2286 2287 ret = do_getpeername(sockfd, target_addr, target_addrlen); 2288 } 2289 break; 2290 case SOCKOP_socketpair: 2291 { 2292 abi_ulong domain, type, protocol; 2293 abi_ulong tab; 2294 2295 if (get_user_ual(domain, vptr) 2296 || get_user_ual(type, vptr + n) 2297 || get_user_ual(protocol, vptr + 2 * n) 2298 || get_user_ual(tab, vptr + 3 * n)) 2299 return -TARGET_EFAULT; 2300 2301 ret = do_socketpair(domain, type, protocol, tab); 2302 } 2303 break; 2304 case SOCKOP_send: 2305 { 2306 abi_ulong sockfd; 2307 abi_ulong msg; 2308 size_t len; 2309 abi_ulong flags; 2310 2311 if (get_user_ual(sockfd, vptr) 2312 || get_user_ual(msg, vptr + n) 2313 || get_user_ual(len, vptr + 2 * n) 2314 || get_user_ual(flags, vptr + 3 * n)) 2315 return -TARGET_EFAULT; 2316 2317 ret = 
do_sendto(sockfd, msg, len, flags, 0, 0); 2318 } 2319 break; 2320 case SOCKOP_recv: 2321 { 2322 abi_ulong sockfd; 2323 abi_ulong msg; 2324 size_t len; 2325 abi_ulong flags; 2326 2327 if (get_user_ual(sockfd, vptr) 2328 || get_user_ual(msg, vptr + n) 2329 || get_user_ual(len, vptr + 2 * n) 2330 || get_user_ual(flags, vptr + 3 * n)) 2331 return -TARGET_EFAULT; 2332 2333 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0); 2334 } 2335 break; 2336 case SOCKOP_sendto: 2337 { 2338 abi_ulong sockfd; 2339 abi_ulong msg; 2340 size_t len; 2341 abi_ulong flags; 2342 abi_ulong addr; 2343 abi_ulong addrlen; 2344 2345 if (get_user_ual(sockfd, vptr) 2346 || get_user_ual(msg, vptr + n) 2347 || get_user_ual(len, vptr + 2 * n) 2348 || get_user_ual(flags, vptr + 3 * n) 2349 || get_user_ual(addr, vptr + 4 * n) 2350 || get_user_ual(addrlen, vptr + 5 * n)) 2351 return -TARGET_EFAULT; 2352 2353 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen); 2354 } 2355 break; 2356 case SOCKOP_recvfrom: 2357 { 2358 abi_ulong sockfd; 2359 abi_ulong msg; 2360 size_t len; 2361 abi_ulong flags; 2362 abi_ulong addr; 2363 socklen_t addrlen; 2364 2365 if (get_user_ual(sockfd, vptr) 2366 || get_user_ual(msg, vptr + n) 2367 || get_user_ual(len, vptr + 2 * n) 2368 || get_user_ual(flags, vptr + 3 * n) 2369 || get_user_ual(addr, vptr + 4 * n) 2370 || get_user_ual(addrlen, vptr + 5 * n)) 2371 return -TARGET_EFAULT; 2372 2373 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen); 2374 } 2375 break; 2376 case SOCKOP_shutdown: 2377 { 2378 abi_ulong sockfd, how; 2379 2380 if (get_user_ual(sockfd, vptr) 2381 || get_user_ual(how, vptr + n)) 2382 return -TARGET_EFAULT; 2383 2384 ret = get_errno(shutdown(sockfd, how)); 2385 } 2386 break; 2387 case SOCKOP_sendmsg: 2388 case SOCKOP_recvmsg: 2389 { 2390 abi_ulong fd; 2391 abi_ulong target_msg; 2392 abi_ulong flags; 2393 2394 if (get_user_ual(fd, vptr) 2395 || get_user_ual(target_msg, vptr + n) 2396 || get_user_ual(flags, vptr + 2 * n)) 2397 return -TARGET_EFAULT; 2398 2399 ret = do_sendrecvmsg(fd, target_msg, flags, 2400 (num == SOCKOP_sendmsg)); 2401 } 2402 break; 2403 case SOCKOP_setsockopt: 2404 { 2405 abi_ulong sockfd; 2406 abi_ulong level; 2407 abi_ulong optname; 2408 abi_ulong optval; 2409 abi_ulong optlen; 2410 2411 if (get_user_ual(sockfd, vptr) 2412 || get_user_ual(level, vptr + n) 2413 || get_user_ual(optname, vptr + 2 * n) 2414 || get_user_ual(optval, vptr + 3 * n) 2415 || get_user_ual(optlen, vptr + 4 * n)) 2416 return -TARGET_EFAULT; 2417 2418 ret = do_setsockopt(sockfd, level, optname, optval, optlen); 2419 } 2420 break; 2421 case SOCKOP_getsockopt: 2422 { 2423 abi_ulong sockfd; 2424 abi_ulong level; 2425 abi_ulong optname; 2426 abi_ulong optval; 2427 socklen_t optlen; 2428 2429 if (get_user_ual(sockfd, vptr) 2430 || get_user_ual(level, vptr + n) 2431 || get_user_ual(optname, vptr + 2 * n) 2432 || get_user_ual(optval, vptr + 3 * n) 2433 || get_user_ual(optlen, vptr + 4 * n)) 2434 return -TARGET_EFAULT; 2435 2436 ret = do_getsockopt(sockfd, level, optname, optval, optlen); 2437 } 2438 break; 2439 default: 2440 gemu_log("Unsupported socketcall: %d\n", num); 2441 ret = -TARGET_ENOSYS; 2442 break; 2443 } 2444 return ret; 2445 } 2446 #endif 2447 2448 #define N_SHM_REGIONS 32 2449 2450 static struct shm_region { 2451 abi_ulong start; 2452 abi_ulong size; 2453 } shm_regions[N_SHM_REGIONS]; 2454 2455 struct target_semid_ds 2456 { 2457 struct target_ipc_perm sem_perm; 2458 abi_ulong sem_otime; 2459 abi_ulong __unused1; 2460 abi_ulong sem_ctime; 2461 abi_ulong __unused2; 2462 abi_ulong 
sem_nsems; 2463 abi_ulong __unused3; 2464 abi_ulong __unused4; 2465 }; 2466 2467 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2468 abi_ulong target_addr) 2469 { 2470 struct target_ipc_perm *target_ip; 2471 struct target_semid_ds *target_sd; 2472 2473 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2474 return -TARGET_EFAULT; 2475 target_ip = &(target_sd->sem_perm); 2476 host_ip->__key = tswap32(target_ip->__key); 2477 host_ip->uid = tswap32(target_ip->uid); 2478 host_ip->gid = tswap32(target_ip->gid); 2479 host_ip->cuid = tswap32(target_ip->cuid); 2480 host_ip->cgid = tswap32(target_ip->cgid); 2481 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2482 host_ip->mode = tswap32(target_ip->mode); 2483 #else 2484 host_ip->mode = tswap16(target_ip->mode); 2485 #endif 2486 #if defined(TARGET_PPC) 2487 host_ip->__seq = tswap32(target_ip->__seq); 2488 #else 2489 host_ip->__seq = tswap16(target_ip->__seq); 2490 #endif 2491 unlock_user_struct(target_sd, target_addr, 0); 2492 return 0; 2493 } 2494 2495 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2496 struct ipc_perm *host_ip) 2497 { 2498 struct target_ipc_perm *target_ip; 2499 struct target_semid_ds *target_sd; 2500 2501 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2502 return -TARGET_EFAULT; 2503 target_ip = &(target_sd->sem_perm); 2504 target_ip->__key = tswap32(host_ip->__key); 2505 target_ip->uid = tswap32(host_ip->uid); 2506 target_ip->gid = tswap32(host_ip->gid); 2507 target_ip->cuid = tswap32(host_ip->cuid); 2508 target_ip->cgid = tswap32(host_ip->cgid); 2509 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2510 target_ip->mode = tswap32(host_ip->mode); 2511 #else 2512 target_ip->mode = tswap16(host_ip->mode); 2513 #endif 2514 #if defined(TARGET_PPC) 2515 target_ip->__seq = tswap32(host_ip->__seq); 2516 #else 2517 target_ip->__seq = tswap16(host_ip->__seq); 2518 #endif 2519 unlock_user_struct(target_sd, target_addr, 1); 2520 return 0; 2521 } 2522 2523 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2524 abi_ulong target_addr) 2525 { 2526 struct target_semid_ds *target_sd; 2527 2528 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2529 return -TARGET_EFAULT; 2530 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2531 return -TARGET_EFAULT; 2532 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2533 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2534 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2535 unlock_user_struct(target_sd, target_addr, 0); 2536 return 0; 2537 } 2538 2539 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2540 struct semid_ds *host_sd) 2541 { 2542 struct target_semid_ds *target_sd; 2543 2544 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2545 return -TARGET_EFAULT; 2546 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2547 return -TARGET_EFAULT; 2548 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2549 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2550 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2551 unlock_user_struct(target_sd, target_addr, 1); 2552 return 0; 2553 } 2554 2555 struct target_seminfo { 2556 int semmap; 2557 int semmni; 2558 int semmns; 2559 int semmnu; 2560 int semmsl; 2561 int semopm; 2562 int semume; 2563 int semusz; 2564 int semvmx; 2565 int semaem; 2566 }; 2567 2568 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2569 
struct seminfo *host_seminfo) 2570 { 2571 struct target_seminfo *target_seminfo; 2572 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2573 return -TARGET_EFAULT; 2574 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2575 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2576 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2577 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2578 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2579 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2580 __put_user(host_seminfo->semume, &target_seminfo->semume); 2581 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2582 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2583 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2584 unlock_user_struct(target_seminfo, target_addr, 1); 2585 return 0; 2586 } 2587 2588 union semun { 2589 int val; 2590 struct semid_ds *buf; 2591 unsigned short *array; 2592 struct seminfo *__buf; 2593 }; 2594 2595 union target_semun { 2596 int val; 2597 abi_ulong buf; 2598 abi_ulong array; 2599 abi_ulong __buf; 2600 }; 2601 2602 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2603 abi_ulong target_addr) 2604 { 2605 int nsems; 2606 unsigned short *array; 2607 union semun semun; 2608 struct semid_ds semid_ds; 2609 int i, ret; 2610 2611 semun.buf = &semid_ds; 2612 2613 ret = semctl(semid, 0, IPC_STAT, semun); 2614 if (ret == -1) 2615 return get_errno(ret); 2616 2617 nsems = semid_ds.sem_nsems; 2618 2619 *host_array = malloc(nsems*sizeof(unsigned short)); 2620 array = lock_user(VERIFY_READ, target_addr, 2621 nsems*sizeof(unsigned short), 1); 2622 if (!array) 2623 return -TARGET_EFAULT; 2624 2625 for(i=0; i<nsems; i++) { 2626 __get_user((*host_array)[i], &array[i]); 2627 } 2628 unlock_user(array, target_addr, 0); 2629 2630 return 0; 2631 } 2632 2633 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2634 unsigned short **host_array) 2635 { 2636 int nsems; 2637 unsigned short *array; 2638 union semun semun; 2639 struct semid_ds semid_ds; 2640 int i, ret; 2641 2642 semun.buf = &semid_ds; 2643 2644 ret = semctl(semid, 0, IPC_STAT, semun); 2645 if (ret == -1) 2646 return get_errno(ret); 2647 2648 nsems = semid_ds.sem_nsems; 2649 2650 array = lock_user(VERIFY_WRITE, target_addr, 2651 nsems*sizeof(unsigned short), 0); 2652 if (!array) 2653 return -TARGET_EFAULT; 2654 2655 for(i=0; i<nsems; i++) { 2656 __put_user((*host_array)[i], &array[i]); 2657 } 2658 free(*host_array); 2659 unlock_user(array, target_addr, 1); 2660 2661 return 0; 2662 } 2663 2664 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2665 union target_semun target_su) 2666 { 2667 union semun arg; 2668 struct semid_ds dsarg; 2669 unsigned short *array = NULL; 2670 struct seminfo seminfo; 2671 abi_long ret = -TARGET_EINVAL; 2672 abi_long err; 2673 cmd &= 0xff; 2674 2675 switch( cmd ) { 2676 case GETVAL: 2677 case SETVAL: 2678 arg.val = tswap32(target_su.val); 2679 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2680 target_su.val = tswap32(arg.val); 2681 break; 2682 case GETALL: 2683 case SETALL: 2684 err = target_to_host_semarray(semid, &array, target_su.array); 2685 if (err) 2686 return err; 2687 arg.array = array; 2688 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2689 err = host_to_target_semarray(semid, target_su.array, &array); 2690 if (err) 2691 return err; 2692 break; 2693 case IPC_STAT: 2694 case IPC_SET: 2695 case SEM_STAT: 2696 
err = target_to_host_semid_ds(&dsarg, target_su.buf); 2697 if (err) 2698 return err; 2699 arg.buf = &dsarg; 2700 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2701 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2702 if (err) 2703 return err; 2704 break; 2705 case IPC_INFO: 2706 case SEM_INFO: 2707 arg.__buf = &seminfo; 2708 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2709 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2710 if (err) 2711 return err; 2712 break; 2713 case IPC_RMID: 2714 case GETPID: 2715 case GETNCNT: 2716 case GETZCNT: 2717 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2718 break; 2719 } 2720 2721 return ret; 2722 } 2723 2724 struct target_sembuf { 2725 unsigned short sem_num; 2726 short sem_op; 2727 short sem_flg; 2728 }; 2729 2730 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2731 abi_ulong target_addr, 2732 unsigned nsops) 2733 { 2734 struct target_sembuf *target_sembuf; 2735 int i; 2736 2737 target_sembuf = lock_user(VERIFY_READ, target_addr, 2738 nsops*sizeof(struct target_sembuf), 1); 2739 if (!target_sembuf) 2740 return -TARGET_EFAULT; 2741 2742 for(i=0; i<nsops; i++) { 2743 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2744 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2745 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2746 } 2747 2748 unlock_user(target_sembuf, target_addr, 0); 2749 2750 return 0; 2751 } 2752 2753 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2754 { 2755 struct sembuf sops[nsops]; 2756 2757 if (target_to_host_sembuf(sops, ptr, nsops)) 2758 return -TARGET_EFAULT; 2759 2760 return get_errno(semop(semid, sops, nsops)); 2761 } 2762 2763 struct target_msqid_ds 2764 { 2765 struct target_ipc_perm msg_perm; 2766 abi_ulong msg_stime; 2767 #if TARGET_ABI_BITS == 32 2768 abi_ulong __unused1; 2769 #endif 2770 abi_ulong msg_rtime; 2771 #if TARGET_ABI_BITS == 32 2772 abi_ulong __unused2; 2773 #endif 2774 abi_ulong msg_ctime; 2775 #if TARGET_ABI_BITS == 32 2776 abi_ulong __unused3; 2777 #endif 2778 abi_ulong __msg_cbytes; 2779 abi_ulong msg_qnum; 2780 abi_ulong msg_qbytes; 2781 abi_ulong msg_lspid; 2782 abi_ulong msg_lrpid; 2783 abi_ulong __unused4; 2784 abi_ulong __unused5; 2785 }; 2786 2787 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2788 abi_ulong target_addr) 2789 { 2790 struct target_msqid_ds *target_md; 2791 2792 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2793 return -TARGET_EFAULT; 2794 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2795 return -TARGET_EFAULT; 2796 host_md->msg_stime = tswapal(target_md->msg_stime); 2797 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2798 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2799 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2800 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2801 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2802 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2803 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2804 unlock_user_struct(target_md, target_addr, 0); 2805 return 0; 2806 } 2807 2808 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2809 struct msqid_ds *host_md) 2810 { 2811 struct target_msqid_ds *target_md; 2812 2813 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2814 return -TARGET_EFAULT; 2815 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2816 return -TARGET_EFAULT; 2817 target_md->msg_stime 
= tswapal(host_md->msg_stime); 2818 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2819 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2820 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2821 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2822 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2823 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2824 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2825 unlock_user_struct(target_md, target_addr, 1); 2826 return 0; 2827 } 2828 2829 struct target_msginfo { 2830 int msgpool; 2831 int msgmap; 2832 int msgmax; 2833 int msgmnb; 2834 int msgmni; 2835 int msgssz; 2836 int msgtql; 2837 unsigned short int msgseg; 2838 }; 2839 2840 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2841 struct msginfo *host_msginfo) 2842 { 2843 struct target_msginfo *target_msginfo; 2844 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2845 return -TARGET_EFAULT; 2846 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2847 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2848 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2849 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2850 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2851 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2852 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2853 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2854 unlock_user_struct(target_msginfo, target_addr, 1); 2855 return 0; 2856 } 2857 2858 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2859 { 2860 struct msqid_ds dsarg; 2861 struct msginfo msginfo; 2862 abi_long ret = -TARGET_EINVAL; 2863 2864 cmd &= 0xff; 2865 2866 switch (cmd) { 2867 case IPC_STAT: 2868 case IPC_SET: 2869 case MSG_STAT: 2870 if (target_to_host_msqid_ds(&dsarg,ptr)) 2871 return -TARGET_EFAULT; 2872 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2873 if (host_to_target_msqid_ds(ptr,&dsarg)) 2874 return -TARGET_EFAULT; 2875 break; 2876 case IPC_RMID: 2877 ret = get_errno(msgctl(msgid, cmd, NULL)); 2878 break; 2879 case IPC_INFO: 2880 case MSG_INFO: 2881 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2882 if (host_to_target_msginfo(ptr, &msginfo)) 2883 return -TARGET_EFAULT; 2884 break; 2885 } 2886 2887 return ret; 2888 } 2889 2890 struct target_msgbuf { 2891 abi_long mtype; 2892 char mtext[1]; 2893 }; 2894 2895 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2896 unsigned int msgsz, int msgflg) 2897 { 2898 struct target_msgbuf *target_mb; 2899 struct msgbuf *host_mb; 2900 abi_long ret = 0; 2901 2902 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2903 return -TARGET_EFAULT; 2904 host_mb = malloc(msgsz+sizeof(long)); 2905 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2906 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2907 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2908 free(host_mb); 2909 unlock_user_struct(target_mb, msgp, 0); 2910 2911 return ret; 2912 } 2913 2914 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2915 unsigned int msgsz, abi_long msgtyp, 2916 int msgflg) 2917 { 2918 struct target_msgbuf *target_mb; 2919 char *target_mtext; 2920 struct msgbuf *host_mb; 2921 abi_long ret = 0; 2922 2923 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2924 return -TARGET_EFAULT; 2925 2926 host_mb = g_malloc(msgsz+sizeof(long)); 2927 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2928 2929 if (ret > 0) { 
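/* msgrcv() succeeded: copy the ret bytes of message text back into the
 * guest msgbuf, whose mtext field starts one abi_long (the mtype field)
 * past msgp.
 */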
2930 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2931 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2932 if (!target_mtext) { 2933 ret = -TARGET_EFAULT; 2934 goto end; 2935 } 2936 memcpy(target_mb->mtext, host_mb->mtext, ret); 2937 unlock_user(target_mtext, target_mtext_addr, ret); 2938 } 2939 2940 target_mb->mtype = tswapal(host_mb->mtype); 2941 2942 end: 2943 if (target_mb) 2944 unlock_user_struct(target_mb, msgp, 1); 2945 g_free(host_mb); 2946 return ret; 2947 } 2948 2949 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2950 abi_ulong target_addr) 2951 { 2952 struct target_shmid_ds *target_sd; 2953 2954 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2955 return -TARGET_EFAULT; 2956 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2957 return -TARGET_EFAULT; 2958 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2959 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2960 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2961 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2962 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2963 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2964 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2965 unlock_user_struct(target_sd, target_addr, 0); 2966 return 0; 2967 } 2968 2969 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2970 struct shmid_ds *host_sd) 2971 { 2972 struct target_shmid_ds *target_sd; 2973 2974 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2975 return -TARGET_EFAULT; 2976 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2977 return -TARGET_EFAULT; 2978 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2979 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2980 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2981 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2982 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2983 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2984 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2985 unlock_user_struct(target_sd, target_addr, 1); 2986 return 0; 2987 } 2988 2989 struct target_shminfo { 2990 abi_ulong shmmax; 2991 abi_ulong shmmin; 2992 abi_ulong shmmni; 2993 abi_ulong shmseg; 2994 abi_ulong shmall; 2995 }; 2996 2997 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2998 struct shminfo *host_shminfo) 2999 { 3000 struct target_shminfo *target_shminfo; 3001 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 3002 return -TARGET_EFAULT; 3003 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 3004 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 3005 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 3006 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 3007 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 3008 unlock_user_struct(target_shminfo, target_addr, 1); 3009 return 0; 3010 } 3011 3012 struct target_shm_info { 3013 int used_ids; 3014 abi_ulong shm_tot; 3015 abi_ulong shm_rss; 3016 abi_ulong shm_swp; 3017 abi_ulong swap_attempts; 3018 abi_ulong swap_successes; 3019 }; 3020 3021 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 3022 struct shm_info *host_shm_info) 3023 { 3024 struct target_shm_info *target_shm_info; 3025 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 3026 return -TARGET_EFAULT; 3027 __put_user(host_shm_info->used_ids, 
&target_shm_info->used_ids); 3028 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 3029 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 3030 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 3031 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 3032 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 3033 unlock_user_struct(target_shm_info, target_addr, 1); 3034 return 0; 3035 } 3036 3037 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 3038 { 3039 struct shmid_ds dsarg; 3040 struct shminfo shminfo; 3041 struct shm_info shm_info; 3042 abi_long ret = -TARGET_EINVAL; 3043 3044 cmd &= 0xff; 3045 3046 switch(cmd) { 3047 case IPC_STAT: 3048 case IPC_SET: 3049 case SHM_STAT: 3050 if (target_to_host_shmid_ds(&dsarg, buf)) 3051 return -TARGET_EFAULT; 3052 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 3053 if (host_to_target_shmid_ds(buf, &dsarg)) 3054 return -TARGET_EFAULT; 3055 break; 3056 case IPC_INFO: 3057 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 3058 if (host_to_target_shminfo(buf, &shminfo)) 3059 return -TARGET_EFAULT; 3060 break; 3061 case SHM_INFO: 3062 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 3063 if (host_to_target_shm_info(buf, &shm_info)) 3064 return -TARGET_EFAULT; 3065 break; 3066 case IPC_RMID: 3067 case SHM_LOCK: 3068 case SHM_UNLOCK: 3069 ret = get_errno(shmctl(shmid, cmd, NULL)); 3070 break; 3071 } 3072 3073 return ret; 3074 } 3075 3076 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 3077 { 3078 abi_long raddr; 3079 void *host_raddr; 3080 struct shmid_ds shm_info; 3081 int i,ret; 3082 3083 /* find out the length of the shared memory segment */ 3084 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 3085 if (is_error(ret)) { 3086 /* can't get length, bail out */ 3087 return ret; 3088 } 3089 3090 mmap_lock(); 3091 3092 if (shmaddr) 3093 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 3094 else { 3095 abi_ulong mmap_start; 3096 3097 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 3098 3099 if (mmap_start == -1) { 3100 errno = ENOMEM; 3101 host_raddr = (void *)-1; 3102 } else 3103 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 3104 } 3105 3106 if (host_raddr == (void *)-1) { 3107 mmap_unlock(); 3108 return get_errno((long)host_raddr); 3109 } 3110 raddr=h2g((unsigned long)host_raddr); 3111 3112 page_set_flags(raddr, raddr + shm_info.shm_segsz, 3113 PAGE_VALID | PAGE_READ | 3114 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3115 3116 for (i = 0; i < N_SHM_REGIONS; i++) { 3117 if (shm_regions[i].start == 0) { 3118 shm_regions[i].start = raddr; 3119 shm_regions[i].size = shm_info.shm_segsz; 3120 break; 3121 } 3122 } 3123 3124 mmap_unlock(); 3125 return raddr; 3126 3127 } 3128 3129 static inline abi_long do_shmdt(abi_ulong shmaddr) 3130 { 3131 int i; 3132 3133 for (i = 0; i < N_SHM_REGIONS; ++i) { 3134 if (shm_regions[i].start == shmaddr) { 3135 shm_regions[i].start = 0; 3136 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3137 break; 3138 } 3139 } 3140 3141 return get_errno(shmdt(g2h(shmaddr))); 3142 } 3143 3144 #ifdef TARGET_NR_ipc 3145 /* ??? This only works with linear mappings. */ 3146 /* do_ipc() must return target values and target errnos. 
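 * (Dispatch sketch, as implemented below: the low 16 bits of `call` select
 *  the operation and the high 16 bits carry the version, so e.g. a guest
 *  ipc(IPCOP_semget, key, nsems, semflg, ...) simply becomes
 *      semget(first, second, third)
 *  on the host, with get_errno() translating the result; the msgrcv and
 *  shmat cases additionally vary their argument handling by version.)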
*/ 3147 static abi_long do_ipc(unsigned int call, int first, 3148 int second, int third, 3149 abi_long ptr, abi_long fifth) 3150 { 3151 int version; 3152 abi_long ret = 0; 3153 3154 version = call >> 16; 3155 call &= 0xffff; 3156 3157 switch (call) { 3158 case IPCOP_semop: 3159 ret = do_semop(first, ptr, second); 3160 break; 3161 3162 case IPCOP_semget: 3163 ret = get_errno(semget(first, second, third)); 3164 break; 3165 3166 case IPCOP_semctl: 3167 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3168 break; 3169 3170 case IPCOP_msgget: 3171 ret = get_errno(msgget(first, second)); 3172 break; 3173 3174 case IPCOP_msgsnd: 3175 ret = do_msgsnd(first, ptr, second, third); 3176 break; 3177 3178 case IPCOP_msgctl: 3179 ret = do_msgctl(first, second, ptr); 3180 break; 3181 3182 case IPCOP_msgrcv: 3183 switch (version) { 3184 case 0: 3185 { 3186 struct target_ipc_kludge { 3187 abi_long msgp; 3188 abi_long msgtyp; 3189 } *tmp; 3190 3191 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3192 ret = -TARGET_EFAULT; 3193 break; 3194 } 3195 3196 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3197 3198 unlock_user_struct(tmp, ptr, 0); 3199 break; 3200 } 3201 default: 3202 ret = do_msgrcv(first, ptr, second, fifth, third); 3203 } 3204 break; 3205 3206 case IPCOP_shmat: 3207 switch (version) { 3208 default: 3209 { 3210 abi_ulong raddr; 3211 raddr = do_shmat(first, ptr, second); 3212 if (is_error(raddr)) 3213 return get_errno(raddr); 3214 if (put_user_ual(raddr, third)) 3215 return -TARGET_EFAULT; 3216 break; 3217 } 3218 case 1: 3219 ret = -TARGET_EINVAL; 3220 break; 3221 } 3222 break; 3223 case IPCOP_shmdt: 3224 ret = do_shmdt(ptr); 3225 break; 3226 3227 case IPCOP_shmget: 3228 /* IPC_* flag values are the same on all linux platforms */ 3229 ret = get_errno(shmget(first, second, third)); 3230 break; 3231 3232 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3233 case IPCOP_shmctl: 3234 ret = do_shmctl(first, second, ptr); 3235 break; 3236 default: 3237 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3238 ret = -TARGET_ENOSYS; 3239 break; 3240 } 3241 return ret; 3242 } 3243 #endif 3244 3245 /* kernel structure types definitions */ 3246 3247 #define STRUCT(name, ...) STRUCT_ ## name, 3248 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3249 enum { 3250 #include "syscall_types.h" 3251 }; 3252 #undef STRUCT 3253 #undef STRUCT_SPECIAL 3254 3255 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3256 #define STRUCT_SPECIAL(name) 3257 #include "syscall_types.h" 3258 #undef STRUCT 3259 #undef STRUCT_SPECIAL 3260 3261 typedef struct IOCTLEntry IOCTLEntry; 3262 3263 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3264 int fd, abi_long cmd, abi_long arg); 3265 3266 struct IOCTLEntry { 3267 unsigned int target_cmd; 3268 unsigned int host_cmd; 3269 const char *name; 3270 int access; 3271 do_ioctl_fn *do_ioctl; 3272 const argtype arg_type[5]; 3273 }; 3274 3275 #define IOC_R 0x0001 3276 #define IOC_W 0x0002 3277 #define IOC_RW (IOC_R | IOC_W) 3278 3279 #define MAX_STRUCT_SIZE 4096 3280 3281 #ifdef CONFIG_FIEMAP 3282 /* So fiemap access checks don't overflow on 32 bit systems. 3283 * This is very slightly smaller than the limit imposed by 3284 * the underlying kernel. 
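 * (Arithmetic sketch: the ioctl result occupies sizeof(struct fiemap) plus
 *  fm_extent_count * sizeof(struct fiemap_extent) bytes, so capping the
 *  extent count at (UINT_MAX - sizeof(struct fiemap)) / sizeof(struct
 *  fiemap_extent) keeps the outbufsz computation in
 *  do_ioctl_fs_ioc_fiemap() below from wrapping a 32 bit size.)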
3285 */ 3286 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3287 / sizeof(struct fiemap_extent)) 3288 3289 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3290 int fd, abi_long cmd, abi_long arg) 3291 { 3292 /* The parameter for this ioctl is a struct fiemap followed 3293 * by an array of struct fiemap_extent whose size is set 3294 * in fiemap->fm_extent_count. The array is filled in by the 3295 * ioctl. 3296 */ 3297 int target_size_in, target_size_out; 3298 struct fiemap *fm; 3299 const argtype *arg_type = ie->arg_type; 3300 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3301 void *argptr, *p; 3302 abi_long ret; 3303 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3304 uint32_t outbufsz; 3305 int free_fm = 0; 3306 3307 assert(arg_type[0] == TYPE_PTR); 3308 assert(ie->access == IOC_RW); 3309 arg_type++; 3310 target_size_in = thunk_type_size(arg_type, 0); 3311 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3312 if (!argptr) { 3313 return -TARGET_EFAULT; 3314 } 3315 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3316 unlock_user(argptr, arg, 0); 3317 fm = (struct fiemap *)buf_temp; 3318 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3319 return -TARGET_EINVAL; 3320 } 3321 3322 outbufsz = sizeof (*fm) + 3323 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3324 3325 if (outbufsz > MAX_STRUCT_SIZE) { 3326 /* We can't fit all the extents into the fixed size buffer. 3327 * Allocate one that is large enough and use it instead. 3328 */ 3329 fm = malloc(outbufsz); 3330 if (!fm) { 3331 return -TARGET_ENOMEM; 3332 } 3333 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3334 free_fm = 1; 3335 } 3336 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3337 if (!is_error(ret)) { 3338 target_size_out = target_size_in; 3339 /* An extent_count of 0 means we were only counting the extents 3340 * so there are no structs to copy 3341 */ 3342 if (fm->fm_extent_count != 0) { 3343 target_size_out += fm->fm_mapped_extents * extent_size; 3344 } 3345 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3346 if (!argptr) { 3347 ret = -TARGET_EFAULT; 3348 } else { 3349 /* Convert the struct fiemap */ 3350 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3351 if (fm->fm_extent_count != 0) { 3352 p = argptr + target_size_in; 3353 /* ...and then all the struct fiemap_extents */ 3354 for (i = 0; i < fm->fm_mapped_extents; i++) { 3355 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3356 THUNK_TARGET); 3357 p += extent_size; 3358 } 3359 } 3360 unlock_user(argptr, arg, target_size_out); 3361 } 3362 } 3363 if (free_fm) { 3364 free(fm); 3365 } 3366 return ret; 3367 } 3368 #endif 3369 3370 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3371 int fd, abi_long cmd, abi_long arg) 3372 { 3373 const argtype *arg_type = ie->arg_type; 3374 int target_size; 3375 void *argptr; 3376 int ret; 3377 struct ifconf *host_ifconf; 3378 uint32_t outbufsz; 3379 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3380 int target_ifreq_size; 3381 int nb_ifreq; 3382 int free_buf = 0; 3383 int i; 3384 int target_ifc_len; 3385 abi_long target_ifc_buf; 3386 int host_ifc_len; 3387 char *host_ifc_buf; 3388 3389 assert(arg_type[0] == TYPE_PTR); 3390 assert(ie->access == IOC_RW); 3391 3392 arg_type++; 3393 target_size = thunk_type_size(arg_type, 0); 3394 3395 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3396 if (!argptr) 3397 return -TARGET_EFAULT; 3398 thunk_convert(buf_temp, argptr, 
arg_type, THUNK_HOST); 3399 unlock_user(argptr, arg, 0); 3400 3401 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3402 target_ifc_len = host_ifconf->ifc_len; 3403 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3404 3405 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3406 nb_ifreq = target_ifc_len / target_ifreq_size; 3407 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3408 3409 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3410 if (outbufsz > MAX_STRUCT_SIZE) { 3411 /* We can't fit all the extents into the fixed size buffer. 3412 * Allocate one that is large enough and use it instead. 3413 */ 3414 host_ifconf = malloc(outbufsz); 3415 if (!host_ifconf) { 3416 return -TARGET_ENOMEM; 3417 } 3418 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3419 free_buf = 1; 3420 } 3421 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3422 3423 host_ifconf->ifc_len = host_ifc_len; 3424 host_ifconf->ifc_buf = host_ifc_buf; 3425 3426 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3427 if (!is_error(ret)) { 3428 /* convert host ifc_len to target ifc_len */ 3429 3430 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3431 target_ifc_len = nb_ifreq * target_ifreq_size; 3432 host_ifconf->ifc_len = target_ifc_len; 3433 3434 /* restore target ifc_buf */ 3435 3436 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3437 3438 /* copy struct ifconf to target user */ 3439 3440 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3441 if (!argptr) 3442 return -TARGET_EFAULT; 3443 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3444 unlock_user(argptr, arg, target_size); 3445 3446 /* copy ifreq[] to target user */ 3447 3448 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3449 for (i = 0; i < nb_ifreq ; i++) { 3450 thunk_convert(argptr + i * target_ifreq_size, 3451 host_ifc_buf + i * sizeof(struct ifreq), 3452 ifreq_arg_type, THUNK_TARGET); 3453 } 3454 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3455 } 3456 3457 if (free_buf) { 3458 free(host_ifconf); 3459 } 3460 3461 return ret; 3462 } 3463 3464 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3465 abi_long cmd, abi_long arg) 3466 { 3467 void *argptr; 3468 struct dm_ioctl *host_dm; 3469 abi_long guest_data; 3470 uint32_t guest_data_size; 3471 int target_size; 3472 const argtype *arg_type = ie->arg_type; 3473 abi_long ret; 3474 void *big_buf = NULL; 3475 char *host_data; 3476 3477 arg_type++; 3478 target_size = thunk_type_size(arg_type, 0); 3479 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3480 if (!argptr) { 3481 ret = -TARGET_EFAULT; 3482 goto out; 3483 } 3484 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3485 unlock_user(argptr, arg, 0); 3486 3487 /* buf_temp is too small, so fetch things into a bigger buffer */ 3488 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3489 memcpy(big_buf, buf_temp, target_size); 3490 buf_temp = big_buf; 3491 host_dm = big_buf; 3492 3493 guest_data = arg + host_dm->data_start; 3494 if ((guest_data - arg) < 0) { 3495 ret = -EINVAL; 3496 goto out; 3497 } 3498 guest_data_size = host_dm->data_size - host_dm->data_start; 3499 host_data = (char*)host_dm + host_dm->data_start; 3500 3501 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3502 switch (ie->host_cmd) { 3503 case DM_REMOVE_ALL: 3504 case DM_LIST_DEVICES: 3505 case DM_DEV_CREATE: 3506 case DM_DEV_REMOVE: 3507 case DM_DEV_SUSPEND: 3508 case DM_DEV_STATUS: 3509 case DM_DEV_WAIT: 3510 case 
DM_TABLE_STATUS: 3511 case DM_TABLE_CLEAR: 3512 case DM_TABLE_DEPS: 3513 case DM_LIST_VERSIONS: 3514 /* no input data */ 3515 break; 3516 case DM_DEV_RENAME: 3517 case DM_DEV_SET_GEOMETRY: 3518 /* data contains only strings */ 3519 memcpy(host_data, argptr, guest_data_size); 3520 break; 3521 case DM_TARGET_MSG: 3522 memcpy(host_data, argptr, guest_data_size); 3523 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3524 break; 3525 case DM_TABLE_LOAD: 3526 { 3527 void *gspec = argptr; 3528 void *cur_data = host_data; 3529 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3530 int spec_size = thunk_type_size(arg_type, 0); 3531 int i; 3532 3533 for (i = 0; i < host_dm->target_count; i++) { 3534 struct dm_target_spec *spec = cur_data; 3535 uint32_t next; 3536 int slen; 3537 3538 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3539 slen = strlen((char*)gspec + spec_size) + 1; 3540 next = spec->next; 3541 spec->next = sizeof(*spec) + slen; 3542 strcpy((char*)&spec[1], gspec + spec_size); 3543 gspec += next; 3544 cur_data += spec->next; 3545 } 3546 break; 3547 } 3548 default: 3549 ret = -TARGET_EINVAL; 3550 goto out; 3551 } 3552 unlock_user(argptr, guest_data, 0); 3553 3554 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3555 if (!is_error(ret)) { 3556 guest_data = arg + host_dm->data_start; 3557 guest_data_size = host_dm->data_size - host_dm->data_start; 3558 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3559 switch (ie->host_cmd) { 3560 case DM_REMOVE_ALL: 3561 case DM_DEV_CREATE: 3562 case DM_DEV_REMOVE: 3563 case DM_DEV_RENAME: 3564 case DM_DEV_SUSPEND: 3565 case DM_DEV_STATUS: 3566 case DM_TABLE_LOAD: 3567 case DM_TABLE_CLEAR: 3568 case DM_TARGET_MSG: 3569 case DM_DEV_SET_GEOMETRY: 3570 /* no return data */ 3571 break; 3572 case DM_LIST_DEVICES: 3573 { 3574 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3575 uint32_t remaining_data = guest_data_size; 3576 void *cur_data = argptr; 3577 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3578 int nl_size = 12; /* can't use thunk_size due to alignment */ 3579 3580 while (1) { 3581 uint32_t next = nl->next; 3582 if (next) { 3583 nl->next = nl_size + (strlen(nl->name) + 1); 3584 } 3585 if (remaining_data < nl->next) { 3586 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3587 break; 3588 } 3589 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3590 strcpy(cur_data + nl_size, nl->name); 3591 cur_data += nl->next; 3592 remaining_data -= nl->next; 3593 if (!next) { 3594 break; 3595 } 3596 nl = (void*)nl + next; 3597 } 3598 break; 3599 } 3600 case DM_DEV_WAIT: 3601 case DM_TABLE_STATUS: 3602 { 3603 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3604 void *cur_data = argptr; 3605 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3606 int spec_size = thunk_type_size(arg_type, 0); 3607 int i; 3608 3609 for (i = 0; i < host_dm->target_count; i++) { 3610 uint32_t next = spec->next; 3611 int slen = strlen((char*)&spec[1]) + 1; 3612 spec->next = (cur_data - argptr) + spec_size + slen; 3613 if (guest_data_size < spec->next) { 3614 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3615 break; 3616 } 3617 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3618 strcpy(cur_data + spec_size, (char*)&spec[1]); 3619 cur_data = argptr + spec->next; 3620 spec = (void*)host_dm + host_dm->data_start + next; 3621 } 3622 break; 3623 } 3624 case DM_TABLE_DEPS: 3625 { 3626 void *hdata = (void*)host_dm + host_dm->data_start; 3627 int count = *(uint32_t*)hdata; 3628 uint64_t *hdev = 
hdata + 8; 3629 uint64_t *gdev = argptr + 8; 3630 int i; 3631 3632 *(uint32_t*)argptr = tswap32(count); 3633 for (i = 0; i < count; i++) { 3634 *gdev = tswap64(*hdev); 3635 gdev++; 3636 hdev++; 3637 } 3638 break; 3639 } 3640 case DM_LIST_VERSIONS: 3641 { 3642 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3643 uint32_t remaining_data = guest_data_size; 3644 void *cur_data = argptr; 3645 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3646 int vers_size = thunk_type_size(arg_type, 0); 3647 3648 while (1) { 3649 uint32_t next = vers->next; 3650 if (next) { 3651 vers->next = vers_size + (strlen(vers->name) + 1); 3652 } 3653 if (remaining_data < vers->next) { 3654 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3655 break; 3656 } 3657 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3658 strcpy(cur_data + vers_size, vers->name); 3659 cur_data += vers->next; 3660 remaining_data -= vers->next; 3661 if (!next) { 3662 break; 3663 } 3664 vers = (void*)vers + next; 3665 } 3666 break; 3667 } 3668 default: 3669 ret = -TARGET_EINVAL; 3670 goto out; 3671 } 3672 unlock_user(argptr, guest_data, guest_data_size); 3673 3674 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3675 if (!argptr) { 3676 ret = -TARGET_EFAULT; 3677 goto out; 3678 } 3679 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3680 unlock_user(argptr, arg, target_size); 3681 } 3682 out: 3683 g_free(big_buf); 3684 return ret; 3685 } 3686 3687 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3688 int fd, abi_long cmd, abi_long arg) 3689 { 3690 const argtype *arg_type = ie->arg_type; 3691 const StructEntry *se; 3692 const argtype *field_types; 3693 const int *dst_offsets, *src_offsets; 3694 int target_size; 3695 void *argptr; 3696 abi_ulong *target_rt_dev_ptr; 3697 unsigned long *host_rt_dev_ptr; 3698 abi_long ret; 3699 int i; 3700 3701 assert(ie->access == IOC_W); 3702 assert(*arg_type == TYPE_PTR); 3703 arg_type++; 3704 assert(*arg_type == TYPE_STRUCT); 3705 target_size = thunk_type_size(arg_type, 0); 3706 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3707 if (!argptr) { 3708 return -TARGET_EFAULT; 3709 } 3710 arg_type++; 3711 assert(*arg_type == (int)STRUCT_rtentry); 3712 se = struct_entries + *arg_type++; 3713 assert(se->convert[0] == NULL); 3714 /* convert struct here to be able to catch rt_dev string */ 3715 field_types = se->field_types; 3716 dst_offsets = se->field_offsets[THUNK_HOST]; 3717 src_offsets = se->field_offsets[THUNK_TARGET]; 3718 for (i = 0; i < se->nb_fields; i++) { 3719 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3720 assert(*field_types == TYPE_PTRVOID); 3721 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3722 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3723 if (*target_rt_dev_ptr != 0) { 3724 *host_rt_dev_ptr = (unsigned long)lock_user_string( 3725 tswapal(*target_rt_dev_ptr)); 3726 if (!*host_rt_dev_ptr) { 3727 unlock_user(argptr, arg, 0); 3728 return -TARGET_EFAULT; 3729 } 3730 } else { 3731 *host_rt_dev_ptr = 0; 3732 } 3733 field_types++; 3734 continue; 3735 } 3736 field_types = thunk_convert(buf_temp + dst_offsets[i], 3737 argptr + src_offsets[i], 3738 field_types, THUNK_HOST); 3739 } 3740 unlock_user(argptr, arg, 0); 3741 3742 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3743 if (*host_rt_dev_ptr != 0) { 3744 unlock_user((void *)*host_rt_dev_ptr, 3745 *target_rt_dev_ptr, 0); 3746 } 3747 return ret; 3748 } 3749 3750 static IOCTLEntry ioctl_entries[] = { 3751 #define IOCTL(cmd, 
access, ...) \ 3752 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3753 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3754 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3755 #include "ioctls.h" 3756 { 0, 0, }, 3757 }; 3758 3759 /* ??? Implement proper locking for ioctls. */ 3760 /* do_ioctl() Must return target values and target errnos. */ 3761 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3762 { 3763 const IOCTLEntry *ie; 3764 const argtype *arg_type; 3765 abi_long ret; 3766 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3767 int target_size; 3768 void *argptr; 3769 3770 ie = ioctl_entries; 3771 for(;;) { 3772 if (ie->target_cmd == 0) { 3773 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3774 return -TARGET_ENOSYS; 3775 } 3776 if (ie->target_cmd == cmd) 3777 break; 3778 ie++; 3779 } 3780 arg_type = ie->arg_type; 3781 #if defined(DEBUG) 3782 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3783 #endif 3784 if (ie->do_ioctl) { 3785 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3786 } 3787 3788 switch(arg_type[0]) { 3789 case TYPE_NULL: 3790 /* no argument */ 3791 ret = get_errno(ioctl(fd, ie->host_cmd)); 3792 break; 3793 case TYPE_PTRVOID: 3794 case TYPE_INT: 3795 /* int argment */ 3796 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3797 break; 3798 case TYPE_PTR: 3799 arg_type++; 3800 target_size = thunk_type_size(arg_type, 0); 3801 switch(ie->access) { 3802 case IOC_R: 3803 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3804 if (!is_error(ret)) { 3805 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3806 if (!argptr) 3807 return -TARGET_EFAULT; 3808 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3809 unlock_user(argptr, arg, target_size); 3810 } 3811 break; 3812 case IOC_W: 3813 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3814 if (!argptr) 3815 return -TARGET_EFAULT; 3816 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3817 unlock_user(argptr, arg, 0); 3818 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3819 break; 3820 default: 3821 case IOC_RW: 3822 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3823 if (!argptr) 3824 return -TARGET_EFAULT; 3825 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3826 unlock_user(argptr, arg, 0); 3827 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3828 if (!is_error(ret)) { 3829 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3830 if (!argptr) 3831 return -TARGET_EFAULT; 3832 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3833 unlock_user(argptr, arg, target_size); 3834 } 3835 break; 3836 } 3837 break; 3838 default: 3839 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3840 (long)cmd, arg_type[0]); 3841 ret = -TARGET_ENOSYS; 3842 break; 3843 } 3844 return ret; 3845 } 3846 3847 static const bitmask_transtbl iflag_tbl[] = { 3848 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3849 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3850 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3851 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3852 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3853 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3854 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3855 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3856 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3857 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3858 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3859 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3860 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3861 { TARGET_IMAXBEL, 
TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3862 { 0, 0, 0, 0 } 3863 }; 3864 3865 static const bitmask_transtbl oflag_tbl[] = { 3866 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3867 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3868 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3869 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3870 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 3871 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3872 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3873 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3874 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3875 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3876 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3877 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3878 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3879 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3880 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3881 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3882 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3883 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3884 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3885 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3886 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3887 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3888 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3889 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3890 { 0, 0, 0, 0 } 3891 }; 3892 3893 static const bitmask_transtbl cflag_tbl[] = { 3894 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3895 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3896 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3897 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3898 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3899 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3900 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3901 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3902 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3903 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3904 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3905 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3906 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3907 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3908 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3909 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3910 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3911 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3912 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3913 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3914 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3915 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3916 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3917 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3918 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3919 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3920 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3921 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3922 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3923 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3924 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3925 { 0, 0, 0, 0 } 3926 }; 3927 3928 static const bitmask_transtbl lflag_tbl[] = { 3929 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3930 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3931 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3932 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3933 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3934 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3935 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3936 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3937 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3938 { TARGET_ECHOCTL, TARGET_ECHOCTL, 
ECHOCTL, ECHOCTL }, 3939 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3940 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3941 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3942 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3943 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3944 { 0, 0, 0, 0 } 3945 }; 3946 3947 static void target_to_host_termios (void *dst, const void *src) 3948 { 3949 struct host_termios *host = dst; 3950 const struct target_termios *target = src; 3951 3952 host->c_iflag = 3953 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3954 host->c_oflag = 3955 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3956 host->c_cflag = 3957 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3958 host->c_lflag = 3959 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3960 host->c_line = target->c_line; 3961 3962 memset(host->c_cc, 0, sizeof(host->c_cc)); 3963 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3964 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3965 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3966 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3967 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3968 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3969 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3970 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3971 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3972 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3973 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3974 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3975 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3976 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3977 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3978 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3979 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3980 } 3981 3982 static void host_to_target_termios (void *dst, const void *src) 3983 { 3984 struct target_termios *target = dst; 3985 const struct host_termios *host = src; 3986 3987 target->c_iflag = 3988 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3989 target->c_oflag = 3990 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3991 target->c_cflag = 3992 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3993 target->c_lflag = 3994 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3995 target->c_line = host->c_line; 3996 3997 memset(target->c_cc, 0, sizeof(target->c_cc)); 3998 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3999 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 4000 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 4001 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 4002 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 4003 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 4004 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 4005 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 4006 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 4007 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 4008 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 4009 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 4010 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 4011 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 4012 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 4013 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 4014 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 4015 } 4016 4017 static const StructEntry struct_termios_def = { 4018 .convert = { host_to_target_termios, target_to_host_termios }, 4019 .size = { sizeof(struct 
target_termios), sizeof(struct host_termios) }, 4020 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 4021 }; 4022 4023 static bitmask_transtbl mmap_flags_tbl[] = { 4024 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 4025 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 4026 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 4027 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 4028 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 4029 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 4030 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 4031 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 4032 { 0, 0, 0, 0 } 4033 }; 4034 4035 #if defined(TARGET_I386) 4036 4037 /* NOTE: there is really one LDT for all the threads */ 4038 static uint8_t *ldt_table; 4039 4040 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 4041 { 4042 int size; 4043 void *p; 4044 4045 if (!ldt_table) 4046 return 0; 4047 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 4048 if (size > bytecount) 4049 size = bytecount; 4050 p = lock_user(VERIFY_WRITE, ptr, size, 0); 4051 if (!p) 4052 return -TARGET_EFAULT; 4053 /* ??? Should this by byteswapped? */ 4054 memcpy(p, ldt_table, size); 4055 unlock_user(p, ptr, size); 4056 return size; 4057 } 4058 4059 /* XXX: add locking support */ 4060 static abi_long write_ldt(CPUX86State *env, 4061 abi_ulong ptr, unsigned long bytecount, int oldmode) 4062 { 4063 struct target_modify_ldt_ldt_s ldt_info; 4064 struct target_modify_ldt_ldt_s *target_ldt_info; 4065 int seg_32bit, contents, read_exec_only, limit_in_pages; 4066 int seg_not_present, useable, lm; 4067 uint32_t *lp, entry_1, entry_2; 4068 4069 if (bytecount != sizeof(ldt_info)) 4070 return -TARGET_EINVAL; 4071 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 4072 return -TARGET_EFAULT; 4073 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4074 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4075 ldt_info.limit = tswap32(target_ldt_info->limit); 4076 ldt_info.flags = tswap32(target_ldt_info->flags); 4077 unlock_user_struct(target_ldt_info, ptr, 0); 4078 4079 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 4080 return -TARGET_EINVAL; 4081 seg_32bit = ldt_info.flags & 1; 4082 contents = (ldt_info.flags >> 1) & 3; 4083 read_exec_only = (ldt_info.flags >> 3) & 1; 4084 limit_in_pages = (ldt_info.flags >> 4) & 1; 4085 seg_not_present = (ldt_info.flags >> 5) & 1; 4086 useable = (ldt_info.flags >> 6) & 1; 4087 #ifdef TARGET_ABI32 4088 lm = 0; 4089 #else 4090 lm = (ldt_info.flags >> 7) & 1; 4091 #endif 4092 if (contents == 3) { 4093 if (oldmode) 4094 return -TARGET_EINVAL; 4095 if (seg_not_present == 0) 4096 return -TARGET_EINVAL; 4097 } 4098 /* allocate the LDT */ 4099 if (!ldt_table) { 4100 env->ldt.base = target_mmap(0, 4101 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 4102 PROT_READ|PROT_WRITE, 4103 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 4104 if (env->ldt.base == -1) 4105 return -TARGET_ENOMEM; 4106 memset(g2h(env->ldt.base), 0, 4107 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 4108 env->ldt.limit = 0xffff; 4109 ldt_table = g2h(env->ldt.base); 4110 } 4111 4112 /* NOTE: same code as Linux kernel */ 4113 /* Allow LDTs to be cleared by the user. 
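A zero base and limit combined with the flag pattern tested just below (contents == 0, read_exec_only == 1, seg_not_present == 1, everything else clear) is the same combination the kernel's LDT_empty() check treats as an unused descriptor, so an all-zero entry is installed instead of encoding one.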
*/ 4114 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4115 if (oldmode || 4116 (contents == 0 && 4117 read_exec_only == 1 && 4118 seg_32bit == 0 && 4119 limit_in_pages == 0 && 4120 seg_not_present == 1 && 4121 useable == 0 )) { 4122 entry_1 = 0; 4123 entry_2 = 0; 4124 goto install; 4125 } 4126 } 4127 4128 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4129 (ldt_info.limit & 0x0ffff); 4130 entry_2 = (ldt_info.base_addr & 0xff000000) | 4131 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4132 (ldt_info.limit & 0xf0000) | 4133 ((read_exec_only ^ 1) << 9) | 4134 (contents << 10) | 4135 ((seg_not_present ^ 1) << 15) | 4136 (seg_32bit << 22) | 4137 (limit_in_pages << 23) | 4138 (lm << 21) | 4139 0x7000; 4140 if (!oldmode) 4141 entry_2 |= (useable << 20); 4142 4143 /* Install the new entry ... */ 4144 install: 4145 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 4146 lp[0] = tswap32(entry_1); 4147 lp[1] = tswap32(entry_2); 4148 return 0; 4149 } 4150 4151 /* specific and weird i386 syscalls */ 4152 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 4153 unsigned long bytecount) 4154 { 4155 abi_long ret; 4156 4157 switch (func) { 4158 case 0: 4159 ret = read_ldt(ptr, bytecount); 4160 break; 4161 case 1: 4162 ret = write_ldt(env, ptr, bytecount, 1); 4163 break; 4164 case 0x11: 4165 ret = write_ldt(env, ptr, bytecount, 0); 4166 break; 4167 default: 4168 ret = -TARGET_ENOSYS; 4169 break; 4170 } 4171 return ret; 4172 } 4173 4174 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4175 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4176 { 4177 uint64_t *gdt_table = g2h(env->gdt.base); 4178 struct target_modify_ldt_ldt_s ldt_info; 4179 struct target_modify_ldt_ldt_s *target_ldt_info; 4180 int seg_32bit, contents, read_exec_only, limit_in_pages; 4181 int seg_not_present, useable, lm; 4182 uint32_t *lp, entry_1, entry_2; 4183 int i; 4184 4185 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4186 if (!target_ldt_info) 4187 return -TARGET_EFAULT; 4188 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4189 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4190 ldt_info.limit = tswap32(target_ldt_info->limit); 4191 ldt_info.flags = tswap32(target_ldt_info->flags); 4192 if (ldt_info.entry_number == -1) { 4193 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4194 if (gdt_table[i] == 0) { 4195 ldt_info.entry_number = i; 4196 target_ldt_info->entry_number = tswap32(i); 4197 break; 4198 } 4199 } 4200 } 4201 unlock_user_struct(target_ldt_info, ptr, 1); 4202 4203 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4204 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4205 return -TARGET_EINVAL; 4206 seg_32bit = ldt_info.flags & 1; 4207 contents = (ldt_info.flags >> 1) & 3; 4208 read_exec_only = (ldt_info.flags >> 3) & 1; 4209 limit_in_pages = (ldt_info.flags >> 4) & 1; 4210 seg_not_present = (ldt_info.flags >> 5) & 1; 4211 useable = (ldt_info.flags >> 6) & 1; 4212 #ifdef TARGET_ABI32 4213 lm = 0; 4214 #else 4215 lm = (ldt_info.flags >> 7) & 1; 4216 #endif 4217 4218 if (contents == 3) { 4219 if (seg_not_present == 0) 4220 return -TARGET_EINVAL; 4221 } 4222 4223 /* NOTE: same code as Linux kernel */ 4224 /* Allow LDTs to be cleared by the user. 
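Same convention as write_ldt() above, except that the descriptor is installed into one of the GDT TLS slots rather than into the LDT.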
*/ 4225 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4226 if ((contents == 0 && 4227 read_exec_only == 1 && 4228 seg_32bit == 0 && 4229 limit_in_pages == 0 && 4230 seg_not_present == 1 && 4231 useable == 0 )) { 4232 entry_1 = 0; 4233 entry_2 = 0; 4234 goto install; 4235 } 4236 } 4237 4238 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4239 (ldt_info.limit & 0x0ffff); 4240 entry_2 = (ldt_info.base_addr & 0xff000000) | 4241 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4242 (ldt_info.limit & 0xf0000) | 4243 ((read_exec_only ^ 1) << 9) | 4244 (contents << 10) | 4245 ((seg_not_present ^ 1) << 15) | 4246 (seg_32bit << 22) | 4247 (limit_in_pages << 23) | 4248 (useable << 20) | 4249 (lm << 21) | 4250 0x7000; 4251 4252 /* Install the new entry ... */ 4253 install: 4254 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4255 lp[0] = tswap32(entry_1); 4256 lp[1] = tswap32(entry_2); 4257 return 0; 4258 } 4259 4260 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4261 { 4262 struct target_modify_ldt_ldt_s *target_ldt_info; 4263 uint64_t *gdt_table = g2h(env->gdt.base); 4264 uint32_t base_addr, limit, flags; 4265 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4266 int seg_not_present, useable, lm; 4267 uint32_t *lp, entry_1, entry_2; 4268 4269 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4270 if (!target_ldt_info) 4271 return -TARGET_EFAULT; 4272 idx = tswap32(target_ldt_info->entry_number); 4273 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4274 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4275 unlock_user_struct(target_ldt_info, ptr, 1); 4276 return -TARGET_EINVAL; 4277 } 4278 lp = (uint32_t *)(gdt_table + idx); 4279 entry_1 = tswap32(lp[0]); 4280 entry_2 = tswap32(lp[1]); 4281 4282 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4283 contents = (entry_2 >> 10) & 3; 4284 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4285 seg_32bit = (entry_2 >> 22) & 1; 4286 limit_in_pages = (entry_2 >> 23) & 1; 4287 useable = (entry_2 >> 20) & 1; 4288 #ifdef TARGET_ABI32 4289 lm = 0; 4290 #else 4291 lm = (entry_2 >> 21) & 1; 4292 #endif 4293 flags = (seg_32bit << 0) | (contents << 1) | 4294 (read_exec_only << 3) | (limit_in_pages << 4) | 4295 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4296 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4297 base_addr = (entry_1 >> 16) | 4298 (entry_2 & 0xff000000) | 4299 ((entry_2 & 0xff) << 16); 4300 target_ldt_info->base_addr = tswapal(base_addr); 4301 target_ldt_info->limit = tswap32(limit); 4302 target_ldt_info->flags = tswap32(flags); 4303 unlock_user_struct(target_ldt_info, ptr, 1); 4304 return 0; 4305 } 4306 #endif /* TARGET_I386 && TARGET_ABI32 */ 4307 4308 #ifndef TARGET_ABI32 4309 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4310 { 4311 abi_long ret = 0; 4312 abi_ulong val; 4313 int idx; 4314 4315 switch(code) { 4316 case TARGET_ARCH_SET_GS: 4317 case TARGET_ARCH_SET_FS: 4318 if (code == TARGET_ARCH_SET_GS) 4319 idx = R_GS; 4320 else 4321 idx = R_FS; 4322 cpu_x86_load_seg(env, idx, 0); 4323 env->segs[idx].base = addr; 4324 break; 4325 case TARGET_ARCH_GET_GS: 4326 case TARGET_ARCH_GET_FS: 4327 if (code == TARGET_ARCH_GET_GS) 4328 idx = R_GS; 4329 else 4330 idx = R_FS; 4331 val = env->segs[idx].base; 4332 if (put_user(val, addr, abi_ulong)) 4333 ret = -TARGET_EFAULT; 4334 break; 4335 default: 4336 ret = -TARGET_EINVAL; 4337 break; 4338 } 4339 return ret; 4340 } 4341 #endif 4342 4343 #endif /* defined(TARGET_I386) */ 4344 4345 #define NEW_STACK_SIZE 0x40000 4346 4347 4348 static pthread_mutex_t 
clone_lock = PTHREAD_MUTEX_INITIALIZER; 4349 typedef struct { 4350 CPUArchState *env; 4351 pthread_mutex_t mutex; 4352 pthread_cond_t cond; 4353 pthread_t thread; 4354 uint32_t tid; 4355 abi_ulong child_tidptr; 4356 abi_ulong parent_tidptr; 4357 sigset_t sigmask; 4358 } new_thread_info; 4359 4360 static void *clone_func(void *arg) 4361 { 4362 new_thread_info *info = arg; 4363 CPUArchState *env; 4364 CPUState *cpu; 4365 TaskState *ts; 4366 4367 env = info->env; 4368 cpu = ENV_GET_CPU(env); 4369 thread_cpu = cpu; 4370 ts = (TaskState *)env->opaque; 4371 info->tid = gettid(); 4372 cpu->host_tid = info->tid; 4373 task_settid(ts); 4374 if (info->child_tidptr) 4375 put_user_u32(info->tid, info->child_tidptr); 4376 if (info->parent_tidptr) 4377 put_user_u32(info->tid, info->parent_tidptr); 4378 /* Enable signals. */ 4379 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 4380 /* Signal to the parent that we're ready. */ 4381 pthread_mutex_lock(&info->mutex); 4382 pthread_cond_broadcast(&info->cond); 4383 pthread_mutex_unlock(&info->mutex); 4384 /* Wait until the parent has finshed initializing the tls state. */ 4385 pthread_mutex_lock(&clone_lock); 4386 pthread_mutex_unlock(&clone_lock); 4387 cpu_loop(env); 4388 /* never exits */ 4389 return NULL; 4390 } 4391 4392 /* do_fork() Must return host values and target errnos (unlike most 4393 do_*() functions). */ 4394 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4395 abi_ulong parent_tidptr, target_ulong newtls, 4396 abi_ulong child_tidptr) 4397 { 4398 int ret; 4399 TaskState *ts; 4400 CPUArchState *new_env; 4401 unsigned int nptl_flags; 4402 sigset_t sigmask; 4403 4404 /* Emulate vfork() with fork() */ 4405 if (flags & CLONE_VFORK) 4406 flags &= ~(CLONE_VFORK | CLONE_VM); 4407 4408 if (flags & CLONE_VM) { 4409 TaskState *parent_ts = (TaskState *)env->opaque; 4410 new_thread_info info; 4411 pthread_attr_t attr; 4412 4413 ts = g_malloc0(sizeof(TaskState)); 4414 init_task_state(ts); 4415 /* we create a new CPU instance. */ 4416 new_env = cpu_copy(env); 4417 /* Init regs that differ from the parent. */ 4418 cpu_clone_regs(new_env, newsp); 4419 new_env->opaque = ts; 4420 ts->bprm = parent_ts->bprm; 4421 ts->info = parent_ts->info; 4422 nptl_flags = flags; 4423 flags &= ~CLONE_NPTL_FLAGS2; 4424 4425 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4426 ts->child_tidptr = child_tidptr; 4427 } 4428 4429 if (nptl_flags & CLONE_SETTLS) 4430 cpu_set_tls (new_env, newtls); 4431 4432 /* Grab a mutex so that thread setup appears atomic. */ 4433 pthread_mutex_lock(&clone_lock); 4434 4435 memset(&info, 0, sizeof(info)); 4436 pthread_mutex_init(&info.mutex, NULL); 4437 pthread_mutex_lock(&info.mutex); 4438 pthread_cond_init(&info.cond, NULL); 4439 info.env = new_env; 4440 if (nptl_flags & CLONE_CHILD_SETTID) 4441 info.child_tidptr = child_tidptr; 4442 if (nptl_flags & CLONE_PARENT_SETTID) 4443 info.parent_tidptr = parent_tidptr; 4444 4445 ret = pthread_attr_init(&attr); 4446 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4447 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4448 /* It is not safe to deliver signals until the child has finished 4449 initializing, so temporarily block all signals. */ 4450 sigfillset(&sigmask); 4451 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4452 4453 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4454 /* TODO: Free new CPU state if thread creation failed. 
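As the code stands, a pthread_create() failure leaks new_env and ts; only the -1 return value below reaches the caller.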
*/ 4455 4456 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4457 pthread_attr_destroy(&attr); 4458 if (ret == 0) { 4459 /* Wait for the child to initialize. */ 4460 pthread_cond_wait(&info.cond, &info.mutex); 4461 ret = info.tid; 4462 if (flags & CLONE_PARENT_SETTID) 4463 put_user_u32(ret, parent_tidptr); 4464 } else { 4465 ret = -1; 4466 } 4467 pthread_mutex_unlock(&info.mutex); 4468 pthread_cond_destroy(&info.cond); 4469 pthread_mutex_destroy(&info.mutex); 4470 pthread_mutex_unlock(&clone_lock); 4471 } else { 4472 /* if no CLONE_VM, we consider it is a fork */ 4473 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4474 return -EINVAL; 4475 fork_start(); 4476 ret = fork(); 4477 if (ret == 0) { 4478 /* Child Process. */ 4479 cpu_clone_regs(env, newsp); 4480 fork_end(1); 4481 /* There is a race condition here. The parent process could 4482 theoretically read the TID in the child process before the child 4483 tid is set. This would require using either ptrace 4484 (not implemented) or having *_tidptr to point at a shared memory 4485 mapping. We can't repeat the spinlock hack used above because 4486 the child process gets its own copy of the lock. */ 4487 if (flags & CLONE_CHILD_SETTID) 4488 put_user_u32(gettid(), child_tidptr); 4489 if (flags & CLONE_PARENT_SETTID) 4490 put_user_u32(gettid(), parent_tidptr); 4491 ts = (TaskState *)env->opaque; 4492 if (flags & CLONE_SETTLS) 4493 cpu_set_tls (env, newtls); 4494 if (flags & CLONE_CHILD_CLEARTID) 4495 ts->child_tidptr = child_tidptr; 4496 } else { 4497 fork_end(0); 4498 } 4499 } 4500 return ret; 4501 } 4502 4503 /* warning : doesn't handle linux specific flags... */ 4504 static int target_to_host_fcntl_cmd(int cmd) 4505 { 4506 switch(cmd) { 4507 case TARGET_F_DUPFD: 4508 case TARGET_F_GETFD: 4509 case TARGET_F_SETFD: 4510 case TARGET_F_GETFL: 4511 case TARGET_F_SETFL: 4512 return cmd; 4513 case TARGET_F_GETLK: 4514 return F_GETLK; 4515 case TARGET_F_SETLK: 4516 return F_SETLK; 4517 case TARGET_F_SETLKW: 4518 return F_SETLKW; 4519 case TARGET_F_GETOWN: 4520 return F_GETOWN; 4521 case TARGET_F_SETOWN: 4522 return F_SETOWN; 4523 case TARGET_F_GETSIG: 4524 return F_GETSIG; 4525 case TARGET_F_SETSIG: 4526 return F_SETSIG; 4527 #if TARGET_ABI_BITS == 32 4528 case TARGET_F_GETLK64: 4529 return F_GETLK64; 4530 case TARGET_F_SETLK64: 4531 return F_SETLK64; 4532 case TARGET_F_SETLKW64: 4533 return F_SETLKW64; 4534 #endif 4535 case TARGET_F_SETLEASE: 4536 return F_SETLEASE; 4537 case TARGET_F_GETLEASE: 4538 return F_GETLEASE; 4539 #ifdef F_DUPFD_CLOEXEC 4540 case TARGET_F_DUPFD_CLOEXEC: 4541 return F_DUPFD_CLOEXEC; 4542 #endif 4543 case TARGET_F_NOTIFY: 4544 return F_NOTIFY; 4545 default: 4546 return -TARGET_EINVAL; 4547 } 4548 return -TARGET_EINVAL; 4549 } 4550 4551 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4552 static const bitmask_transtbl flock_tbl[] = { 4553 TRANSTBL_CONVERT(F_RDLCK), 4554 TRANSTBL_CONVERT(F_WRLCK), 4555 TRANSTBL_CONVERT(F_UNLCK), 4556 TRANSTBL_CONVERT(F_EXLCK), 4557 TRANSTBL_CONVERT(F_SHLCK), 4558 { 0, 0, 0, 0 } 4559 }; 4560 4561 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4562 { 4563 struct flock fl; 4564 struct target_flock *target_fl; 4565 struct flock64 fl64; 4566 struct target_flock64 *target_fl64; 4567 abi_long ret; 4568 int host_cmd = target_to_host_fcntl_cmd(cmd); 4569 4570 if (host_cmd == -TARGET_EINVAL) 4571 return host_cmd; 4572 4573 switch(cmd) { 4574 case TARGET_F_GETLK: 4575 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4576 return -TARGET_EFAULT; 4577 fl.l_type = 4578 
target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4579 fl.l_whence = tswap16(target_fl->l_whence); 4580 fl.l_start = tswapal(target_fl->l_start); 4581 fl.l_len = tswapal(target_fl->l_len); 4582 fl.l_pid = tswap32(target_fl->l_pid); 4583 unlock_user_struct(target_fl, arg, 0); 4584 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4585 if (ret == 0) { 4586 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4587 return -TARGET_EFAULT; 4588 target_fl->l_type = 4589 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4590 target_fl->l_whence = tswap16(fl.l_whence); 4591 target_fl->l_start = tswapal(fl.l_start); 4592 target_fl->l_len = tswapal(fl.l_len); 4593 target_fl->l_pid = tswap32(fl.l_pid); 4594 unlock_user_struct(target_fl, arg, 1); 4595 } 4596 break; 4597 4598 case TARGET_F_SETLK: 4599 case TARGET_F_SETLKW: 4600 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4601 return -TARGET_EFAULT; 4602 fl.l_type = 4603 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4604 fl.l_whence = tswap16(target_fl->l_whence); 4605 fl.l_start = tswapal(target_fl->l_start); 4606 fl.l_len = tswapal(target_fl->l_len); 4607 fl.l_pid = tswap32(target_fl->l_pid); 4608 unlock_user_struct(target_fl, arg, 0); 4609 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4610 break; 4611 4612 case TARGET_F_GETLK64: 4613 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4614 return -TARGET_EFAULT; 4615 fl64.l_type = 4616 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4617 fl64.l_whence = tswap16(target_fl64->l_whence); 4618 fl64.l_start = tswap64(target_fl64->l_start); 4619 fl64.l_len = tswap64(target_fl64->l_len); 4620 fl64.l_pid = tswap32(target_fl64->l_pid); 4621 unlock_user_struct(target_fl64, arg, 0); 4622 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4623 if (ret == 0) { 4624 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4625 return -TARGET_EFAULT; 4626 target_fl64->l_type = 4627 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4628 target_fl64->l_whence = tswap16(fl64.l_whence); 4629 target_fl64->l_start = tswap64(fl64.l_start); 4630 target_fl64->l_len = tswap64(fl64.l_len); 4631 target_fl64->l_pid = tswap32(fl64.l_pid); 4632 unlock_user_struct(target_fl64, arg, 1); 4633 } 4634 break; 4635 case TARGET_F_SETLK64: 4636 case TARGET_F_SETLKW64: 4637 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4638 return -TARGET_EFAULT; 4639 fl64.l_type = 4640 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4641 fl64.l_whence = tswap16(target_fl64->l_whence); 4642 fl64.l_start = tswap64(target_fl64->l_start); 4643 fl64.l_len = tswap64(target_fl64->l_len); 4644 fl64.l_pid = tswap32(target_fl64->l_pid); 4645 unlock_user_struct(target_fl64, arg, 0); 4646 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4647 break; 4648 4649 case TARGET_F_GETFL: 4650 ret = get_errno(fcntl(fd, host_cmd, arg)); 4651 if (ret >= 0) { 4652 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4653 } 4654 break; 4655 4656 case TARGET_F_SETFL: 4657 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4658 break; 4659 4660 case TARGET_F_SETOWN: 4661 case TARGET_F_GETOWN: 4662 case TARGET_F_SETSIG: 4663 case TARGET_F_GETSIG: 4664 case TARGET_F_SETLEASE: 4665 case TARGET_F_GETLEASE: 4666 ret = get_errno(fcntl(fd, host_cmd, arg)); 4667 break; 4668 4669 default: 4670 ret = get_errno(fcntl(fd, cmd, arg)); 4671 break; 4672 } 4673 return ret; 4674 } 4675 4676 #ifdef USE_UID16 4677 4678 static inline int high2lowuid(int uid) 4679 { 4680 
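/* Clamp ids that do not fit the legacy 16-bit ABI to 65534, the kernel's conventional overflow ("nobody") id. */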
if (uid > 65535) 4681 return 65534; 4682 else 4683 return uid; 4684 } 4685 4686 static inline int high2lowgid(int gid) 4687 { 4688 if (gid > 65535) 4689 return 65534; 4690 else 4691 return gid; 4692 } 4693 4694 static inline int low2highuid(int uid) 4695 { 4696 if ((int16_t)uid == -1) 4697 return -1; 4698 else 4699 return uid; 4700 } 4701 4702 static inline int low2highgid(int gid) 4703 { 4704 if ((int16_t)gid == -1) 4705 return -1; 4706 else 4707 return gid; 4708 } 4709 static inline int tswapid(int id) 4710 { 4711 return tswap16(id); 4712 } 4713 #else /* !USE_UID16 */ 4714 static inline int high2lowuid(int uid) 4715 { 4716 return uid; 4717 } 4718 static inline int high2lowgid(int gid) 4719 { 4720 return gid; 4721 } 4722 static inline int low2highuid(int uid) 4723 { 4724 return uid; 4725 } 4726 static inline int low2highgid(int gid) 4727 { 4728 return gid; 4729 } 4730 static inline int tswapid(int id) 4731 { 4732 return tswap32(id); 4733 } 4734 #endif /* USE_UID16 */ 4735 4736 void syscall_init(void) 4737 { 4738 IOCTLEntry *ie; 4739 const argtype *arg_type; 4740 int size; 4741 int i; 4742 4743 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4744 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4745 #include "syscall_types.h" 4746 #undef STRUCT 4747 #undef STRUCT_SPECIAL 4748 4749 /* Build target_to_host_errno_table[] table from 4750 * host_to_target_errno_table[]. */ 4751 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4752 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4753 } 4754 4755 /* we patch the ioctl size if necessary. We rely on the fact that 4756 no ioctl has all the bits at '1' in the size field */ 4757 ie = ioctl_entries; 4758 while (ie->target_cmd != 0) { 4759 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4760 TARGET_IOC_SIZEMASK) { 4761 arg_type = ie->arg_type; 4762 if (arg_type[0] != TYPE_PTR) { 4763 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4764 ie->target_cmd); 4765 exit(1); 4766 } 4767 arg_type++; 4768 size = thunk_type_size(arg_type, 0); 4769 ie->target_cmd = (ie->target_cmd & 4770 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4771 (size << TARGET_IOC_SIZESHIFT); 4772 } 4773 4774 /* automatic consistency check if same arch */ 4775 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4776 (defined(__x86_64__) && defined(TARGET_X86_64)) 4777 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4778 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4779 ie->name, ie->target_cmd, ie->host_cmd); 4780 } 4781 #endif 4782 ie++; 4783 } 4784 } 4785 4786 #if TARGET_ABI_BITS == 32 4787 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4788 { 4789 #ifdef TARGET_WORDS_BIGENDIAN 4790 return ((uint64_t)word0 << 32) | word1; 4791 #else 4792 return ((uint64_t)word1 << 32) | word0; 4793 #endif 4794 } 4795 #else /* TARGET_ABI_BITS == 32 */ 4796 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4797 { 4798 return word0; 4799 } 4800 #endif /* TARGET_ABI_BITS != 32 */ 4801 4802 #ifdef TARGET_NR_truncate64 4803 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4804 abi_long arg2, 4805 abi_long arg3, 4806 abi_long arg4) 4807 { 4808 if (regpairs_aligned(cpu_env)) { 4809 arg2 = arg3; 4810 arg3 = arg4; 4811 } 4812 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4813 } 4814 #endif 4815 4816 #ifdef TARGET_NR_ftruncate64 4817 
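/* Same register-pair handling as target_truncate64() above: on 32-bit targets the 64-bit length is passed as two words, and ABIs that require 64-bit arguments in aligned register pairs insert a padding slot, which regpairs_aligned() detects so the pair can be shifted down before target_offset64() reassembles it. For example, on a little-endian guest a length of 0x100000000LL arrives with a low word of 0 and a high word of 1. */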
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4818 abi_long arg2, 4819 abi_long arg3, 4820 abi_long arg4) 4821 { 4822 if (regpairs_aligned(cpu_env)) { 4823 arg2 = arg3; 4824 arg3 = arg4; 4825 } 4826 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4827 } 4828 #endif 4829 4830 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4831 abi_ulong target_addr) 4832 { 4833 struct target_timespec *target_ts; 4834 4835 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4836 return -TARGET_EFAULT; 4837 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4838 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4839 unlock_user_struct(target_ts, target_addr, 0); 4840 return 0; 4841 } 4842 4843 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4844 struct timespec *host_ts) 4845 { 4846 struct target_timespec *target_ts; 4847 4848 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4849 return -TARGET_EFAULT; 4850 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4851 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4852 unlock_user_struct(target_ts, target_addr, 1); 4853 return 0; 4854 } 4855 4856 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 4857 abi_ulong target_addr) 4858 { 4859 struct target_itimerspec *target_itspec; 4860 4861 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 4862 return -TARGET_EFAULT; 4863 } 4864 4865 host_itspec->it_interval.tv_sec = 4866 tswapal(target_itspec->it_interval.tv_sec); 4867 host_itspec->it_interval.tv_nsec = 4868 tswapal(target_itspec->it_interval.tv_nsec); 4869 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 4870 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 4871 4872 unlock_user_struct(target_itspec, target_addr, 1); 4873 return 0; 4874 } 4875 4876 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 4877 struct itimerspec *host_its) 4878 { 4879 struct target_itimerspec *target_itspec; 4880 4881 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 4882 return -TARGET_EFAULT; 4883 } 4884 4885 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 4886 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 4887 4888 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 4889 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 4890 4891 unlock_user_struct(target_itspec, target_addr, 0); 4892 return 0; 4893 } 4894 4895 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4896 static inline abi_long host_to_target_stat64(void *cpu_env, 4897 abi_ulong target_addr, 4898 struct stat *host_st) 4899 { 4900 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 4901 if (((CPUARMState *)cpu_env)->eabi) { 4902 struct target_eabi_stat64 *target_st; 4903 4904 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4905 return -TARGET_EFAULT; 4906 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4907 __put_user(host_st->st_dev, &target_st->st_dev); 4908 __put_user(host_st->st_ino, &target_st->st_ino); 4909 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4910 __put_user(host_st->st_ino, &target_st->__st_ino); 4911 #endif 4912 __put_user(host_st->st_mode, &target_st->st_mode); 4913 __put_user(host_st->st_nlink, &target_st->st_nlink); 4914 __put_user(host_st->st_uid, &target_st->st_uid); 4915 __put_user(host_st->st_gid, &target_st->st_gid); 4916 
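/* __put_user() stores each field in guest byte order, so no explicit tswap calls are needed for the remaining members. */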
__put_user(host_st->st_rdev, &target_st->st_rdev); 4917 __put_user(host_st->st_size, &target_st->st_size); 4918 __put_user(host_st->st_blksize, &target_st->st_blksize); 4919 __put_user(host_st->st_blocks, &target_st->st_blocks); 4920 __put_user(host_st->st_atime, &target_st->target_st_atime); 4921 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4922 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4923 unlock_user_struct(target_st, target_addr, 1); 4924 } else 4925 #endif 4926 { 4927 #if defined(TARGET_HAS_STRUCT_STAT64) 4928 struct target_stat64 *target_st; 4929 #else 4930 struct target_stat *target_st; 4931 #endif 4932 4933 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4934 return -TARGET_EFAULT; 4935 memset(target_st, 0, sizeof(*target_st)); 4936 __put_user(host_st->st_dev, &target_st->st_dev); 4937 __put_user(host_st->st_ino, &target_st->st_ino); 4938 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4939 __put_user(host_st->st_ino, &target_st->__st_ino); 4940 #endif 4941 __put_user(host_st->st_mode, &target_st->st_mode); 4942 __put_user(host_st->st_nlink, &target_st->st_nlink); 4943 __put_user(host_st->st_uid, &target_st->st_uid); 4944 __put_user(host_st->st_gid, &target_st->st_gid); 4945 __put_user(host_st->st_rdev, &target_st->st_rdev); 4946 /* XXX: better use of kernel struct */ 4947 __put_user(host_st->st_size, &target_st->st_size); 4948 __put_user(host_st->st_blksize, &target_st->st_blksize); 4949 __put_user(host_st->st_blocks, &target_st->st_blocks); 4950 __put_user(host_st->st_atime, &target_st->target_st_atime); 4951 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4952 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4953 unlock_user_struct(target_st, target_addr, 1); 4954 } 4955 4956 return 0; 4957 } 4958 #endif 4959 4960 /* ??? Using host futex calls even when target atomic operations 4961 are not really atomic probably breaks things. However implementing 4962 futexes locally would make futexes shared between multiple processes 4963 tricky. However they're probably useless because guest atomic 4964 operations won't work either. */ 4965 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4966 target_ulong uaddr2, int val3) 4967 { 4968 struct timespec ts, *pts; 4969 int base_op; 4970 4971 /* ??? We assume FUTEX_* constants are the same on both host 4972 and target. */ 4973 #ifdef FUTEX_CMD_MASK 4974 base_op = op & FUTEX_CMD_MASK; 4975 #else 4976 base_op = op; 4977 #endif 4978 switch (base_op) { 4979 case FUTEX_WAIT: 4980 case FUTEX_WAIT_BITSET: 4981 if (timeout) { 4982 pts = &ts; 4983 target_to_host_timespec(pts, timeout); 4984 } else { 4985 pts = NULL; 4986 } 4987 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4988 pts, NULL, val3)); 4989 case FUTEX_WAKE: 4990 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4991 case FUTEX_FD: 4992 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4993 case FUTEX_REQUEUE: 4994 case FUTEX_CMP_REQUEUE: 4995 case FUTEX_WAKE_OP: 4996 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4997 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4998 But the prototype takes a `struct timespec *'; insert casts 4999 to satisfy the compiler. We do not need to tswap TIMEOUT 5000 since it's not compared to guest memory. */ 5001 pts = (struct timespec *)(uintptr_t) timeout; 5002 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 5003 g2h(uaddr2), 5004 (base_op == FUTEX_CMP_REQUEUE 5005 ? 
tswap32(val3) 5006 : val3))); 5007 default: 5008 return -TARGET_ENOSYS; 5009 } 5010 } 5011 5012 /* Map host to target signal numbers for the wait family of syscalls. 5013 Assume all other status bits are the same. */ 5014 int host_to_target_waitstatus(int status) 5015 { 5016 if (WIFSIGNALED(status)) { 5017 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 5018 } 5019 if (WIFSTOPPED(status)) { 5020 return (host_to_target_signal(WSTOPSIG(status)) << 8) 5021 | (status & 0xff); 5022 } 5023 return status; 5024 } 5025 5026 static int relstr_to_int(const char *s) 5027 { 5028 /* Convert a uname release string like "2.6.18" to an integer 5029 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.) 5030 */ 5031 int i, n, tmp; 5032 5033 tmp = 0; 5034 for (i = 0; i < 3; i++) { 5035 n = 0; 5036 while (*s >= '0' && *s <= '9') { 5037 n *= 10; 5038 n += *s - '0'; 5039 s++; 5040 } 5041 tmp = (tmp << 8) + n; 5042 if (*s == '.') { 5043 s++; 5044 } 5045 } 5046 return tmp; 5047 } 5048 5049 int get_osversion(void) 5050 { 5051 static int osversion; 5052 struct new_utsname buf; 5053 const char *s; 5054 5055 if (osversion) 5056 return osversion; 5057 if (qemu_uname_release && *qemu_uname_release) { 5058 s = qemu_uname_release; 5059 } else { 5060 if (sys_uname(&buf)) 5061 return 0; 5062 s = buf.release; 5063 } 5064 osversion = relstr_to_int(s); 5065 return osversion; 5066 } 5067 5068 void init_qemu_uname_release(void) 5069 { 5070 /* Initialize qemu_uname_release for later use. 5071 * If the host kernel is too old and the user hasn't asked for 5072 * a specific fake version number, we might want to fake a minimum 5073 * target kernel version. 5074 */ 5075 #ifdef UNAME_MINIMUM_RELEASE 5076 struct new_utsname buf; 5077 5078 if (qemu_uname_release && *qemu_uname_release) { 5079 return; 5080 } 5081 5082 if (sys_uname(&buf)) { 5083 return; 5084 } 5085 5086 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) { 5087 qemu_uname_release = UNAME_MINIMUM_RELEASE; 5088 } 5089 #endif 5090 } 5091 5092 static int open_self_maps(void *cpu_env, int fd) 5093 { 5094 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5095 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5096 #endif 5097 FILE *fp; 5098 char *line = NULL; 5099 size_t len = 0; 5100 ssize_t read; 5101 5102 fp = fopen("/proc/self/maps", "r"); 5103 if (fp == NULL) { 5104 return -EACCES; 5105 } 5106 5107 while ((read = getline(&line, &len, fp)) != -1) { 5108 int fields, dev_maj, dev_min, inode; 5109 uint64_t min, max, offset; 5110 char flag_r, flag_w, flag_x, flag_p; 5111 char path[512] = ""; 5112 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 5113 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 5114 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5115 5116 if ((fields < 10) || (fields > 11)) { 5117 continue; 5118 } 5119 if (!strncmp(path, "[stack]", 7)) { 5120 continue; 5121 } 5122 if (h2g_valid(min) && h2g_valid(max)) { 5123 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5124 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 5125 h2g(min), h2g(max), flag_r, flag_w, 5126 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5127 path[0] ? 
" " : "", path); 5128 } 5129 } 5130 5131 free(line); 5132 fclose(fp); 5133 5134 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5135 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 5136 (unsigned long long)ts->info->stack_limit, 5137 (unsigned long long)(ts->info->start_stack + 5138 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 5139 (unsigned long long)0); 5140 #endif 5141 5142 return 0; 5143 } 5144 5145 static int open_self_stat(void *cpu_env, int fd) 5146 { 5147 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5148 abi_ulong start_stack = ts->info->start_stack; 5149 int i; 5150 5151 for (i = 0; i < 44; i++) { 5152 char buf[128]; 5153 int len; 5154 uint64_t val = 0; 5155 5156 if (i == 0) { 5157 /* pid */ 5158 val = getpid(); 5159 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5160 } else if (i == 1) { 5161 /* app name */ 5162 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5163 } else if (i == 27) { 5164 /* stack bottom */ 5165 val = start_stack; 5166 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5167 } else { 5168 /* for the rest, there is MasterCard */ 5169 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 5170 } 5171 5172 len = strlen(buf); 5173 if (write(fd, buf, len) != len) { 5174 return -1; 5175 } 5176 } 5177 5178 return 0; 5179 } 5180 5181 static int open_self_auxv(void *cpu_env, int fd) 5182 { 5183 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5184 abi_ulong auxv = ts->info->saved_auxv; 5185 abi_ulong len = ts->info->auxv_len; 5186 char *ptr; 5187 5188 /* 5189 * Auxiliary vector is stored in target process stack. 5190 * read in whole auxv vector and copy it to file 5191 */ 5192 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5193 if (ptr != NULL) { 5194 while (len > 0) { 5195 ssize_t r; 5196 r = write(fd, ptr, len); 5197 if (r <= 0) { 5198 break; 5199 } 5200 len -= r; 5201 ptr += r; 5202 } 5203 lseek(fd, 0, SEEK_SET); 5204 unlock_user(ptr, auxv, len); 5205 } 5206 5207 return 0; 5208 } 5209 5210 static int is_proc_myself(const char *filename, const char *entry) 5211 { 5212 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 5213 filename += strlen("/proc/"); 5214 if (!strncmp(filename, "self/", strlen("self/"))) { 5215 filename += strlen("self/"); 5216 } else if (*filename >= '1' && *filename <= '9') { 5217 char myself[80]; 5218 snprintf(myself, sizeof(myself), "%d/", getpid()); 5219 if (!strncmp(filename, myself, strlen(myself))) { 5220 filename += strlen(myself); 5221 } else { 5222 return 0; 5223 } 5224 } else { 5225 return 0; 5226 } 5227 if (!strcmp(filename, entry)) { 5228 return 1; 5229 } 5230 } 5231 return 0; 5232 } 5233 5234 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5235 static int is_proc(const char *filename, const char *entry) 5236 { 5237 return strcmp(filename, entry) == 0; 5238 } 5239 5240 static int open_net_route(void *cpu_env, int fd) 5241 { 5242 FILE *fp; 5243 char *line = NULL; 5244 size_t len = 0; 5245 ssize_t read; 5246 5247 fp = fopen("/proc/net/route", "r"); 5248 if (fp == NULL) { 5249 return -EACCES; 5250 } 5251 5252 /* read header */ 5253 5254 read = getline(&line, &len, fp); 5255 dprintf(fd, "%s", line); 5256 5257 /* read routes */ 5258 5259 while ((read = getline(&line, &len, fp)) != -1) { 5260 char iface[16]; 5261 uint32_t dest, gw, mask; 5262 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 5263 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5264 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 5265 &mask, &mtu, &window, &irtt); 5266 
dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5267 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 5268 metric, tswap32(mask), mtu, window, irtt); 5269 } 5270 5271 free(line); 5272 fclose(fp); 5273 5274 return 0; 5275 } 5276 #endif 5277 5278 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 5279 { 5280 struct fake_open { 5281 const char *filename; 5282 int (*fill)(void *cpu_env, int fd); 5283 int (*cmp)(const char *s1, const char *s2); 5284 }; 5285 const struct fake_open *fake_open; 5286 static const struct fake_open fakes[] = { 5287 { "maps", open_self_maps, is_proc_myself }, 5288 { "stat", open_self_stat, is_proc_myself }, 5289 { "auxv", open_self_auxv, is_proc_myself }, 5290 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5291 { "/proc/net/route", open_net_route, is_proc }, 5292 #endif 5293 { NULL, NULL, NULL } 5294 }; 5295 5296 for (fake_open = fakes; fake_open->filename; fake_open++) { 5297 if (fake_open->cmp(pathname, fake_open->filename)) { 5298 break; 5299 } 5300 } 5301 5302 if (fake_open->filename) { 5303 const char *tmpdir; 5304 char filename[PATH_MAX]; 5305 int fd, r; 5306 5307 /* create temporary file to map stat to */ 5308 tmpdir = getenv("TMPDIR"); 5309 if (!tmpdir) 5310 tmpdir = "/tmp"; 5311 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5312 fd = mkstemp(filename); 5313 if (fd < 0) { 5314 return fd; 5315 } 5316 unlink(filename); 5317 5318 if ((r = fake_open->fill(cpu_env, fd))) { 5319 close(fd); 5320 return r; 5321 } 5322 lseek(fd, 0, SEEK_SET); 5323 5324 return fd; 5325 } 5326 5327 return get_errno(open(path(pathname), flags, mode)); 5328 } 5329 5330 /* do_syscall() should always have a single exit point at the end so 5331 that actions, such as logging of syscall results, can be performed. 5332 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 5333 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5334 abi_long arg2, abi_long arg3, abi_long arg4, 5335 abi_long arg5, abi_long arg6, abi_long arg7, 5336 abi_long arg8) 5337 { 5338 CPUState *cpu = ENV_GET_CPU(cpu_env); 5339 abi_long ret; 5340 struct stat st; 5341 struct statfs stfs; 5342 void *p; 5343 5344 #ifdef DEBUG 5345 gemu_log("syscall %d", num); 5346 #endif 5347 if(do_strace) 5348 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5349 5350 switch(num) { 5351 case TARGET_NR_exit: 5352 /* In old applications this may be used to implement _exit(2). 5353 However in threaded applictions it is used for thread termination, 5354 and _exit_group is used for application termination. 5355 Do thread termination if we have more then one thread. */ 5356 /* FIXME: This probably breaks if a signal arrives. We should probably 5357 be disabling signals. */ 5358 if (CPU_NEXT(first_cpu)) { 5359 TaskState *ts; 5360 5361 cpu_list_lock(); 5362 /* Remove the CPU from the list. 
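Then clear the child tid word and FUTEX_WAKE any waiter (the CLONE_CHILD_CLEARTID contract) before releasing the task state and exiting this host thread.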
*/ 5363 QTAILQ_REMOVE(&cpus, cpu, node); 5364 cpu_list_unlock(); 5365 ts = ((CPUArchState *)cpu_env)->opaque; 5366 if (ts->child_tidptr) { 5367 put_user_u32(0, ts->child_tidptr); 5368 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5369 NULL, NULL, 0); 5370 } 5371 thread_cpu = NULL; 5372 object_unref(OBJECT(ENV_GET_CPU(cpu_env))); 5373 g_free(ts); 5374 pthread_exit(NULL); 5375 } 5376 #ifdef TARGET_GPROF 5377 _mcleanup(); 5378 #endif 5379 gdb_exit(cpu_env, arg1); 5380 _exit(arg1); 5381 ret = 0; /* avoid warning */ 5382 break; 5383 case TARGET_NR_read: 5384 if (arg3 == 0) 5385 ret = 0; 5386 else { 5387 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5388 goto efault; 5389 ret = get_errno(read(arg1, p, arg3)); 5390 unlock_user(p, arg2, ret); 5391 } 5392 break; 5393 case TARGET_NR_write: 5394 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5395 goto efault; 5396 ret = get_errno(write(arg1, p, arg3)); 5397 unlock_user(p, arg2, 0); 5398 break; 5399 case TARGET_NR_open: 5400 if (!(p = lock_user_string(arg1))) 5401 goto efault; 5402 ret = get_errno(do_open(cpu_env, p, 5403 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5404 arg3)); 5405 unlock_user(p, arg1, 0); 5406 break; 5407 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5408 case TARGET_NR_openat: 5409 if (!(p = lock_user_string(arg2))) 5410 goto efault; 5411 ret = get_errno(sys_openat(arg1, 5412 path(p), 5413 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5414 arg4)); 5415 unlock_user(p, arg2, 0); 5416 break; 5417 #endif 5418 case TARGET_NR_close: 5419 ret = get_errno(close(arg1)); 5420 break; 5421 case TARGET_NR_brk: 5422 ret = do_brk(arg1); 5423 break; 5424 case TARGET_NR_fork: 5425 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5426 break; 5427 #ifdef TARGET_NR_waitpid 5428 case TARGET_NR_waitpid: 5429 { 5430 int status; 5431 ret = get_errno(waitpid(arg1, &status, arg3)); 5432 if (!is_error(ret) && arg2 && ret 5433 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5434 goto efault; 5435 } 5436 break; 5437 #endif 5438 #ifdef TARGET_NR_waitid 5439 case TARGET_NR_waitid: 5440 { 5441 siginfo_t info; 5442 info.si_pid = 0; 5443 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5444 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5445 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5446 goto efault; 5447 host_to_target_siginfo(p, &info); 5448 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5449 } 5450 } 5451 break; 5452 #endif 5453 #ifdef TARGET_NR_creat /* not on alpha */ 5454 case TARGET_NR_creat: 5455 if (!(p = lock_user_string(arg1))) 5456 goto efault; 5457 ret = get_errno(creat(p, arg2)); 5458 unlock_user(p, arg1, 0); 5459 break; 5460 #endif 5461 case TARGET_NR_link: 5462 { 5463 void * p2; 5464 p = lock_user_string(arg1); 5465 p2 = lock_user_string(arg2); 5466 if (!p || !p2) 5467 ret = -TARGET_EFAULT; 5468 else 5469 ret = get_errno(link(p, p2)); 5470 unlock_user(p2, arg2, 0); 5471 unlock_user(p, arg1, 0); 5472 } 5473 break; 5474 #if defined(TARGET_NR_linkat) 5475 case TARGET_NR_linkat: 5476 { 5477 void * p2 = NULL; 5478 if (!arg2 || !arg4) 5479 goto efault; 5480 p = lock_user_string(arg2); 5481 p2 = lock_user_string(arg4); 5482 if (!p || !p2) 5483 ret = -TARGET_EFAULT; 5484 else 5485 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5486 unlock_user(p, arg2, 0); 5487 unlock_user(p2, arg4, 0); 5488 } 5489 break; 5490 #endif 5491 case TARGET_NR_unlink: 5492 if (!(p = lock_user_string(arg1))) 5493 goto efault; 5494 ret = get_errno(unlink(p)); 5495 unlock_user(p, arg1, 0); 5496 break; 
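/* The *at() variants that follow take a directory fd in arg1 (or AT_FDCWD) and resolve relative paths against it; otherwise they mirror the classic path-based syscalls above. */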
5497 #if defined(TARGET_NR_unlinkat) 5498 case TARGET_NR_unlinkat: 5499 if (!(p = lock_user_string(arg2))) 5500 goto efault; 5501 ret = get_errno(unlinkat(arg1, p, arg3)); 5502 unlock_user(p, arg2, 0); 5503 break; 5504 #endif 5505 case TARGET_NR_execve: 5506 { 5507 char **argp, **envp; 5508 int argc, envc; 5509 abi_ulong gp; 5510 abi_ulong guest_argp; 5511 abi_ulong guest_envp; 5512 abi_ulong addr; 5513 char **q; 5514 int total_size = 0; 5515 5516 argc = 0; 5517 guest_argp = arg2; 5518 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5519 if (get_user_ual(addr, gp)) 5520 goto efault; 5521 if (!addr) 5522 break; 5523 argc++; 5524 } 5525 envc = 0; 5526 guest_envp = arg3; 5527 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5528 if (get_user_ual(addr, gp)) 5529 goto efault; 5530 if (!addr) 5531 break; 5532 envc++; 5533 } 5534 5535 argp = alloca((argc + 1) * sizeof(void *)); 5536 envp = alloca((envc + 1) * sizeof(void *)); 5537 5538 for (gp = guest_argp, q = argp; gp; 5539 gp += sizeof(abi_ulong), q++) { 5540 if (get_user_ual(addr, gp)) 5541 goto execve_efault; 5542 if (!addr) 5543 break; 5544 if (!(*q = lock_user_string(addr))) 5545 goto execve_efault; 5546 total_size += strlen(*q) + 1; 5547 } 5548 *q = NULL; 5549 5550 for (gp = guest_envp, q = envp; gp; 5551 gp += sizeof(abi_ulong), q++) { 5552 if (get_user_ual(addr, gp)) 5553 goto execve_efault; 5554 if (!addr) 5555 break; 5556 if (!(*q = lock_user_string(addr))) 5557 goto execve_efault; 5558 total_size += strlen(*q) + 1; 5559 } 5560 *q = NULL; 5561 5562 /* This case will not be caught by the host's execve() if its 5563 page size is bigger than the target's. */ 5564 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5565 ret = -TARGET_E2BIG; 5566 goto execve_end; 5567 } 5568 if (!(p = lock_user_string(arg1))) 5569 goto execve_efault; 5570 ret = get_errno(execve(p, argp, envp)); 5571 unlock_user(p, arg1, 0); 5572 5573 goto execve_end; 5574 5575 execve_efault: 5576 ret = -TARGET_EFAULT; 5577 5578 execve_end: 5579 for (gp = guest_argp, q = argp; *q; 5580 gp += sizeof(abi_ulong), q++) { 5581 if (get_user_ual(addr, gp) 5582 || !addr) 5583 break; 5584 unlock_user(*q, addr, 0); 5585 } 5586 for (gp = guest_envp, q = envp; *q; 5587 gp += sizeof(abi_ulong), q++) { 5588 if (get_user_ual(addr, gp) 5589 || !addr) 5590 break; 5591 unlock_user(*q, addr, 0); 5592 } 5593 } 5594 break; 5595 case TARGET_NR_chdir: 5596 if (!(p = lock_user_string(arg1))) 5597 goto efault; 5598 ret = get_errno(chdir(p)); 5599 unlock_user(p, arg1, 0); 5600 break; 5601 #ifdef TARGET_NR_time 5602 case TARGET_NR_time: 5603 { 5604 time_t host_time; 5605 ret = get_errno(time(&host_time)); 5606 if (!is_error(ret) 5607 && arg1 5608 && put_user_sal(host_time, arg1)) 5609 goto efault; 5610 } 5611 break; 5612 #endif 5613 case TARGET_NR_mknod: 5614 if (!(p = lock_user_string(arg1))) 5615 goto efault; 5616 ret = get_errno(mknod(p, arg2, arg3)); 5617 unlock_user(p, arg1, 0); 5618 break; 5619 #if defined(TARGET_NR_mknodat) 5620 case TARGET_NR_mknodat: 5621 if (!(p = lock_user_string(arg2))) 5622 goto efault; 5623 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5624 unlock_user(p, arg2, 0); 5625 break; 5626 #endif 5627 case TARGET_NR_chmod: 5628 if (!(p = lock_user_string(arg1))) 5629 goto efault; 5630 ret = get_errno(chmod(p, arg2)); 5631 unlock_user(p, arg1, 0); 5632 break; 5633 #ifdef TARGET_NR_break 5634 case TARGET_NR_break: 5635 goto unimplemented; 5636 #endif 5637 #ifdef TARGET_NR_oldstat 5638 case TARGET_NR_oldstat: 5639 goto unimplemented; 5640 #endif 5641 case 
TARGET_NR_lseek: 5642 ret = get_errno(lseek(arg1, arg2, arg3)); 5643 break; 5644 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5645 /* Alpha specific */ 5646 case TARGET_NR_getxpid: 5647 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5648 ret = get_errno(getpid()); 5649 break; 5650 #endif 5651 #ifdef TARGET_NR_getpid 5652 case TARGET_NR_getpid: 5653 ret = get_errno(getpid()); 5654 break; 5655 #endif 5656 case TARGET_NR_mount: 5657 { 5658 /* need to look at the data field */ 5659 void *p2, *p3; 5660 p = lock_user_string(arg1); 5661 p2 = lock_user_string(arg2); 5662 p3 = lock_user_string(arg3); 5663 if (!p || !p2 || !p3) 5664 ret = -TARGET_EFAULT; 5665 else { 5666 /* FIXME - arg5 should be locked, but it isn't clear how to 5667 * do that since it's not guaranteed to be a NULL-terminated 5668 * string. 5669 */ 5670 if ( ! arg5 ) 5671 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5672 else 5673 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5674 } 5675 unlock_user(p, arg1, 0); 5676 unlock_user(p2, arg2, 0); 5677 unlock_user(p3, arg3, 0); 5678 break; 5679 } 5680 #ifdef TARGET_NR_umount 5681 case TARGET_NR_umount: 5682 if (!(p = lock_user_string(arg1))) 5683 goto efault; 5684 ret = get_errno(umount(p)); 5685 unlock_user(p, arg1, 0); 5686 break; 5687 #endif 5688 #ifdef TARGET_NR_stime /* not on alpha */ 5689 case TARGET_NR_stime: 5690 { 5691 time_t host_time; 5692 if (get_user_sal(host_time, arg1)) 5693 goto efault; 5694 ret = get_errno(stime(&host_time)); 5695 } 5696 break; 5697 #endif 5698 case TARGET_NR_ptrace: 5699 goto unimplemented; 5700 #ifdef TARGET_NR_alarm /* not on alpha */ 5701 case TARGET_NR_alarm: 5702 ret = alarm(arg1); 5703 break; 5704 #endif 5705 #ifdef TARGET_NR_oldfstat 5706 case TARGET_NR_oldfstat: 5707 goto unimplemented; 5708 #endif 5709 #ifdef TARGET_NR_pause /* not on alpha */ 5710 case TARGET_NR_pause: 5711 ret = get_errno(pause()); 5712 break; 5713 #endif 5714 #ifdef TARGET_NR_utime 5715 case TARGET_NR_utime: 5716 { 5717 struct utimbuf tbuf, *host_tbuf; 5718 struct target_utimbuf *target_tbuf; 5719 if (arg2) { 5720 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5721 goto efault; 5722 tbuf.actime = tswapal(target_tbuf->actime); 5723 tbuf.modtime = tswapal(target_tbuf->modtime); 5724 unlock_user_struct(target_tbuf, arg2, 0); 5725 host_tbuf = &tbuf; 5726 } else { 5727 host_tbuf = NULL; 5728 } 5729 if (!(p = lock_user_string(arg1))) 5730 goto efault; 5731 ret = get_errno(utime(p, host_tbuf)); 5732 unlock_user(p, arg1, 0); 5733 } 5734 break; 5735 #endif 5736 case TARGET_NR_utimes: 5737 { 5738 struct timeval *tvp, tv[2]; 5739 if (arg2) { 5740 if (copy_from_user_timeval(&tv[0], arg2) 5741 || copy_from_user_timeval(&tv[1], 5742 arg2 + sizeof(struct target_timeval))) 5743 goto efault; 5744 tvp = tv; 5745 } else { 5746 tvp = NULL; 5747 } 5748 if (!(p = lock_user_string(arg1))) 5749 goto efault; 5750 ret = get_errno(utimes(p, tvp)); 5751 unlock_user(p, arg1, 0); 5752 } 5753 break; 5754 #if defined(TARGET_NR_futimesat) 5755 case TARGET_NR_futimesat: 5756 { 5757 struct timeval *tvp, tv[2]; 5758 if (arg3) { 5759 if (copy_from_user_timeval(&tv[0], arg3) 5760 || copy_from_user_timeval(&tv[1], 5761 arg3 + sizeof(struct target_timeval))) 5762 goto efault; 5763 tvp = tv; 5764 } else { 5765 tvp = NULL; 5766 } 5767 if (!(p = lock_user_string(arg2))) 5768 goto efault; 5769 ret = get_errno(futimesat(arg1, path(p), tvp)); 5770 unlock_user(p, arg2, 0); 5771 } 5772 break; 5773 #endif 5774 #ifdef TARGET_NR_stty 5775 case TARGET_NR_stty: 
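/* stty and gtty predate termios and are not implemented by Linux itself, so ENOSYS is the right answer. */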
5776 goto unimplemented; 5777 #endif 5778 #ifdef TARGET_NR_gtty 5779 case TARGET_NR_gtty: 5780 goto unimplemented; 5781 #endif 5782 case TARGET_NR_access: 5783 if (!(p = lock_user_string(arg1))) 5784 goto efault; 5785 ret = get_errno(access(path(p), arg2)); 5786 unlock_user(p, arg1, 0); 5787 break; 5788 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5789 case TARGET_NR_faccessat: 5790 if (!(p = lock_user_string(arg2))) 5791 goto efault; 5792 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5793 unlock_user(p, arg2, 0); 5794 break; 5795 #endif 5796 #ifdef TARGET_NR_nice /* not on alpha */ 5797 case TARGET_NR_nice: 5798 ret = get_errno(nice(arg1)); 5799 break; 5800 #endif 5801 #ifdef TARGET_NR_ftime 5802 case TARGET_NR_ftime: 5803 goto unimplemented; 5804 #endif 5805 case TARGET_NR_sync: 5806 sync(); 5807 ret = 0; 5808 break; 5809 case TARGET_NR_kill: 5810 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5811 break; 5812 case TARGET_NR_rename: 5813 { 5814 void *p2; 5815 p = lock_user_string(arg1); 5816 p2 = lock_user_string(arg2); 5817 if (!p || !p2) 5818 ret = -TARGET_EFAULT; 5819 else 5820 ret = get_errno(rename(p, p2)); 5821 unlock_user(p2, arg2, 0); 5822 unlock_user(p, arg1, 0); 5823 } 5824 break; 5825 #if defined(TARGET_NR_renameat) 5826 case TARGET_NR_renameat: 5827 { 5828 void *p2; 5829 p = lock_user_string(arg2); 5830 p2 = lock_user_string(arg4); 5831 if (!p || !p2) 5832 ret = -TARGET_EFAULT; 5833 else 5834 ret = get_errno(renameat(arg1, p, arg3, p2)); 5835 unlock_user(p2, arg4, 0); 5836 unlock_user(p, arg2, 0); 5837 } 5838 break; 5839 #endif 5840 case TARGET_NR_mkdir: 5841 if (!(p = lock_user_string(arg1))) 5842 goto efault; 5843 ret = get_errno(mkdir(p, arg2)); 5844 unlock_user(p, arg1, 0); 5845 break; 5846 #if defined(TARGET_NR_mkdirat) 5847 case TARGET_NR_mkdirat: 5848 if (!(p = lock_user_string(arg2))) 5849 goto efault; 5850 ret = get_errno(mkdirat(arg1, p, arg3)); 5851 unlock_user(p, arg2, 0); 5852 break; 5853 #endif 5854 case TARGET_NR_rmdir: 5855 if (!(p = lock_user_string(arg1))) 5856 goto efault; 5857 ret = get_errno(rmdir(p)); 5858 unlock_user(p, arg1, 0); 5859 break; 5860 case TARGET_NR_dup: 5861 ret = get_errno(dup(arg1)); 5862 break; 5863 case TARGET_NR_pipe: 5864 ret = do_pipe(cpu_env, arg1, 0, 0); 5865 break; 5866 #ifdef TARGET_NR_pipe2 5867 case TARGET_NR_pipe2: 5868 ret = do_pipe(cpu_env, arg1, 5869 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5870 break; 5871 #endif 5872 case TARGET_NR_times: 5873 { 5874 struct target_tms *tmsp; 5875 struct tms tms; 5876 ret = get_errno(times(&tms)); 5877 if (arg1) { 5878 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5879 if (!tmsp) 5880 goto efault; 5881 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5882 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5883 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5884 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5885 } 5886 if (!is_error(ret)) 5887 ret = host_to_target_clock_t(ret); 5888 } 5889 break; 5890 #ifdef TARGET_NR_prof 5891 case TARGET_NR_prof: 5892 goto unimplemented; 5893 #endif 5894 #ifdef TARGET_NR_signal 5895 case TARGET_NR_signal: 5896 goto unimplemented; 5897 #endif 5898 case TARGET_NR_acct: 5899 if (arg1 == 0) { 5900 ret = get_errno(acct(NULL)); 5901 } else { 5902 if (!(p = lock_user_string(arg1))) 5903 goto efault; 5904 ret = get_errno(acct(path(p))); 5905 unlock_user(p, arg1, 0); 5906 } 5907 break; 5908 #ifdef TARGET_NR_umount2 5909 case 
TARGET_NR_umount2: 5910 if (!(p = lock_user_string(arg1))) 5911 goto efault; 5912 ret = get_errno(umount2(p, arg2)); 5913 unlock_user(p, arg1, 0); 5914 break; 5915 #endif 5916 #ifdef TARGET_NR_lock 5917 case TARGET_NR_lock: 5918 goto unimplemented; 5919 #endif 5920 case TARGET_NR_ioctl: 5921 ret = do_ioctl(arg1, arg2, arg3); 5922 break; 5923 case TARGET_NR_fcntl: 5924 ret = do_fcntl(arg1, arg2, arg3); 5925 break; 5926 #ifdef TARGET_NR_mpx 5927 case TARGET_NR_mpx: 5928 goto unimplemented; 5929 #endif 5930 case TARGET_NR_setpgid: 5931 ret = get_errno(setpgid(arg1, arg2)); 5932 break; 5933 #ifdef TARGET_NR_ulimit 5934 case TARGET_NR_ulimit: 5935 goto unimplemented; 5936 #endif 5937 #ifdef TARGET_NR_oldolduname 5938 case TARGET_NR_oldolduname: 5939 goto unimplemented; 5940 #endif 5941 case TARGET_NR_umask: 5942 ret = get_errno(umask(arg1)); 5943 break; 5944 case TARGET_NR_chroot: 5945 if (!(p = lock_user_string(arg1))) 5946 goto efault; 5947 ret = get_errno(chroot(p)); 5948 unlock_user(p, arg1, 0); 5949 break; 5950 case TARGET_NR_ustat: 5951 goto unimplemented; 5952 case TARGET_NR_dup2: 5953 ret = get_errno(dup2(arg1, arg2)); 5954 break; 5955 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5956 case TARGET_NR_dup3: 5957 ret = get_errno(dup3(arg1, arg2, arg3)); 5958 break; 5959 #endif 5960 #ifdef TARGET_NR_getppid /* not on alpha */ 5961 case TARGET_NR_getppid: 5962 ret = get_errno(getppid()); 5963 break; 5964 #endif 5965 case TARGET_NR_getpgrp: 5966 ret = get_errno(getpgrp()); 5967 break; 5968 case TARGET_NR_setsid: 5969 ret = get_errno(setsid()); 5970 break; 5971 #ifdef TARGET_NR_sigaction 5972 case TARGET_NR_sigaction: 5973 { 5974 #if defined(TARGET_ALPHA) 5975 struct target_sigaction act, oact, *pact = 0; 5976 struct target_old_sigaction *old_act; 5977 if (arg2) { 5978 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5979 goto efault; 5980 act._sa_handler = old_act->_sa_handler; 5981 target_siginitset(&act.sa_mask, old_act->sa_mask); 5982 act.sa_flags = old_act->sa_flags; 5983 act.sa_restorer = 0; 5984 unlock_user_struct(old_act, arg2, 0); 5985 pact = &act; 5986 } 5987 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5988 if (!is_error(ret) && arg3) { 5989 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5990 goto efault; 5991 old_act->_sa_handler = oact._sa_handler; 5992 old_act->sa_mask = oact.sa_mask.sig[0]; 5993 old_act->sa_flags = oact.sa_flags; 5994 unlock_user_struct(old_act, arg3, 1); 5995 } 5996 #elif defined(TARGET_MIPS) 5997 struct target_sigaction act, oact, *pact, *old_act; 5998 5999 if (arg2) { 6000 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6001 goto efault; 6002 act._sa_handler = old_act->_sa_handler; 6003 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 6004 act.sa_flags = old_act->sa_flags; 6005 unlock_user_struct(old_act, arg2, 0); 6006 pact = &act; 6007 } else { 6008 pact = NULL; 6009 } 6010 6011 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6012 6013 if (!is_error(ret) && arg3) { 6014 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6015 goto efault; 6016 old_act->_sa_handler = oact._sa_handler; 6017 old_act->sa_flags = oact.sa_flags; 6018 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 6019 old_act->sa_mask.sig[1] = 0; 6020 old_act->sa_mask.sig[2] = 0; 6021 old_act->sa_mask.sig[3] = 0; 6022 unlock_user_struct(old_act, arg3, 1); 6023 } 6024 #else 6025 struct target_old_sigaction *old_act; 6026 struct target_sigaction act, oact, *pact; 6027 if (arg2) { 6028 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6029 goto 
efault; 6030 act._sa_handler = old_act->_sa_handler; 6031 target_siginitset(&act.sa_mask, old_act->sa_mask); 6032 act.sa_flags = old_act->sa_flags; 6033 act.sa_restorer = old_act->sa_restorer; 6034 unlock_user_struct(old_act, arg2, 0); 6035 pact = &act; 6036 } else { 6037 pact = NULL; 6038 } 6039 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6040 if (!is_error(ret) && arg3) { 6041 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6042 goto efault; 6043 old_act->_sa_handler = oact._sa_handler; 6044 old_act->sa_mask = oact.sa_mask.sig[0]; 6045 old_act->sa_flags = oact.sa_flags; 6046 old_act->sa_restorer = oact.sa_restorer; 6047 unlock_user_struct(old_act, arg3, 1); 6048 } 6049 #endif 6050 } 6051 break; 6052 #endif 6053 case TARGET_NR_rt_sigaction: 6054 { 6055 #if defined(TARGET_ALPHA) 6056 struct target_sigaction act, oact, *pact = 0; 6057 struct target_rt_sigaction *rt_act; 6058 /* ??? arg4 == sizeof(sigset_t). */ 6059 if (arg2) { 6060 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 6061 goto efault; 6062 act._sa_handler = rt_act->_sa_handler; 6063 act.sa_mask = rt_act->sa_mask; 6064 act.sa_flags = rt_act->sa_flags; 6065 act.sa_restorer = arg5; 6066 unlock_user_struct(rt_act, arg2, 0); 6067 pact = &act; 6068 } 6069 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6070 if (!is_error(ret) && arg3) { 6071 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 6072 goto efault; 6073 rt_act->_sa_handler = oact._sa_handler; 6074 rt_act->sa_mask = oact.sa_mask; 6075 rt_act->sa_flags = oact.sa_flags; 6076 unlock_user_struct(rt_act, arg3, 1); 6077 } 6078 #else 6079 struct target_sigaction *act; 6080 struct target_sigaction *oact; 6081 6082 if (arg2) { 6083 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 6084 goto efault; 6085 } else 6086 act = NULL; 6087 if (arg3) { 6088 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 6089 ret = -TARGET_EFAULT; 6090 goto rt_sigaction_fail; 6091 } 6092 } else 6093 oact = NULL; 6094 ret = get_errno(do_sigaction(arg1, act, oact)); 6095 rt_sigaction_fail: 6096 if (act) 6097 unlock_user_struct(act, arg2, 0); 6098 if (oact) 6099 unlock_user_struct(oact, arg3, 1); 6100 #endif 6101 } 6102 break; 6103 #ifdef TARGET_NR_sgetmask /* not on alpha */ 6104 case TARGET_NR_sgetmask: 6105 { 6106 sigset_t cur_set; 6107 abi_ulong target_set; 6108 sigprocmask(0, NULL, &cur_set); 6109 host_to_target_old_sigset(&target_set, &cur_set); 6110 ret = target_set; 6111 } 6112 break; 6113 #endif 6114 #ifdef TARGET_NR_ssetmask /* not on alpha */ 6115 case TARGET_NR_ssetmask: 6116 { 6117 sigset_t set, oset, cur_set; 6118 abi_ulong target_set = arg1; 6119 sigprocmask(0, NULL, &cur_set); 6120 target_to_host_old_sigset(&set, &target_set); 6121 sigorset(&set, &set, &cur_set); 6122 sigprocmask(SIG_SETMASK, &set, &oset); 6123 host_to_target_old_sigset(&target_set, &oset); 6124 ret = target_set; 6125 } 6126 break; 6127 #endif 6128 #ifdef TARGET_NR_sigprocmask 6129 case TARGET_NR_sigprocmask: 6130 { 6131 #if defined(TARGET_ALPHA) 6132 sigset_t set, oldset; 6133 abi_ulong mask; 6134 int how; 6135 6136 switch (arg1) { 6137 case TARGET_SIG_BLOCK: 6138 how = SIG_BLOCK; 6139 break; 6140 case TARGET_SIG_UNBLOCK: 6141 how = SIG_UNBLOCK; 6142 break; 6143 case TARGET_SIG_SETMASK: 6144 how = SIG_SETMASK; 6145 break; 6146 default: 6147 ret = -TARGET_EINVAL; 6148 goto fail; 6149 } 6150 mask = arg2; 6151 target_to_host_old_sigset(&set, &mask); 6152 6153 ret = get_errno(sigprocmask(how, &set, &oldset)); 6154 if (!is_error(ret)) { 6155 host_to_target_old_sigset(&mask, &oldset); 6156 ret = mask; 6157 
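    /* Every sigprocmask-style entry point repeats the same translation of
     * the "how" argument from TARGET_SIG_BLOCK/UNBLOCK/SETMASK to the host
     * SIG_* values (see the switch just above, the non-Alpha branch below,
     * and rt_sigprocmask further down).  Factored out it would look roughly
     * like this sketch; the helper name is hypothetical and not defined in
     * this file.
     */
#if 0   /* illustrative sketch, not compiled */
    static int target_to_host_sigprocmask_how(int target_how)
    {
        switch (target_how) {
        case TARGET_SIG_BLOCK:   return SIG_BLOCK;
        case TARGET_SIG_UNBLOCK: return SIG_UNBLOCK;
        case TARGET_SIG_SETMASK: return SIG_SETMASK;
        default:                 return -1;   /* caller reports -TARGET_EINVAL */
        }
    }
#endif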
((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6158 } 6159 #else 6160 sigset_t set, oldset, *set_ptr; 6161 int how; 6162 6163 if (arg2) { 6164 switch (arg1) { 6165 case TARGET_SIG_BLOCK: 6166 how = SIG_BLOCK; 6167 break; 6168 case TARGET_SIG_UNBLOCK: 6169 how = SIG_UNBLOCK; 6170 break; 6171 case TARGET_SIG_SETMASK: 6172 how = SIG_SETMASK; 6173 break; 6174 default: 6175 ret = -TARGET_EINVAL; 6176 goto fail; 6177 } 6178 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6179 goto efault; 6180 target_to_host_old_sigset(&set, p); 6181 unlock_user(p, arg2, 0); 6182 set_ptr = &set; 6183 } else { 6184 how = 0; 6185 set_ptr = NULL; 6186 } 6187 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6188 if (!is_error(ret) && arg3) { 6189 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6190 goto efault; 6191 host_to_target_old_sigset(p, &oldset); 6192 unlock_user(p, arg3, sizeof(target_sigset_t)); 6193 } 6194 #endif 6195 } 6196 break; 6197 #endif 6198 case TARGET_NR_rt_sigprocmask: 6199 { 6200 int how = arg1; 6201 sigset_t set, oldset, *set_ptr; 6202 6203 if (arg2) { 6204 switch(how) { 6205 case TARGET_SIG_BLOCK: 6206 how = SIG_BLOCK; 6207 break; 6208 case TARGET_SIG_UNBLOCK: 6209 how = SIG_UNBLOCK; 6210 break; 6211 case TARGET_SIG_SETMASK: 6212 how = SIG_SETMASK; 6213 break; 6214 default: 6215 ret = -TARGET_EINVAL; 6216 goto fail; 6217 } 6218 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6219 goto efault; 6220 target_to_host_sigset(&set, p); 6221 unlock_user(p, arg2, 0); 6222 set_ptr = &set; 6223 } else { 6224 how = 0; 6225 set_ptr = NULL; 6226 } 6227 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6228 if (!is_error(ret) && arg3) { 6229 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6230 goto efault; 6231 host_to_target_sigset(p, &oldset); 6232 unlock_user(p, arg3, sizeof(target_sigset_t)); 6233 } 6234 } 6235 break; 6236 #ifdef TARGET_NR_sigpending 6237 case TARGET_NR_sigpending: 6238 { 6239 sigset_t set; 6240 ret = get_errno(sigpending(&set)); 6241 if (!is_error(ret)) { 6242 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6243 goto efault; 6244 host_to_target_old_sigset(p, &set); 6245 unlock_user(p, arg1, sizeof(target_sigset_t)); 6246 } 6247 } 6248 break; 6249 #endif 6250 case TARGET_NR_rt_sigpending: 6251 { 6252 sigset_t set; 6253 ret = get_errno(sigpending(&set)); 6254 if (!is_error(ret)) { 6255 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6256 goto efault; 6257 host_to_target_sigset(p, &set); 6258 unlock_user(p, arg1, sizeof(target_sigset_t)); 6259 } 6260 } 6261 break; 6262 #ifdef TARGET_NR_sigsuspend 6263 case TARGET_NR_sigsuspend: 6264 { 6265 sigset_t set; 6266 #if defined(TARGET_ALPHA) 6267 abi_ulong mask = arg1; 6268 target_to_host_old_sigset(&set, &mask); 6269 #else 6270 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6271 goto efault; 6272 target_to_host_old_sigset(&set, p); 6273 unlock_user(p, arg1, 0); 6274 #endif 6275 ret = get_errno(sigsuspend(&set)); 6276 } 6277 break; 6278 #endif 6279 case TARGET_NR_rt_sigsuspend: 6280 { 6281 sigset_t set; 6282 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6283 goto efault; 6284 target_to_host_sigset(&set, p); 6285 unlock_user(p, arg1, 0); 6286 ret = get_errno(sigsuspend(&set)); 6287 } 6288 break; 6289 case TARGET_NR_rt_sigtimedwait: 6290 { 6291 sigset_t set; 6292 struct timespec uts, *puts; 6293 siginfo_t uinfo; 6294 6295 if (!(p = 
lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6296 goto efault; 6297 target_to_host_sigset(&set, p); 6298 unlock_user(p, arg1, 0); 6299 if (arg3) { 6300 puts = &uts; 6301 target_to_host_timespec(puts, arg3); 6302 } else { 6303 puts = NULL; 6304 } 6305 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6306 if (!is_error(ret) && arg2) { 6307 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 6308 goto efault; 6309 host_to_target_siginfo(p, &uinfo); 6310 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6311 } 6312 } 6313 break; 6314 case TARGET_NR_rt_sigqueueinfo: 6315 { 6316 siginfo_t uinfo; 6317 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6318 goto efault; 6319 target_to_host_siginfo(&uinfo, p); 6320 unlock_user(p, arg1, 0); 6321 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6322 } 6323 break; 6324 #ifdef TARGET_NR_sigreturn 6325 case TARGET_NR_sigreturn: 6326 /* NOTE: ret is eax, so not transcoding must be done */ 6327 ret = do_sigreturn(cpu_env); 6328 break; 6329 #endif 6330 case TARGET_NR_rt_sigreturn: 6331 /* NOTE: ret is eax, so not transcoding must be done */ 6332 ret = do_rt_sigreturn(cpu_env); 6333 break; 6334 case TARGET_NR_sethostname: 6335 if (!(p = lock_user_string(arg1))) 6336 goto efault; 6337 ret = get_errno(sethostname(p, arg2)); 6338 unlock_user(p, arg1, 0); 6339 break; 6340 case TARGET_NR_setrlimit: 6341 { 6342 int resource = target_to_host_resource(arg1); 6343 struct target_rlimit *target_rlim; 6344 struct rlimit rlim; 6345 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6346 goto efault; 6347 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6348 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6349 unlock_user_struct(target_rlim, arg2, 0); 6350 ret = get_errno(setrlimit(resource, &rlim)); 6351 } 6352 break; 6353 case TARGET_NR_getrlimit: 6354 { 6355 int resource = target_to_host_resource(arg1); 6356 struct target_rlimit *target_rlim; 6357 struct rlimit rlim; 6358 6359 ret = get_errno(getrlimit(resource, &rlim)); 6360 if (!is_error(ret)) { 6361 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6362 goto efault; 6363 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6364 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6365 unlock_user_struct(target_rlim, arg2, 1); 6366 } 6367 } 6368 break; 6369 case TARGET_NR_getrusage: 6370 { 6371 struct rusage rusage; 6372 ret = get_errno(getrusage(arg1, &rusage)); 6373 if (!is_error(ret)) { 6374 host_to_target_rusage(arg2, &rusage); 6375 } 6376 } 6377 break; 6378 case TARGET_NR_gettimeofday: 6379 { 6380 struct timeval tv; 6381 ret = get_errno(gettimeofday(&tv, NULL)); 6382 if (!is_error(ret)) { 6383 if (copy_to_user_timeval(arg1, &tv)) 6384 goto efault; 6385 } 6386 } 6387 break; 6388 case TARGET_NR_settimeofday: 6389 { 6390 struct timeval tv; 6391 if (copy_from_user_timeval(&tv, arg1)) 6392 goto efault; 6393 ret = get_errno(settimeofday(&tv, NULL)); 6394 } 6395 break; 6396 #if defined(TARGET_NR_select) 6397 case TARGET_NR_select: 6398 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 6399 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6400 #else 6401 { 6402 struct target_sel_arg_struct *sel; 6403 abi_ulong inp, outp, exp, tvp; 6404 long nsel; 6405 6406 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6407 goto efault; 6408 nsel = tswapal(sel->n); 6409 inp = tswapal(sel->inp); 6410 outp = tswapal(sel->outp); 6411 exp = tswapal(sel->exp); 6412 tvp = tswapal(sel->tvp); 6413 unlock_user_struct(sel, arg1, 0); 
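    /* On targets that still use the single-argument form of select(2), the
     * five parameters arrive packed in guest memory and are unpacked above
     * with tswapal().  The guest-side layout is essentially five abi_longs,
     * as sketched here; the field names mirror the accesses above and the
     * real definition lives elsewhere in the tree.
     */
#if 0   /* illustrative sketch, not compiled */
    struct target_sel_arg_struct {
        abi_long n;        /* highest fd plus one */
        abi_long inp;      /* guest address of the read fd_set */
        abi_long outp;     /* guest address of the write fd_set */
        abi_long exp;      /* guest address of the except fd_set */
        abi_long tvp;      /* guest address of the timeout, or 0 */
    };
#endif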
6414 ret = do_select(nsel, inp, outp, exp, tvp); 6415 } 6416 #endif 6417 break; 6418 #endif 6419 #ifdef TARGET_NR_pselect6 6420 case TARGET_NR_pselect6: 6421 { 6422 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6423 fd_set rfds, wfds, efds; 6424 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6425 struct timespec ts, *ts_ptr; 6426 6427 /* 6428 * The 6th arg is actually two args smashed together, 6429 * so we cannot use the C library. 6430 */ 6431 sigset_t set; 6432 struct { 6433 sigset_t *set; 6434 size_t size; 6435 } sig, *sig_ptr; 6436 6437 abi_ulong arg_sigset, arg_sigsize, *arg7; 6438 target_sigset_t *target_sigset; 6439 6440 n = arg1; 6441 rfd_addr = arg2; 6442 wfd_addr = arg3; 6443 efd_addr = arg4; 6444 ts_addr = arg5; 6445 6446 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6447 if (ret) { 6448 goto fail; 6449 } 6450 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6451 if (ret) { 6452 goto fail; 6453 } 6454 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6455 if (ret) { 6456 goto fail; 6457 } 6458 6459 /* 6460 * This takes a timespec, and not a timeval, so we cannot 6461 * use the do_select() helper ... 6462 */ 6463 if (ts_addr) { 6464 if (target_to_host_timespec(&ts, ts_addr)) { 6465 goto efault; 6466 } 6467 ts_ptr = &ts; 6468 } else { 6469 ts_ptr = NULL; 6470 } 6471 6472 /* Extract the two packed args for the sigset */ 6473 if (arg6) { 6474 sig_ptr = &sig; 6475 sig.size = _NSIG / 8; 6476 6477 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6478 if (!arg7) { 6479 goto efault; 6480 } 6481 arg_sigset = tswapal(arg7[0]); 6482 arg_sigsize = tswapal(arg7[1]); 6483 unlock_user(arg7, arg6, 0); 6484 6485 if (arg_sigset) { 6486 sig.set = &set; 6487 if (arg_sigsize != sizeof(*target_sigset)) { 6488 /* Like the kernel, we enforce correct size sigsets */ 6489 ret = -TARGET_EINVAL; 6490 goto fail; 6491 } 6492 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6493 sizeof(*target_sigset), 1); 6494 if (!target_sigset) { 6495 goto efault; 6496 } 6497 target_to_host_sigset(&set, target_sigset); 6498 unlock_user(target_sigset, arg_sigset, 0); 6499 } else { 6500 sig.set = NULL; 6501 } 6502 } else { 6503 sig_ptr = NULL; 6504 } 6505 6506 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6507 ts_ptr, sig_ptr)); 6508 6509 if (!is_error(ret)) { 6510 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6511 goto efault; 6512 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6513 goto efault; 6514 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6515 goto efault; 6516 6517 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6518 goto efault; 6519 } 6520 } 6521 break; 6522 #endif 6523 case TARGET_NR_symlink: 6524 { 6525 void *p2; 6526 p = lock_user_string(arg1); 6527 p2 = lock_user_string(arg2); 6528 if (!p || !p2) 6529 ret = -TARGET_EFAULT; 6530 else 6531 ret = get_errno(symlink(p, p2)); 6532 unlock_user(p2, arg2, 0); 6533 unlock_user(p, arg1, 0); 6534 } 6535 break; 6536 #if defined(TARGET_NR_symlinkat) 6537 case TARGET_NR_symlinkat: 6538 { 6539 void *p2; 6540 p = lock_user_string(arg1); 6541 p2 = lock_user_string(arg3); 6542 if (!p || !p2) 6543 ret = -TARGET_EFAULT; 6544 else 6545 ret = get_errno(symlinkat(p, arg2, p2)); 6546 unlock_user(p2, arg3, 0); 6547 unlock_user(p, arg1, 0); 6548 } 6549 break; 6550 #endif 6551 #ifdef TARGET_NR_oldlstat 6552 case TARGET_NR_oldlstat: 6553 goto unimplemented; 6554 #endif 6555 case TARGET_NR_readlink: 6556 { 6557 void *p2; 6558 p = lock_user_string(arg1); 6559 p2 = lock_user(VERIFY_WRITE, 
arg2, arg3, 0); 6560 if (!p || !p2) { 6561 ret = -TARGET_EFAULT; 6562 } else if (is_proc_myself((const char *)p, "exe")) { 6563 char real[PATH_MAX], *temp; 6564 temp = realpath(exec_path, real); 6565 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6566 snprintf((char *)p2, arg3, "%s", real); 6567 } else { 6568 ret = get_errno(readlink(path(p), p2, arg3)); 6569 } 6570 unlock_user(p2, arg2, ret); 6571 unlock_user(p, arg1, 0); 6572 } 6573 break; 6574 #if defined(TARGET_NR_readlinkat) 6575 case TARGET_NR_readlinkat: 6576 { 6577 void *p2; 6578 p = lock_user_string(arg2); 6579 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6580 if (!p || !p2) { 6581 ret = -TARGET_EFAULT; 6582 } else if (is_proc_myself((const char *)p, "exe")) { 6583 char real[PATH_MAX], *temp; 6584 temp = realpath(exec_path, real); 6585 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6586 snprintf((char *)p2, arg4, "%s", real); 6587 } else { 6588 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 6589 } 6590 unlock_user(p2, arg3, ret); 6591 unlock_user(p, arg2, 0); 6592 } 6593 break; 6594 #endif 6595 #ifdef TARGET_NR_uselib 6596 case TARGET_NR_uselib: 6597 goto unimplemented; 6598 #endif 6599 #ifdef TARGET_NR_swapon 6600 case TARGET_NR_swapon: 6601 if (!(p = lock_user_string(arg1))) 6602 goto efault; 6603 ret = get_errno(swapon(p, arg2)); 6604 unlock_user(p, arg1, 0); 6605 break; 6606 #endif 6607 case TARGET_NR_reboot: 6608 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6609 /* arg4 must be ignored in all other cases */ 6610 p = lock_user_string(arg4); 6611 if (!p) { 6612 goto efault; 6613 } 6614 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6615 unlock_user(p, arg4, 0); 6616 } else { 6617 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6618 } 6619 break; 6620 #ifdef TARGET_NR_readdir 6621 case TARGET_NR_readdir: 6622 goto unimplemented; 6623 #endif 6624 #ifdef TARGET_NR_mmap 6625 case TARGET_NR_mmap: 6626 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6627 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 6628 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6629 || defined(TARGET_S390X) 6630 { 6631 abi_ulong *v; 6632 abi_ulong v1, v2, v3, v4, v5, v6; 6633 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6634 goto efault; 6635 v1 = tswapal(v[0]); 6636 v2 = tswapal(v[1]); 6637 v3 = tswapal(v[2]); 6638 v4 = tswapal(v[3]); 6639 v5 = tswapal(v[4]); 6640 v6 = tswapal(v[5]); 6641 unlock_user(v, arg1, 0); 6642 ret = get_errno(target_mmap(v1, v2, v3, 6643 target_to_host_bitmask(v4, mmap_flags_tbl), 6644 v5, v6)); 6645 } 6646 #else 6647 ret = get_errno(target_mmap(arg1, arg2, arg3, 6648 target_to_host_bitmask(arg4, mmap_flags_tbl), 6649 arg5, 6650 arg6)); 6651 #endif 6652 break; 6653 #endif 6654 #ifdef TARGET_NR_mmap2 6655 case TARGET_NR_mmap2: 6656 #ifndef MMAP_SHIFT 6657 #define MMAP_SHIFT 12 6658 #endif 6659 ret = get_errno(target_mmap(arg1, arg2, arg3, 6660 target_to_host_bitmask(arg4, mmap_flags_tbl), 6661 arg5, 6662 arg6 << MMAP_SHIFT)); 6663 break; 6664 #endif 6665 case TARGET_NR_munmap: 6666 ret = get_errno(target_munmap(arg1, arg2)); 6667 break; 6668 case TARGET_NR_mprotect: 6669 { 6670 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 6671 /* Special hack to detect libc making the stack executable. 
*/ 6672 if ((arg3 & PROT_GROWSDOWN) 6673 && arg1 >= ts->info->stack_limit 6674 && arg1 <= ts->info->start_stack) { 6675 arg3 &= ~PROT_GROWSDOWN; 6676 arg2 = arg2 + arg1 - ts->info->stack_limit; 6677 arg1 = ts->info->stack_limit; 6678 } 6679 } 6680 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6681 break; 6682 #ifdef TARGET_NR_mremap 6683 case TARGET_NR_mremap: 6684 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6685 break; 6686 #endif 6687 /* ??? msync/mlock/munlock are broken for softmmu. */ 6688 #ifdef TARGET_NR_msync 6689 case TARGET_NR_msync: 6690 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6691 break; 6692 #endif 6693 #ifdef TARGET_NR_mlock 6694 case TARGET_NR_mlock: 6695 ret = get_errno(mlock(g2h(arg1), arg2)); 6696 break; 6697 #endif 6698 #ifdef TARGET_NR_munlock 6699 case TARGET_NR_munlock: 6700 ret = get_errno(munlock(g2h(arg1), arg2)); 6701 break; 6702 #endif 6703 #ifdef TARGET_NR_mlockall 6704 case TARGET_NR_mlockall: 6705 ret = get_errno(mlockall(arg1)); 6706 break; 6707 #endif 6708 #ifdef TARGET_NR_munlockall 6709 case TARGET_NR_munlockall: 6710 ret = get_errno(munlockall()); 6711 break; 6712 #endif 6713 case TARGET_NR_truncate: 6714 if (!(p = lock_user_string(arg1))) 6715 goto efault; 6716 ret = get_errno(truncate(p, arg2)); 6717 unlock_user(p, arg1, 0); 6718 break; 6719 case TARGET_NR_ftruncate: 6720 ret = get_errno(ftruncate(arg1, arg2)); 6721 break; 6722 case TARGET_NR_fchmod: 6723 ret = get_errno(fchmod(arg1, arg2)); 6724 break; 6725 #if defined(TARGET_NR_fchmodat) 6726 case TARGET_NR_fchmodat: 6727 if (!(p = lock_user_string(arg2))) 6728 goto efault; 6729 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 6730 unlock_user(p, arg2, 0); 6731 break; 6732 #endif 6733 case TARGET_NR_getpriority: 6734 /* Note that negative values are valid for getpriority, so we must 6735 differentiate based on errno settings. */ 6736 errno = 0; 6737 ret = getpriority(arg1, arg2); 6738 if (ret == -1 && errno != 0) { 6739 ret = -host_to_target_errno(errno); 6740 break; 6741 } 6742 #ifdef TARGET_ALPHA 6743 /* Return value is the unbiased priority. Signal no error. */ 6744 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6745 #else 6746 /* Return value is a biased priority to avoid negative numbers. 
*/ 6747 ret = 20 - ret; 6748 #endif 6749 break; 6750 case TARGET_NR_setpriority: 6751 ret = get_errno(setpriority(arg1, arg2, arg3)); 6752 break; 6753 #ifdef TARGET_NR_profil 6754 case TARGET_NR_profil: 6755 goto unimplemented; 6756 #endif 6757 case TARGET_NR_statfs: 6758 if (!(p = lock_user_string(arg1))) 6759 goto efault; 6760 ret = get_errno(statfs(path(p), &stfs)); 6761 unlock_user(p, arg1, 0); 6762 convert_statfs: 6763 if (!is_error(ret)) { 6764 struct target_statfs *target_stfs; 6765 6766 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6767 goto efault; 6768 __put_user(stfs.f_type, &target_stfs->f_type); 6769 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6770 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6771 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6772 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6773 __put_user(stfs.f_files, &target_stfs->f_files); 6774 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6775 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6776 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6777 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6778 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6779 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6780 unlock_user_struct(target_stfs, arg2, 1); 6781 } 6782 break; 6783 case TARGET_NR_fstatfs: 6784 ret = get_errno(fstatfs(arg1, &stfs)); 6785 goto convert_statfs; 6786 #ifdef TARGET_NR_statfs64 6787 case TARGET_NR_statfs64: 6788 if (!(p = lock_user_string(arg1))) 6789 goto efault; 6790 ret = get_errno(statfs(path(p), &stfs)); 6791 unlock_user(p, arg1, 0); 6792 convert_statfs64: 6793 if (!is_error(ret)) { 6794 struct target_statfs64 *target_stfs; 6795 6796 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6797 goto efault; 6798 __put_user(stfs.f_type, &target_stfs->f_type); 6799 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6800 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6801 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6802 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6803 __put_user(stfs.f_files, &target_stfs->f_files); 6804 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6805 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6806 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6807 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6808 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6809 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6810 unlock_user_struct(target_stfs, arg3, 1); 6811 } 6812 break; 6813 case TARGET_NR_fstatfs64: 6814 ret = get_errno(fstatfs(arg1, &stfs)); 6815 goto convert_statfs64; 6816 #endif 6817 #ifdef TARGET_NR_ioperm 6818 case TARGET_NR_ioperm: 6819 goto unimplemented; 6820 #endif 6821 #ifdef TARGET_NR_socketcall 6822 case TARGET_NR_socketcall: 6823 ret = do_socketcall(arg1, arg2); 6824 break; 6825 #endif 6826 #ifdef TARGET_NR_accept 6827 case TARGET_NR_accept: 6828 ret = do_accept4(arg1, arg2, arg3, 0); 6829 break; 6830 #endif 6831 #ifdef TARGET_NR_accept4 6832 case TARGET_NR_accept4: 6833 #ifdef CONFIG_ACCEPT4 6834 ret = do_accept4(arg1, arg2, arg3, arg4); 6835 #else 6836 goto unimplemented; 6837 #endif 6838 break; 6839 #endif 6840 #ifdef TARGET_NR_bind 6841 case TARGET_NR_bind: 6842 ret = do_bind(arg1, arg2, arg3); 6843 break; 6844 #endif 6845 #ifdef TARGET_NR_connect 6846 case TARGET_NR_connect: 6847 ret = do_connect(arg1, arg2, arg3); 6848 break; 6849 #endif 6850 #ifdef TARGET_NR_getpeername 6851 case TARGET_NR_getpeername: 6852 
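    /* The statfs/statfs64 conversions above show the usual way a host
     * structure is copied out to the guest: lock the target struct with
     * lock_user_struct(VERIFY_WRITE, ...), store each field with
     * __put_user() (which writes the value with the guest's endianness and
     * field width), then unlock with a non-zero final argument so the data
     * is written back.  A reduced sketch of the same pattern for a
     * hypothetical two-field structure:
     */
#if 0   /* illustrative sketch, not compiled */
    struct target_example_info {        /* hypothetical guest-visible layout */
        abi_ulong a;
        abi_ulong b;
    };

    struct target_example_info *tei;
    if (!lock_user_struct(VERIFY_WRITE, tei, arg2, 0))
        goto efault;
    __put_user(host_a, &tei->a);        /* byte-swapped/resized for the guest */
    __put_user(host_b, &tei->b);
    unlock_user_struct(tei, arg2, 1);   /* 1: contents were modified */
#endif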
ret = do_getpeername(arg1, arg2, arg3); 6853 break; 6854 #endif 6855 #ifdef TARGET_NR_getsockname 6856 case TARGET_NR_getsockname: 6857 ret = do_getsockname(arg1, arg2, arg3); 6858 break; 6859 #endif 6860 #ifdef TARGET_NR_getsockopt 6861 case TARGET_NR_getsockopt: 6862 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6863 break; 6864 #endif 6865 #ifdef TARGET_NR_listen 6866 case TARGET_NR_listen: 6867 ret = get_errno(listen(arg1, arg2)); 6868 break; 6869 #endif 6870 #ifdef TARGET_NR_recv 6871 case TARGET_NR_recv: 6872 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6873 break; 6874 #endif 6875 #ifdef TARGET_NR_recvfrom 6876 case TARGET_NR_recvfrom: 6877 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6878 break; 6879 #endif 6880 #ifdef TARGET_NR_recvmsg 6881 case TARGET_NR_recvmsg: 6882 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6883 break; 6884 #endif 6885 #ifdef TARGET_NR_send 6886 case TARGET_NR_send: 6887 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6888 break; 6889 #endif 6890 #ifdef TARGET_NR_sendmsg 6891 case TARGET_NR_sendmsg: 6892 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6893 break; 6894 #endif 6895 #ifdef TARGET_NR_sendto 6896 case TARGET_NR_sendto: 6897 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6898 break; 6899 #endif 6900 #ifdef TARGET_NR_shutdown 6901 case TARGET_NR_shutdown: 6902 ret = get_errno(shutdown(arg1, arg2)); 6903 break; 6904 #endif 6905 #ifdef TARGET_NR_socket 6906 case TARGET_NR_socket: 6907 ret = do_socket(arg1, arg2, arg3); 6908 break; 6909 #endif 6910 #ifdef TARGET_NR_socketpair 6911 case TARGET_NR_socketpair: 6912 ret = do_socketpair(arg1, arg2, arg3, arg4); 6913 break; 6914 #endif 6915 #ifdef TARGET_NR_setsockopt 6916 case TARGET_NR_setsockopt: 6917 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6918 break; 6919 #endif 6920 6921 case TARGET_NR_syslog: 6922 if (!(p = lock_user_string(arg2))) 6923 goto efault; 6924 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6925 unlock_user(p, arg2, 0); 6926 break; 6927 6928 case TARGET_NR_setitimer: 6929 { 6930 struct itimerval value, ovalue, *pvalue; 6931 6932 if (arg2) { 6933 pvalue = &value; 6934 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6935 || copy_from_user_timeval(&pvalue->it_value, 6936 arg2 + sizeof(struct target_timeval))) 6937 goto efault; 6938 } else { 6939 pvalue = NULL; 6940 } 6941 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6942 if (!is_error(ret) && arg3) { 6943 if (copy_to_user_timeval(arg3, 6944 &ovalue.it_interval) 6945 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6946 &ovalue.it_value)) 6947 goto efault; 6948 } 6949 } 6950 break; 6951 case TARGET_NR_getitimer: 6952 { 6953 struct itimerval value; 6954 6955 ret = get_errno(getitimer(arg1, &value)); 6956 if (!is_error(ret) && arg2) { 6957 if (copy_to_user_timeval(arg2, 6958 &value.it_interval) 6959 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6960 &value.it_value)) 6961 goto efault; 6962 } 6963 } 6964 break; 6965 case TARGET_NR_stat: 6966 if (!(p = lock_user_string(arg1))) 6967 goto efault; 6968 ret = get_errno(stat(path(p), &st)); 6969 unlock_user(p, arg1, 0); 6970 goto do_stat; 6971 case TARGET_NR_lstat: 6972 if (!(p = lock_user_string(arg1))) 6973 goto efault; 6974 ret = get_errno(lstat(path(p), &st)); 6975 unlock_user(p, arg1, 0); 6976 goto do_stat; 6977 case TARGET_NR_fstat: 6978 { 6979 ret = get_errno(fstat(arg1, &st)); 6980 do_stat: 6981 if (!is_error(ret)) { 6982 struct target_stat *target_st; 6983 6984 if (!lock_user_struct(VERIFY_WRITE, target_st, 
arg2, 0)) 6985 goto efault; 6986 memset(target_st, 0, sizeof(*target_st)); 6987 __put_user(st.st_dev, &target_st->st_dev); 6988 __put_user(st.st_ino, &target_st->st_ino); 6989 __put_user(st.st_mode, &target_st->st_mode); 6990 __put_user(st.st_uid, &target_st->st_uid); 6991 __put_user(st.st_gid, &target_st->st_gid); 6992 __put_user(st.st_nlink, &target_st->st_nlink); 6993 __put_user(st.st_rdev, &target_st->st_rdev); 6994 __put_user(st.st_size, &target_st->st_size); 6995 __put_user(st.st_blksize, &target_st->st_blksize); 6996 __put_user(st.st_blocks, &target_st->st_blocks); 6997 __put_user(st.st_atime, &target_st->target_st_atime); 6998 __put_user(st.st_mtime, &target_st->target_st_mtime); 6999 __put_user(st.st_ctime, &target_st->target_st_ctime); 7000 unlock_user_struct(target_st, arg2, 1); 7001 } 7002 } 7003 break; 7004 #ifdef TARGET_NR_olduname 7005 case TARGET_NR_olduname: 7006 goto unimplemented; 7007 #endif 7008 #ifdef TARGET_NR_iopl 7009 case TARGET_NR_iopl: 7010 goto unimplemented; 7011 #endif 7012 case TARGET_NR_vhangup: 7013 ret = get_errno(vhangup()); 7014 break; 7015 #ifdef TARGET_NR_idle 7016 case TARGET_NR_idle: 7017 goto unimplemented; 7018 #endif 7019 #ifdef TARGET_NR_syscall 7020 case TARGET_NR_syscall: 7021 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 7022 arg6, arg7, arg8, 0); 7023 break; 7024 #endif 7025 case TARGET_NR_wait4: 7026 { 7027 int status; 7028 abi_long status_ptr = arg2; 7029 struct rusage rusage, *rusage_ptr; 7030 abi_ulong target_rusage = arg4; 7031 if (target_rusage) 7032 rusage_ptr = &rusage; 7033 else 7034 rusage_ptr = NULL; 7035 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 7036 if (!is_error(ret)) { 7037 if (status_ptr && ret) { 7038 status = host_to_target_waitstatus(status); 7039 if (put_user_s32(status, status_ptr)) 7040 goto efault; 7041 } 7042 if (target_rusage) 7043 host_to_target_rusage(target_rusage, &rusage); 7044 } 7045 } 7046 break; 7047 #ifdef TARGET_NR_swapoff 7048 case TARGET_NR_swapoff: 7049 if (!(p = lock_user_string(arg1))) 7050 goto efault; 7051 ret = get_errno(swapoff(p)); 7052 unlock_user(p, arg1, 0); 7053 break; 7054 #endif 7055 case TARGET_NR_sysinfo: 7056 { 7057 struct target_sysinfo *target_value; 7058 struct sysinfo value; 7059 ret = get_errno(sysinfo(&value)); 7060 if (!is_error(ret) && arg1) 7061 { 7062 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 7063 goto efault; 7064 __put_user(value.uptime, &target_value->uptime); 7065 __put_user(value.loads[0], &target_value->loads[0]); 7066 __put_user(value.loads[1], &target_value->loads[1]); 7067 __put_user(value.loads[2], &target_value->loads[2]); 7068 __put_user(value.totalram, &target_value->totalram); 7069 __put_user(value.freeram, &target_value->freeram); 7070 __put_user(value.sharedram, &target_value->sharedram); 7071 __put_user(value.bufferram, &target_value->bufferram); 7072 __put_user(value.totalswap, &target_value->totalswap); 7073 __put_user(value.freeswap, &target_value->freeswap); 7074 __put_user(value.procs, &target_value->procs); 7075 __put_user(value.totalhigh, &target_value->totalhigh); 7076 __put_user(value.freehigh, &target_value->freehigh); 7077 __put_user(value.mem_unit, &target_value->mem_unit); 7078 unlock_user_struct(target_value, arg1, 1); 7079 } 7080 } 7081 break; 7082 #ifdef TARGET_NR_ipc 7083 case TARGET_NR_ipc: 7084 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 7085 break; 7086 #endif 7087 #ifdef TARGET_NR_semget 7088 case TARGET_NR_semget: 7089 ret = get_errno(semget(arg1, arg2, arg3)); 7090 break; 7091 
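    /* Some ABIs (i386 among them) funnel every SysV IPC operation through a
     * single multiplexed ipc(2) syscall, which the TARGET_NR_ipc case above
     * hands to do_ipc(); other ABIs expose the individual sem/msg/shm
     * syscalls handled around here.  A generic sketch of that kind of
     * demultiplexer is shown below; the EXAMPLE_IPCOP_* constants and the
     * function name are hypothetical (the real operation codes and any
     * argument reshuffling live with do_ipc() elsewhere in this file).
     */
#if 0   /* illustrative sketch, not compiled */
    static abi_long example_ipc(unsigned int call, abi_long a1, abi_long a2,
                                abi_long a3, abi_long a4)
    {
        switch (call & 0xffff) {               /* low bits pick the operation */
        case EXAMPLE_IPCOP_semop:
            return do_semop(a1, a2, a3);
        case EXAMPLE_IPCOP_semget:
            return get_errno(semget(a1, a2, a3));
        case EXAMPLE_IPCOP_shmget:
            return get_errno(shmget(a1, a2, a3));
        /* msgget/msgctl/shmctl/... follow the same shape */
        default:
            return -TARGET_ENOSYS;
        }
    }
#endif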
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = do_semop(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        break;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
#ifdef TARGET_GPROF
        _mcleanup();
#endif
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname *buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy(buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release.
*/ 7191 if (qemu_uname_release && *qemu_uname_release) 7192 strcpy (buf->release, qemu_uname_release); 7193 } 7194 unlock_user_struct(buf, arg1, 1); 7195 } 7196 break; 7197 #ifdef TARGET_I386 7198 case TARGET_NR_modify_ldt: 7199 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7200 break; 7201 #if !defined(TARGET_X86_64) 7202 case TARGET_NR_vm86old: 7203 goto unimplemented; 7204 case TARGET_NR_vm86: 7205 ret = do_vm86(cpu_env, arg1, arg2); 7206 break; 7207 #endif 7208 #endif 7209 case TARGET_NR_adjtimex: 7210 goto unimplemented; 7211 #ifdef TARGET_NR_create_module 7212 case TARGET_NR_create_module: 7213 #endif 7214 case TARGET_NR_init_module: 7215 case TARGET_NR_delete_module: 7216 #ifdef TARGET_NR_get_kernel_syms 7217 case TARGET_NR_get_kernel_syms: 7218 #endif 7219 goto unimplemented; 7220 case TARGET_NR_quotactl: 7221 goto unimplemented; 7222 case TARGET_NR_getpgid: 7223 ret = get_errno(getpgid(arg1)); 7224 break; 7225 case TARGET_NR_fchdir: 7226 ret = get_errno(fchdir(arg1)); 7227 break; 7228 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7229 case TARGET_NR_bdflush: 7230 goto unimplemented; 7231 #endif 7232 #ifdef TARGET_NR_sysfs 7233 case TARGET_NR_sysfs: 7234 goto unimplemented; 7235 #endif 7236 case TARGET_NR_personality: 7237 ret = get_errno(personality(arg1)); 7238 break; 7239 #ifdef TARGET_NR_afs_syscall 7240 case TARGET_NR_afs_syscall: 7241 goto unimplemented; 7242 #endif 7243 #ifdef TARGET_NR__llseek /* Not on alpha */ 7244 case TARGET_NR__llseek: 7245 { 7246 int64_t res; 7247 #if !defined(__NR_llseek) 7248 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7249 if (res == -1) { 7250 ret = get_errno(res); 7251 } else { 7252 ret = 0; 7253 } 7254 #else 7255 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7256 #endif 7257 if ((ret == 0) && put_user_s64(res, arg4)) { 7258 goto efault; 7259 } 7260 } 7261 break; 7262 #endif 7263 case TARGET_NR_getdents: 7264 #ifdef __NR_getdents 7265 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7266 { 7267 struct target_dirent *target_dirp; 7268 struct linux_dirent *dirp; 7269 abi_long count = arg3; 7270 7271 dirp = malloc(count); 7272 if (!dirp) { 7273 ret = -TARGET_ENOMEM; 7274 goto fail; 7275 } 7276 7277 ret = get_errno(sys_getdents(arg1, dirp, count)); 7278 if (!is_error(ret)) { 7279 struct linux_dirent *de; 7280 struct target_dirent *tde; 7281 int len = ret; 7282 int reclen, treclen; 7283 int count1, tnamelen; 7284 7285 count1 = 0; 7286 de = dirp; 7287 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7288 goto efault; 7289 tde = target_dirp; 7290 while (len > 0) { 7291 reclen = de->d_reclen; 7292 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7293 assert(tnamelen >= 0); 7294 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7295 assert(count1 + treclen <= count); 7296 tde->d_reclen = tswap16(treclen); 7297 tde->d_ino = tswapal(de->d_ino); 7298 tde->d_off = tswapal(de->d_off); 7299 memcpy(tde->d_name, de->d_name, tnamelen); 7300 de = (struct linux_dirent *)((char *)de + reclen); 7301 len -= reclen; 7302 tde = (struct target_dirent *)((char *)tde + treclen); 7303 count1 += treclen; 7304 } 7305 ret = count1; 7306 unlock_user(target_dirp, arg2, ret); 7307 } 7308 free(dirp); 7309 } 7310 #else 7311 { 7312 struct linux_dirent *dirp; 7313 abi_long count = arg3; 7314 7315 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7316 goto efault; 7317 ret = get_errno(sys_getdents(arg1, dirp, count)); 7318 if (!is_error(ret)) { 7319 struct linux_dirent *de; 7320 int len = ret; 7321 int reclen; 7322 de = 
dirp; 7323 while (len > 0) { 7324 reclen = de->d_reclen; 7325 if (reclen > len) 7326 break; 7327 de->d_reclen = tswap16(reclen); 7328 tswapls(&de->d_ino); 7329 tswapls(&de->d_off); 7330 de = (struct linux_dirent *)((char *)de + reclen); 7331 len -= reclen; 7332 } 7333 } 7334 unlock_user(dirp, arg2, ret); 7335 } 7336 #endif 7337 #else 7338 /* Implement getdents in terms of getdents64 */ 7339 { 7340 struct linux_dirent64 *dirp; 7341 abi_long count = arg3; 7342 7343 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 7344 if (!dirp) { 7345 goto efault; 7346 } 7347 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7348 if (!is_error(ret)) { 7349 /* Convert the dirent64 structs to target dirent. We do this 7350 * in-place, since we can guarantee that a target_dirent is no 7351 * larger than a dirent64; however this means we have to be 7352 * careful to read everything before writing in the new format. 7353 */ 7354 struct linux_dirent64 *de; 7355 struct target_dirent *tde; 7356 int len = ret; 7357 int tlen = 0; 7358 7359 de = dirp; 7360 tde = (struct target_dirent *)dirp; 7361 while (len > 0) { 7362 int namelen, treclen; 7363 int reclen = de->d_reclen; 7364 uint64_t ino = de->d_ino; 7365 int64_t off = de->d_off; 7366 uint8_t type = de->d_type; 7367 7368 namelen = strlen(de->d_name); 7369 treclen = offsetof(struct target_dirent, d_name) 7370 + namelen + 2; 7371 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 7372 7373 memmove(tde->d_name, de->d_name, namelen + 1); 7374 tde->d_ino = tswapal(ino); 7375 tde->d_off = tswapal(off); 7376 tde->d_reclen = tswap16(treclen); 7377 /* The target_dirent type is in what was formerly a padding 7378 * byte at the end of the structure: 7379 */ 7380 *(((char *)tde) + treclen - 1) = type; 7381 7382 de = (struct linux_dirent64 *)((char *)de + reclen); 7383 tde = (struct target_dirent *)((char *)tde + treclen); 7384 len -= reclen; 7385 tlen += treclen; 7386 } 7387 ret = tlen; 7388 } 7389 unlock_user(dirp, arg2, ret); 7390 } 7391 #endif 7392 break; 7393 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7394 case TARGET_NR_getdents64: 7395 { 7396 struct linux_dirent64 *dirp; 7397 abi_long count = arg3; 7398 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7399 goto efault; 7400 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7401 if (!is_error(ret)) { 7402 struct linux_dirent64 *de; 7403 int len = ret; 7404 int reclen; 7405 de = dirp; 7406 while (len > 0) { 7407 reclen = de->d_reclen; 7408 if (reclen > len) 7409 break; 7410 de->d_reclen = tswap16(reclen); 7411 tswap64s((uint64_t *)&de->d_ino); 7412 tswap64s((uint64_t *)&de->d_off); 7413 de = (struct linux_dirent64 *)((char *)de + reclen); 7414 len -= reclen; 7415 } 7416 } 7417 unlock_user(dirp, arg2, ret); 7418 } 7419 break; 7420 #endif /* TARGET_NR_getdents64 */ 7421 #if defined(TARGET_NR__newselect) 7422 case TARGET_NR__newselect: 7423 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7424 break; 7425 #endif 7426 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7427 # ifdef TARGET_NR_poll 7428 case TARGET_NR_poll: 7429 # endif 7430 # ifdef TARGET_NR_ppoll 7431 case TARGET_NR_ppoll: 7432 # endif 7433 { 7434 struct target_pollfd *target_pfd; 7435 unsigned int nfds = arg2; 7436 int timeout = arg3; 7437 struct pollfd *pfd; 7438 unsigned int i; 7439 7440 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7441 if (!target_pfd) 7442 goto efault; 7443 7444 pfd = alloca(sizeof(struct pollfd) * nfds); 7445 for(i = 0; i < nfds; i++) { 7446 pfd[i].fd = 
tswap32(target_pfd[i].fd); 7447 pfd[i].events = tswap16(target_pfd[i].events); 7448 } 7449 7450 # ifdef TARGET_NR_ppoll 7451 if (num == TARGET_NR_ppoll) { 7452 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7453 target_sigset_t *target_set; 7454 sigset_t _set, *set = &_set; 7455 7456 if (arg3) { 7457 if (target_to_host_timespec(timeout_ts, arg3)) { 7458 unlock_user(target_pfd, arg1, 0); 7459 goto efault; 7460 } 7461 } else { 7462 timeout_ts = NULL; 7463 } 7464 7465 if (arg4) { 7466 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7467 if (!target_set) { 7468 unlock_user(target_pfd, arg1, 0); 7469 goto efault; 7470 } 7471 target_to_host_sigset(set, target_set); 7472 } else { 7473 set = NULL; 7474 } 7475 7476 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7477 7478 if (!is_error(ret) && arg3) { 7479 host_to_target_timespec(arg3, timeout_ts); 7480 } 7481 if (arg4) { 7482 unlock_user(target_set, arg4, 0); 7483 } 7484 } else 7485 # endif 7486 ret = get_errno(poll(pfd, nfds, timeout)); 7487 7488 if (!is_error(ret)) { 7489 for(i = 0; i < nfds; i++) { 7490 target_pfd[i].revents = tswap16(pfd[i].revents); 7491 } 7492 } 7493 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7494 } 7495 break; 7496 #endif 7497 case TARGET_NR_flock: 7498 /* NOTE: the flock constant seems to be the same for every 7499 Linux platform */ 7500 ret = get_errno(flock(arg1, arg2)); 7501 break; 7502 case TARGET_NR_readv: 7503 { 7504 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7505 if (vec != NULL) { 7506 ret = get_errno(readv(arg1, vec, arg3)); 7507 unlock_iovec(vec, arg2, arg3, 1); 7508 } else { 7509 ret = -host_to_target_errno(errno); 7510 } 7511 } 7512 break; 7513 case TARGET_NR_writev: 7514 { 7515 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7516 if (vec != NULL) { 7517 ret = get_errno(writev(arg1, vec, arg3)); 7518 unlock_iovec(vec, arg2, arg3, 0); 7519 } else { 7520 ret = -host_to_target_errno(errno); 7521 } 7522 } 7523 break; 7524 case TARGET_NR_getsid: 7525 ret = get_errno(getsid(arg1)); 7526 break; 7527 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7528 case TARGET_NR_fdatasync: 7529 ret = get_errno(fdatasync(arg1)); 7530 break; 7531 #endif 7532 case TARGET_NR__sysctl: 7533 /* We don't implement this, but ENOTDIR is always a safe 7534 return value. */ 7535 ret = -TARGET_ENOTDIR; 7536 break; 7537 case TARGET_NR_sched_getaffinity: 7538 { 7539 unsigned int mask_size; 7540 unsigned long *mask; 7541 7542 /* 7543 * sched_getaffinity needs multiples of ulong, so need to take 7544 * care of mismatches between target ulong and host ulong sizes. 7545 */ 7546 if (arg2 & (sizeof(abi_ulong) - 1)) { 7547 ret = -TARGET_EINVAL; 7548 break; 7549 } 7550 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7551 7552 mask = alloca(mask_size); 7553 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7554 7555 if (!is_error(ret)) { 7556 if (copy_to_user(arg3, mask, ret)) { 7557 goto efault; 7558 } 7559 } 7560 } 7561 break; 7562 case TARGET_NR_sched_setaffinity: 7563 { 7564 unsigned int mask_size; 7565 unsigned long *mask; 7566 7567 /* 7568 * sched_setaffinity needs multiples of ulong, so need to take 7569 * care of mismatches between target ulong and host ulong sizes. 
7570 */ 7571 if (arg2 & (sizeof(abi_ulong) - 1)) { 7572 ret = -TARGET_EINVAL; 7573 break; 7574 } 7575 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7576 7577 mask = alloca(mask_size); 7578 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7579 goto efault; 7580 } 7581 memcpy(mask, p, arg2); 7582 unlock_user_struct(p, arg2, 0); 7583 7584 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7585 } 7586 break; 7587 case TARGET_NR_sched_setparam: 7588 { 7589 struct sched_param *target_schp; 7590 struct sched_param schp; 7591 7592 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7593 goto efault; 7594 schp.sched_priority = tswap32(target_schp->sched_priority); 7595 unlock_user_struct(target_schp, arg2, 0); 7596 ret = get_errno(sched_setparam(arg1, &schp)); 7597 } 7598 break; 7599 case TARGET_NR_sched_getparam: 7600 { 7601 struct sched_param *target_schp; 7602 struct sched_param schp; 7603 ret = get_errno(sched_getparam(arg1, &schp)); 7604 if (!is_error(ret)) { 7605 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7606 goto efault; 7607 target_schp->sched_priority = tswap32(schp.sched_priority); 7608 unlock_user_struct(target_schp, arg2, 1); 7609 } 7610 } 7611 break; 7612 case TARGET_NR_sched_setscheduler: 7613 { 7614 struct sched_param *target_schp; 7615 struct sched_param schp; 7616 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7617 goto efault; 7618 schp.sched_priority = tswap32(target_schp->sched_priority); 7619 unlock_user_struct(target_schp, arg3, 0); 7620 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7621 } 7622 break; 7623 case TARGET_NR_sched_getscheduler: 7624 ret = get_errno(sched_getscheduler(arg1)); 7625 break; 7626 case TARGET_NR_sched_yield: 7627 ret = get_errno(sched_yield()); 7628 break; 7629 case TARGET_NR_sched_get_priority_max: 7630 ret = get_errno(sched_get_priority_max(arg1)); 7631 break; 7632 case TARGET_NR_sched_get_priority_min: 7633 ret = get_errno(sched_get_priority_min(arg1)); 7634 break; 7635 case TARGET_NR_sched_rr_get_interval: 7636 { 7637 struct timespec ts; 7638 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7639 if (!is_error(ret)) { 7640 host_to_target_timespec(arg2, &ts); 7641 } 7642 } 7643 break; 7644 case TARGET_NR_nanosleep: 7645 { 7646 struct timespec req, rem; 7647 target_to_host_timespec(&req, arg1); 7648 ret = get_errno(nanosleep(&req, &rem)); 7649 if (is_error(ret) && arg2) { 7650 host_to_target_timespec(arg2, &rem); 7651 } 7652 } 7653 break; 7654 #ifdef TARGET_NR_query_module 7655 case TARGET_NR_query_module: 7656 goto unimplemented; 7657 #endif 7658 #ifdef TARGET_NR_nfsservctl 7659 case TARGET_NR_nfsservctl: 7660 goto unimplemented; 7661 #endif 7662 case TARGET_NR_prctl: 7663 switch (arg1) { 7664 case PR_GET_PDEATHSIG: 7665 { 7666 int deathsig; 7667 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7668 if (!is_error(ret) && arg2 7669 && put_user_ual(deathsig, arg2)) { 7670 goto efault; 7671 } 7672 break; 7673 } 7674 #ifdef PR_GET_NAME 7675 case PR_GET_NAME: 7676 { 7677 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7678 if (!name) { 7679 goto efault; 7680 } 7681 ret = get_errno(prctl(arg1, (unsigned long)name, 7682 arg3, arg4, arg5)); 7683 unlock_user(name, arg2, 16); 7684 break; 7685 } 7686 case PR_SET_NAME: 7687 { 7688 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7689 if (!name) { 7690 goto efault; 7691 } 7692 ret = get_errno(prctl(arg1, (unsigned long)name, 7693 arg3, arg4, arg5)); 7694 unlock_user(name, arg2, 0); 7695 break; 7696 } 7697 #endif 7698 
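    /* Only the prctl options that pass pointers need explicit cases here;
     * options whose arguments are plain integers fall through to the
     * pass-through default below.  A new pointer-taking option would be
     * wired up in the same shape as PR_GET_NAME above, e.g. (hypothetical
     * option name and buffer size, sketch only):
     */
#if 0   /* illustrative sketch, not compiled */
        case PR_GET_EXAMPLE_BUF:    /* hypothetical option returning 32 bytes */
            {
                void *buf = lock_user(VERIFY_WRITE, arg2, 32, 1);
                if (!buf) {
                    goto efault;
                }
                ret = get_errno(prctl(arg1, (unsigned long)buf,
                                      arg3, arg4, arg5));
                unlock_user(buf, arg2, 32);   /* copy the buffer back */
                break;
            }
#endif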
default: 7699 /* Most prctl options have no pointer arguments */ 7700 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7701 break; 7702 } 7703 break; 7704 #ifdef TARGET_NR_arch_prctl 7705 case TARGET_NR_arch_prctl: 7706 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7707 ret = do_arch_prctl(cpu_env, arg1, arg2); 7708 break; 7709 #else 7710 goto unimplemented; 7711 #endif 7712 #endif 7713 #ifdef TARGET_NR_pread64 7714 case TARGET_NR_pread64: 7715 if (regpairs_aligned(cpu_env)) { 7716 arg4 = arg5; 7717 arg5 = arg6; 7718 } 7719 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7720 goto efault; 7721 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7722 unlock_user(p, arg2, ret); 7723 break; 7724 case TARGET_NR_pwrite64: 7725 if (regpairs_aligned(cpu_env)) { 7726 arg4 = arg5; 7727 arg5 = arg6; 7728 } 7729 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7730 goto efault; 7731 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7732 unlock_user(p, arg2, 0); 7733 break; 7734 #endif 7735 case TARGET_NR_getcwd: 7736 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7737 goto efault; 7738 ret = get_errno(sys_getcwd1(p, arg2)); 7739 unlock_user(p, arg1, ret); 7740 break; 7741 case TARGET_NR_capget: 7742 goto unimplemented; 7743 case TARGET_NR_capset: 7744 goto unimplemented; 7745 case TARGET_NR_sigaltstack: 7746 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7747 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7748 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7749 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7750 break; 7751 #else 7752 goto unimplemented; 7753 #endif 7754 7755 #ifdef CONFIG_SENDFILE 7756 case TARGET_NR_sendfile: 7757 { 7758 off_t *offp = NULL; 7759 off_t off; 7760 if (arg3) { 7761 ret = get_user_sal(off, arg3); 7762 if (is_error(ret)) { 7763 break; 7764 } 7765 offp = &off; 7766 } 7767 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7768 if (!is_error(ret) && arg3) { 7769 abi_long ret2 = put_user_sal(off, arg3); 7770 if (is_error(ret2)) { 7771 ret = ret2; 7772 } 7773 } 7774 break; 7775 } 7776 #ifdef TARGET_NR_sendfile64 7777 case TARGET_NR_sendfile64: 7778 { 7779 off_t *offp = NULL; 7780 off_t off; 7781 if (arg3) { 7782 ret = get_user_s64(off, arg3); 7783 if (is_error(ret)) { 7784 break; 7785 } 7786 offp = &off; 7787 } 7788 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7789 if (!is_error(ret) && arg3) { 7790 abi_long ret2 = put_user_s64(off, arg3); 7791 if (is_error(ret2)) { 7792 ret = ret2; 7793 } 7794 } 7795 break; 7796 } 7797 #endif 7798 #else 7799 case TARGET_NR_sendfile: 7800 #ifdef TARGET_NR_sendfile64 7801 case TARGET_NR_sendfile64: 7802 #endif 7803 goto unimplemented; 7804 #endif 7805 7806 #ifdef TARGET_NR_getpmsg 7807 case TARGET_NR_getpmsg: 7808 goto unimplemented; 7809 #endif 7810 #ifdef TARGET_NR_putpmsg 7811 case TARGET_NR_putpmsg: 7812 goto unimplemented; 7813 #endif 7814 #ifdef TARGET_NR_vfork 7815 case TARGET_NR_vfork: 7816 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7817 0, 0, 0, 0)); 7818 break; 7819 #endif 7820 #ifdef TARGET_NR_ugetrlimit 7821 case TARGET_NR_ugetrlimit: 7822 { 7823 struct rlimit rlim; 7824 int resource = target_to_host_resource(arg1); 7825 ret = get_errno(getrlimit(resource, &rlim)); 7826 if (!is_error(ret)) { 7827 struct target_rlimit *target_rlim; 7828 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7829 goto efault; 7830 
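    /* For the pread64/pwrite64 cases above: 32-bit guests pass the 64-bit
     * file offset split across two registers, and some ABIs require that
     * pair to start on an even register, which is what the
     * regpairs_aligned() shuffle compensates for.  target_offset64()
     * (defined elsewhere in this file) glues the halves back together;
     * conceptually it is close to the sketch below, with the order of the
     * halves depending on the guest ABI and endianness.
     */
#if 0   /* illustrative sketch, not compiled */
    static uint64_t example_offset64(abi_ulong low, abi_ulong high)
    {
        /* which argument carries the high half is ABI-dependent */
        return ((uint64_t)high << 32) | (uint32_t)low;
    }
#endif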
target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7831 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7832 unlock_user_struct(target_rlim, arg2, 1); 7833 } 7834 break; 7835 } 7836 #endif 7837 #ifdef TARGET_NR_truncate64 7838 case TARGET_NR_truncate64: 7839 if (!(p = lock_user_string(arg1))) 7840 goto efault; 7841 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7842 unlock_user(p, arg1, 0); 7843 break; 7844 #endif 7845 #ifdef TARGET_NR_ftruncate64 7846 case TARGET_NR_ftruncate64: 7847 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7848 break; 7849 #endif 7850 #ifdef TARGET_NR_stat64 7851 case TARGET_NR_stat64: 7852 if (!(p = lock_user_string(arg1))) 7853 goto efault; 7854 ret = get_errno(stat(path(p), &st)); 7855 unlock_user(p, arg1, 0); 7856 if (!is_error(ret)) 7857 ret = host_to_target_stat64(cpu_env, arg2, &st); 7858 break; 7859 #endif 7860 #ifdef TARGET_NR_lstat64 7861 case TARGET_NR_lstat64: 7862 if (!(p = lock_user_string(arg1))) 7863 goto efault; 7864 ret = get_errno(lstat(path(p), &st)); 7865 unlock_user(p, arg1, 0); 7866 if (!is_error(ret)) 7867 ret = host_to_target_stat64(cpu_env, arg2, &st); 7868 break; 7869 #endif 7870 #ifdef TARGET_NR_fstat64 7871 case TARGET_NR_fstat64: 7872 ret = get_errno(fstat(arg1, &st)); 7873 if (!is_error(ret)) 7874 ret = host_to_target_stat64(cpu_env, arg2, &st); 7875 break; 7876 #endif 7877 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 7878 #ifdef TARGET_NR_fstatat64 7879 case TARGET_NR_fstatat64: 7880 #endif 7881 #ifdef TARGET_NR_newfstatat 7882 case TARGET_NR_newfstatat: 7883 #endif 7884 if (!(p = lock_user_string(arg2))) 7885 goto efault; 7886 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 7887 if (!is_error(ret)) 7888 ret = host_to_target_stat64(cpu_env, arg3, &st); 7889 break; 7890 #endif 7891 case TARGET_NR_lchown: 7892 if (!(p = lock_user_string(arg1))) 7893 goto efault; 7894 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7895 unlock_user(p, arg1, 0); 7896 break; 7897 #ifdef TARGET_NR_getuid 7898 case TARGET_NR_getuid: 7899 ret = get_errno(high2lowuid(getuid())); 7900 break; 7901 #endif 7902 #ifdef TARGET_NR_getgid 7903 case TARGET_NR_getgid: 7904 ret = get_errno(high2lowgid(getgid())); 7905 break; 7906 #endif 7907 #ifdef TARGET_NR_geteuid 7908 case TARGET_NR_geteuid: 7909 ret = get_errno(high2lowuid(geteuid())); 7910 break; 7911 #endif 7912 #ifdef TARGET_NR_getegid 7913 case TARGET_NR_getegid: 7914 ret = get_errno(high2lowgid(getegid())); 7915 break; 7916 #endif 7917 case TARGET_NR_setreuid: 7918 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7919 break; 7920 case TARGET_NR_setregid: 7921 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7922 break; 7923 case TARGET_NR_getgroups: 7924 { 7925 int gidsetsize = arg1; 7926 target_id *target_grouplist; 7927 gid_t *grouplist; 7928 int i; 7929 7930 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7931 ret = get_errno(getgroups(gidsetsize, grouplist)); 7932 if (gidsetsize == 0) 7933 break; 7934 if (!is_error(ret)) { 7935 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 7936 if (!target_grouplist) 7937 goto efault; 7938 for(i = 0;i < ret; i++) 7939 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7940 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 7941 } 7942 } 7943 break; 7944 case TARGET_NR_setgroups: 7945 { 7946 int gidsetsize = arg1; 7947 target_id *target_grouplist; 7948 gid_t *grouplist = NULL; 7949 int i; 7950 if 
(gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
        break;
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg2),
                                  low2highuid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowuid(ruid), arg1)
                    || put_user_u16(high2lowuid(euid), arg2)
                    || put_user_u16(high2lowuid(suid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowgid(rgid), arg1)
                    || put_user_u16(high2lowgid(egid), arg2)
                    || put_user_u16(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret =
-TARGET_EOPNOTSUPP; 8078 switch (arg1) { 8079 case TARGET_GSI_IEEE_FP_CONTROL: 8080 { 8081 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 8082 8083 /* Copied from linux ieee_fpcr_to_swcr. */ 8084 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 8085 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 8086 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 8087 | SWCR_TRAP_ENABLE_DZE 8088 | SWCR_TRAP_ENABLE_OVF); 8089 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 8090 | SWCR_TRAP_ENABLE_INE); 8091 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 8092 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 8093 8094 if (put_user_u64 (swcr, arg2)) 8095 goto efault; 8096 ret = 0; 8097 } 8098 break; 8099 8100 /* case GSI_IEEE_STATE_AT_SIGNAL: 8101 -- Not implemented in linux kernel. 8102 case GSI_UACPROC: 8103 -- Retrieves current unaligned access state; not much used. 8104 case GSI_PROC_TYPE: 8105 -- Retrieves implver information; surely not used. 8106 case GSI_GET_HWRPB: 8107 -- Grabs a copy of the HWRPB; surely not used. 8108 */ 8109 } 8110 break; 8111 #endif 8112 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 8113 /* Alpha specific */ 8114 case TARGET_NR_osf_setsysinfo: 8115 ret = -TARGET_EOPNOTSUPP; 8116 switch (arg1) { 8117 case TARGET_SSI_IEEE_FP_CONTROL: 8118 { 8119 uint64_t swcr, fpcr, orig_fpcr; 8120 8121 if (get_user_u64 (swcr, arg2)) { 8122 goto efault; 8123 } 8124 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8125 fpcr = orig_fpcr & FPCR_DYN_MASK; 8126 8127 /* Copied from linux ieee_swcr_to_fpcr. */ 8128 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 8129 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 8130 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 8131 | SWCR_TRAP_ENABLE_DZE 8132 | SWCR_TRAP_ENABLE_OVF)) << 48; 8133 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 8134 | SWCR_TRAP_ENABLE_INE)) << 57; 8135 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 8136 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 8137 8138 cpu_alpha_store_fpcr(cpu_env, fpcr); 8139 ret = 0; 8140 } 8141 break; 8142 8143 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 8144 { 8145 uint64_t exc, fpcr, orig_fpcr; 8146 int si_code; 8147 8148 if (get_user_u64(exc, arg2)) { 8149 goto efault; 8150 } 8151 8152 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8153 8154 /* We only add to the exception status here. */ 8155 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 8156 8157 cpu_alpha_store_fpcr(cpu_env, fpcr); 8158 ret = 0; 8159 8160 /* Old exceptions are not signaled. */ 8161 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 8162 8163 /* If any exceptions set by this call, 8164 and are unmasked, send a signal. */ 8165 si_code = 0; 8166 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 8167 si_code = TARGET_FPE_FLTRES; 8168 } 8169 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 8170 si_code = TARGET_FPE_FLTUND; 8171 } 8172 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 8173 si_code = TARGET_FPE_FLTOVF; 8174 } 8175 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 8176 si_code = TARGET_FPE_FLTDIV; 8177 } 8178 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 8179 si_code = TARGET_FPE_FLTINV; 8180 } 8181 if (si_code != 0) { 8182 target_siginfo_t info; 8183 info.si_signo = SIGFPE; 8184 info.si_errno = 0; 8185 info.si_code = si_code; 8186 info._sifields._sigfault._addr 8187 = ((CPUArchState *)cpu_env)->pc; 8188 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 8189 } 8190 } 8191 break; 8192 8193 /* case SSI_NVPAIRS: 8194 -- Used with SSIN_UACPROC to enable unaligned accesses. 
8195 case SSI_IEEE_STATE_AT_SIGNAL: 8196 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8197 -- Not implemented in linux kernel 8198 */ 8199 } 8200 break; 8201 #endif 8202 #ifdef TARGET_NR_osf_sigprocmask 8203 /* Alpha specific. */ 8204 case TARGET_NR_osf_sigprocmask: 8205 { 8206 abi_ulong mask; 8207 int how; 8208 sigset_t set, oldset; 8209 8210 switch(arg1) { 8211 case TARGET_SIG_BLOCK: 8212 how = SIG_BLOCK; 8213 break; 8214 case TARGET_SIG_UNBLOCK: 8215 how = SIG_UNBLOCK; 8216 break; 8217 case TARGET_SIG_SETMASK: 8218 how = SIG_SETMASK; 8219 break; 8220 default: 8221 ret = -TARGET_EINVAL; 8222 goto fail; 8223 } 8224 mask = arg2; 8225 target_to_host_old_sigset(&set, &mask); 8226 sigprocmask(how, &set, &oldset); 8227 host_to_target_old_sigset(&mask, &oldset); 8228 ret = mask; 8229 } 8230 break; 8231 #endif 8232 8233 #ifdef TARGET_NR_getgid32 8234 case TARGET_NR_getgid32: 8235 ret = get_errno(getgid()); 8236 break; 8237 #endif 8238 #ifdef TARGET_NR_geteuid32 8239 case TARGET_NR_geteuid32: 8240 ret = get_errno(geteuid()); 8241 break; 8242 #endif 8243 #ifdef TARGET_NR_getegid32 8244 case TARGET_NR_getegid32: 8245 ret = get_errno(getegid()); 8246 break; 8247 #endif 8248 #ifdef TARGET_NR_setreuid32 8249 case TARGET_NR_setreuid32: 8250 ret = get_errno(setreuid(arg1, arg2)); 8251 break; 8252 #endif 8253 #ifdef TARGET_NR_setregid32 8254 case TARGET_NR_setregid32: 8255 ret = get_errno(setregid(arg1, arg2)); 8256 break; 8257 #endif 8258 #ifdef TARGET_NR_getgroups32 8259 case TARGET_NR_getgroups32: 8260 { 8261 int gidsetsize = arg1; 8262 uint32_t *target_grouplist; 8263 gid_t *grouplist; 8264 int i; 8265 8266 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8267 ret = get_errno(getgroups(gidsetsize, grouplist)); 8268 if (gidsetsize == 0) 8269 break; 8270 if (!is_error(ret)) { 8271 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8272 if (!target_grouplist) { 8273 ret = -TARGET_EFAULT; 8274 goto fail; 8275 } 8276 for(i = 0;i < ret; i++) 8277 target_grouplist[i] = tswap32(grouplist[i]); 8278 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8279 } 8280 } 8281 break; 8282 #endif 8283 #ifdef TARGET_NR_setgroups32 8284 case TARGET_NR_setgroups32: 8285 { 8286 int gidsetsize = arg1; 8287 uint32_t *target_grouplist; 8288 gid_t *grouplist; 8289 int i; 8290 8291 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8292 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8293 if (!target_grouplist) { 8294 ret = -TARGET_EFAULT; 8295 goto fail; 8296 } 8297 for(i = 0;i < gidsetsize; i++) 8298 grouplist[i] = tswap32(target_grouplist[i]); 8299 unlock_user(target_grouplist, arg2, 0); 8300 ret = get_errno(setgroups(gidsetsize, grouplist)); 8301 } 8302 break; 8303 #endif 8304 #ifdef TARGET_NR_fchown32 8305 case TARGET_NR_fchown32: 8306 ret = get_errno(fchown(arg1, arg2, arg3)); 8307 break; 8308 #endif 8309 #ifdef TARGET_NR_setresuid32 8310 case TARGET_NR_setresuid32: 8311 ret = get_errno(setresuid(arg1, arg2, arg3)); 8312 break; 8313 #endif 8314 #ifdef TARGET_NR_getresuid32 8315 case TARGET_NR_getresuid32: 8316 { 8317 uid_t ruid, euid, suid; 8318 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8319 if (!is_error(ret)) { 8320 if (put_user_u32(ruid, arg1) 8321 || put_user_u32(euid, arg2) 8322 || put_user_u32(suid, arg3)) 8323 goto efault; 8324 } 8325 } 8326 break; 8327 #endif 8328 #ifdef TARGET_NR_setresgid32 8329 case TARGET_NR_setresgid32: 8330 ret = get_errno(setresgid(arg1, arg2, arg3)); 8331 break; 8332 #endif 8333 #ifdef TARGET_NR_getresgid32 8334 case TARGET_NR_getresgid32: 8335 { 8336 
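            /* Unlike the 16-bit legacy ID syscalls handled earlier, the *32
             * variants carry full 32-bit uids/gids, so values are copied
             * through without the high2low/low2high truncation helpers. */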
gid_t rgid, egid, sgid; 8337 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8338 if (!is_error(ret)) { 8339 if (put_user_u32(rgid, arg1) 8340 || put_user_u32(egid, arg2) 8341 || put_user_u32(sgid, arg3)) 8342 goto efault; 8343 } 8344 } 8345 break; 8346 #endif 8347 #ifdef TARGET_NR_chown32 8348 case TARGET_NR_chown32: 8349 if (!(p = lock_user_string(arg1))) 8350 goto efault; 8351 ret = get_errno(chown(p, arg2, arg3)); 8352 unlock_user(p, arg1, 0); 8353 break; 8354 #endif 8355 #ifdef TARGET_NR_setuid32 8356 case TARGET_NR_setuid32: 8357 ret = get_errno(setuid(arg1)); 8358 break; 8359 #endif 8360 #ifdef TARGET_NR_setgid32 8361 case TARGET_NR_setgid32: 8362 ret = get_errno(setgid(arg1)); 8363 break; 8364 #endif 8365 #ifdef TARGET_NR_setfsuid32 8366 case TARGET_NR_setfsuid32: 8367 ret = get_errno(setfsuid(arg1)); 8368 break; 8369 #endif 8370 #ifdef TARGET_NR_setfsgid32 8371 case TARGET_NR_setfsgid32: 8372 ret = get_errno(setfsgid(arg1)); 8373 break; 8374 #endif 8375 8376 case TARGET_NR_pivot_root: 8377 goto unimplemented; 8378 #ifdef TARGET_NR_mincore 8379 case TARGET_NR_mincore: 8380 { 8381 void *a; 8382 ret = -TARGET_EFAULT; 8383 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 8384 goto efault; 8385 if (!(p = lock_user_string(arg3))) 8386 goto mincore_fail; 8387 ret = get_errno(mincore(a, arg2, p)); 8388 unlock_user(p, arg3, ret); 8389 mincore_fail: 8390 unlock_user(a, arg1, 0); 8391 } 8392 break; 8393 #endif 8394 #ifdef TARGET_NR_arm_fadvise64_64 8395 case TARGET_NR_arm_fadvise64_64: 8396 { 8397 /* 8398 * arm_fadvise64_64 looks like fadvise64_64 but 8399 * with different argument order 8400 */ 8401 abi_long temp; 8402 temp = arg3; 8403 arg3 = arg4; 8404 arg4 = temp; 8405 } 8406 #endif 8407 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 8408 #ifdef TARGET_NR_fadvise64_64 8409 case TARGET_NR_fadvise64_64: 8410 #endif 8411 #ifdef TARGET_NR_fadvise64 8412 case TARGET_NR_fadvise64: 8413 #endif 8414 #ifdef TARGET_S390X 8415 switch (arg4) { 8416 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 8417 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 8418 case 6: arg4 = POSIX_FADV_DONTNEED; break; 8419 case 7: arg4 = POSIX_FADV_NOREUSE; break; 8420 default: break; 8421 } 8422 #endif 8423 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 8424 break; 8425 #endif 8426 #ifdef TARGET_NR_madvise 8427 case TARGET_NR_madvise: 8428 /* A straight passthrough may not be safe because qemu sometimes 8429 turns private file-backed mappings into anonymous mappings. 8430 This will break MADV_DONTNEED. 8431 This is a hint, so ignoring and returning success is ok. 
*/ 8432 ret = get_errno(0); 8433 break; 8434 #endif 8435 #if TARGET_ABI_BITS == 32 8436 case TARGET_NR_fcntl64: 8437 { 8438 int cmd; 8439 struct flock64 fl; 8440 struct target_flock64 *target_fl; 8441 #ifdef TARGET_ARM 8442 struct target_eabi_flock64 *target_efl; 8443 #endif 8444 8445 cmd = target_to_host_fcntl_cmd(arg2); 8446 if (cmd == -TARGET_EINVAL) { 8447 ret = cmd; 8448 break; 8449 } 8450 8451 switch(arg2) { 8452 case TARGET_F_GETLK64: 8453 #ifdef TARGET_ARM 8454 if (((CPUARMState *)cpu_env)->eabi) { 8455 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8456 goto efault; 8457 fl.l_type = tswap16(target_efl->l_type); 8458 fl.l_whence = tswap16(target_efl->l_whence); 8459 fl.l_start = tswap64(target_efl->l_start); 8460 fl.l_len = tswap64(target_efl->l_len); 8461 fl.l_pid = tswap32(target_efl->l_pid); 8462 unlock_user_struct(target_efl, arg3, 0); 8463 } else 8464 #endif 8465 { 8466 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8467 goto efault; 8468 fl.l_type = tswap16(target_fl->l_type); 8469 fl.l_whence = tswap16(target_fl->l_whence); 8470 fl.l_start = tswap64(target_fl->l_start); 8471 fl.l_len = tswap64(target_fl->l_len); 8472 fl.l_pid = tswap32(target_fl->l_pid); 8473 unlock_user_struct(target_fl, arg3, 0); 8474 } 8475 ret = get_errno(fcntl(arg1, cmd, &fl)); 8476 if (ret == 0) { 8477 #ifdef TARGET_ARM 8478 if (((CPUARMState *)cpu_env)->eabi) { 8479 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8480 goto efault; 8481 target_efl->l_type = tswap16(fl.l_type); 8482 target_efl->l_whence = tswap16(fl.l_whence); 8483 target_efl->l_start = tswap64(fl.l_start); 8484 target_efl->l_len = tswap64(fl.l_len); 8485 target_efl->l_pid = tswap32(fl.l_pid); 8486 unlock_user_struct(target_efl, arg3, 1); 8487 } else 8488 #endif 8489 { 8490 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8491 goto efault; 8492 target_fl->l_type = tswap16(fl.l_type); 8493 target_fl->l_whence = tswap16(fl.l_whence); 8494 target_fl->l_start = tswap64(fl.l_start); 8495 target_fl->l_len = tswap64(fl.l_len); 8496 target_fl->l_pid = tswap32(fl.l_pid); 8497 unlock_user_struct(target_fl, arg3, 1); 8498 } 8499 } 8500 break; 8501 8502 case TARGET_F_SETLK64: 8503 case TARGET_F_SETLKW64: 8504 #ifdef TARGET_ARM 8505 if (((CPUARMState *)cpu_env)->eabi) { 8506 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8507 goto efault; 8508 fl.l_type = tswap16(target_efl->l_type); 8509 fl.l_whence = tswap16(target_efl->l_whence); 8510 fl.l_start = tswap64(target_efl->l_start); 8511 fl.l_len = tswap64(target_efl->l_len); 8512 fl.l_pid = tswap32(target_efl->l_pid); 8513 unlock_user_struct(target_efl, arg3, 0); 8514 } else 8515 #endif 8516 { 8517 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8518 goto efault; 8519 fl.l_type = tswap16(target_fl->l_type); 8520 fl.l_whence = tswap16(target_fl->l_whence); 8521 fl.l_start = tswap64(target_fl->l_start); 8522 fl.l_len = tswap64(target_fl->l_len); 8523 fl.l_pid = tswap32(target_fl->l_pid); 8524 unlock_user_struct(target_fl, arg3, 0); 8525 } 8526 ret = get_errno(fcntl(arg1, cmd, &fl)); 8527 break; 8528 default: 8529 ret = do_fcntl(arg1, arg2, arg3); 8530 break; 8531 } 8532 break; 8533 } 8534 #endif 8535 #ifdef TARGET_NR_cacheflush 8536 case TARGET_NR_cacheflush: 8537 /* self-modifying code is handled automatically, so nothing needed */ 8538 ret = 0; 8539 break; 8540 #endif 8541 #ifdef TARGET_NR_security 8542 case TARGET_NR_security: 8543 goto unimplemented; 8544 #endif 8545 #ifdef TARGET_NR_getpagesize 8546 case TARGET_NR_getpagesize: 8547 ret = 
TARGET_PAGE_SIZE; 8548 break; 8549 #endif 8550 case TARGET_NR_gettid: 8551 ret = get_errno(gettid()); 8552 break; 8553 #ifdef TARGET_NR_readahead 8554 case TARGET_NR_readahead: 8555 #if TARGET_ABI_BITS == 32 8556 if (regpairs_aligned(cpu_env)) { 8557 arg2 = arg3; 8558 arg3 = arg4; 8559 arg4 = arg5; 8560 } 8561 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 8562 #else 8563 ret = get_errno(readahead(arg1, arg2, arg3)); 8564 #endif 8565 break; 8566 #endif 8567 #ifdef CONFIG_ATTR 8568 #ifdef TARGET_NR_setxattr 8569 case TARGET_NR_listxattr: 8570 case TARGET_NR_llistxattr: 8571 { 8572 void *p, *b = 0; 8573 if (arg2) { 8574 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8575 if (!b) { 8576 ret = -TARGET_EFAULT; 8577 break; 8578 } 8579 } 8580 p = lock_user_string(arg1); 8581 if (p) { 8582 if (num == TARGET_NR_listxattr) { 8583 ret = get_errno(listxattr(p, b, arg3)); 8584 } else { 8585 ret = get_errno(llistxattr(p, b, arg3)); 8586 } 8587 } else { 8588 ret = -TARGET_EFAULT; 8589 } 8590 unlock_user(p, arg1, 0); 8591 unlock_user(b, arg2, arg3); 8592 break; 8593 } 8594 case TARGET_NR_flistxattr: 8595 { 8596 void *b = 0; 8597 if (arg2) { 8598 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8599 if (!b) { 8600 ret = -TARGET_EFAULT; 8601 break; 8602 } 8603 } 8604 ret = get_errno(flistxattr(arg1, b, arg3)); 8605 unlock_user(b, arg2, arg3); 8606 break; 8607 } 8608 case TARGET_NR_setxattr: 8609 case TARGET_NR_lsetxattr: 8610 { 8611 void *p, *n, *v = 0; 8612 if (arg3) { 8613 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8614 if (!v) { 8615 ret = -TARGET_EFAULT; 8616 break; 8617 } 8618 } 8619 p = lock_user_string(arg1); 8620 n = lock_user_string(arg2); 8621 if (p && n) { 8622 if (num == TARGET_NR_setxattr) { 8623 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8624 } else { 8625 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8626 } 8627 } else { 8628 ret = -TARGET_EFAULT; 8629 } 8630 unlock_user(p, arg1, 0); 8631 unlock_user(n, arg2, 0); 8632 unlock_user(v, arg3, 0); 8633 } 8634 break; 8635 case TARGET_NR_fsetxattr: 8636 { 8637 void *n, *v = 0; 8638 if (arg3) { 8639 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8640 if (!v) { 8641 ret = -TARGET_EFAULT; 8642 break; 8643 } 8644 } 8645 n = lock_user_string(arg2); 8646 if (n) { 8647 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8648 } else { 8649 ret = -TARGET_EFAULT; 8650 } 8651 unlock_user(n, arg2, 0); 8652 unlock_user(v, arg3, 0); 8653 } 8654 break; 8655 case TARGET_NR_getxattr: 8656 case TARGET_NR_lgetxattr: 8657 { 8658 void *p, *n, *v = 0; 8659 if (arg3) { 8660 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8661 if (!v) { 8662 ret = -TARGET_EFAULT; 8663 break; 8664 } 8665 } 8666 p = lock_user_string(arg1); 8667 n = lock_user_string(arg2); 8668 if (p && n) { 8669 if (num == TARGET_NR_getxattr) { 8670 ret = get_errno(getxattr(p, n, v, arg4)); 8671 } else { 8672 ret = get_errno(lgetxattr(p, n, v, arg4)); 8673 } 8674 } else { 8675 ret = -TARGET_EFAULT; 8676 } 8677 unlock_user(p, arg1, 0); 8678 unlock_user(n, arg2, 0); 8679 unlock_user(v, arg3, arg4); 8680 } 8681 break; 8682 case TARGET_NR_fgetxattr: 8683 { 8684 void *n, *v = 0; 8685 if (arg3) { 8686 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8687 if (!v) { 8688 ret = -TARGET_EFAULT; 8689 break; 8690 } 8691 } 8692 n = lock_user_string(arg2); 8693 if (n) { 8694 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8695 } else { 8696 ret = -TARGET_EFAULT; 8697 } 8698 unlock_user(n, arg2, 0); 8699 unlock_user(v, arg3, arg4); 8700 } 8701 break; 8702 case TARGET_NR_removexattr: 8703 case 
TARGET_NR_lremovexattr: 8704 { 8705 void *p, *n; 8706 p = lock_user_string(arg1); 8707 n = lock_user_string(arg2); 8708 if (p && n) { 8709 if (num == TARGET_NR_removexattr) { 8710 ret = get_errno(removexattr(p, n)); 8711 } else { 8712 ret = get_errno(lremovexattr(p, n)); 8713 } 8714 } else { 8715 ret = -TARGET_EFAULT; 8716 } 8717 unlock_user(p, arg1, 0); 8718 unlock_user(n, arg2, 0); 8719 } 8720 break; 8721 case TARGET_NR_fremovexattr: 8722 { 8723 void *n; 8724 n = lock_user_string(arg2); 8725 if (n) { 8726 ret = get_errno(fremovexattr(arg1, n)); 8727 } else { 8728 ret = -TARGET_EFAULT; 8729 } 8730 unlock_user(n, arg2, 0); 8731 } 8732 break; 8733 #endif 8734 #endif /* CONFIG_ATTR */ 8735 #ifdef TARGET_NR_set_thread_area 8736 case TARGET_NR_set_thread_area: 8737 #if defined(TARGET_MIPS) 8738 ((CPUMIPSState *) cpu_env)->tls_value = arg1; 8739 ret = 0; 8740 break; 8741 #elif defined(TARGET_CRIS) 8742 if (arg1 & 0xff) 8743 ret = -TARGET_EINVAL; 8744 else { 8745 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 8746 ret = 0; 8747 } 8748 break; 8749 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 8750 ret = do_set_thread_area(cpu_env, arg1); 8751 break; 8752 #elif defined(TARGET_M68K) 8753 { 8754 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 8755 ts->tp_value = arg1; 8756 ret = 0; 8757 break; 8758 } 8759 #else 8760 goto unimplemented_nowarn; 8761 #endif 8762 #endif 8763 #ifdef TARGET_NR_get_thread_area 8764 case TARGET_NR_get_thread_area: 8765 #if defined(TARGET_I386) && defined(TARGET_ABI32) 8766 ret = do_get_thread_area(cpu_env, arg1); 8767 break; 8768 #elif defined(TARGET_M68K) 8769 { 8770 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 8771 ret = ts->tp_value; 8772 break; 8773 } 8774 #else 8775 goto unimplemented_nowarn; 8776 #endif 8777 #endif 8778 #ifdef TARGET_NR_getdomainname 8779 case TARGET_NR_getdomainname: 8780 goto unimplemented_nowarn; 8781 #endif 8782 8783 #ifdef TARGET_NR_clock_gettime 8784 case TARGET_NR_clock_gettime: 8785 { 8786 struct timespec ts; 8787 ret = get_errno(clock_gettime(arg1, &ts)); 8788 if (!is_error(ret)) { 8789 host_to_target_timespec(arg2, &ts); 8790 } 8791 break; 8792 } 8793 #endif 8794 #ifdef TARGET_NR_clock_getres 8795 case TARGET_NR_clock_getres: 8796 { 8797 struct timespec ts; 8798 ret = get_errno(clock_getres(arg1, &ts)); 8799 if (!is_error(ret)) { 8800 host_to_target_timespec(arg2, &ts); 8801 } 8802 break; 8803 } 8804 #endif 8805 #ifdef TARGET_NR_clock_nanosleep 8806 case TARGET_NR_clock_nanosleep: 8807 { 8808 struct timespec ts; 8809 target_to_host_timespec(&ts, arg3); 8810 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); 8811 if (arg4) 8812 host_to_target_timespec(arg4, &ts); 8813 break; 8814 } 8815 #endif 8816 8817 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 8818 case TARGET_NR_set_tid_address: 8819 ret = get_errno(set_tid_address((int *)g2h(arg1))); 8820 break; 8821 #endif 8822 8823 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 8824 case TARGET_NR_tkill: 8825 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 8826 break; 8827 #endif 8828 8829 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 8830 case TARGET_NR_tgkill: 8831 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 8832 target_to_host_signal(arg3))); 8833 break; 8834 #endif 8835 8836 #ifdef TARGET_NR_set_robust_list 8837 case TARGET_NR_set_robust_list: 8838 case TARGET_NR_get_robust_list: 8839 /* The ABI for supporting robust futexes has userspace pass 8840 * the kernel a pointer to a linked list which is updated by 8841 * userspace after the syscall; the list is walked by the kernel 8842 * when the thread exits. Since the linked list in QEMU guest 8843 * memory isn't a valid linked list for the host and we have 8844 * no way to reliably intercept the thread-death event, we can't 8845 * support these. Silently return ENOSYS so that guest userspace 8846 * falls back to a non-robust futex implementation (which should 8847 * be OK except in the corner case of the guest crashing while 8848 * holding a mutex that is shared with another process via 8849 * shared memory). 8850 */ 8851 goto unimplemented_nowarn; 8852 #endif 8853 8854 #if defined(TARGET_NR_utimensat) 8855 case TARGET_NR_utimensat: 8856 { 8857 struct timespec *tsp, ts[2]; 8858 if (!arg3) { 8859 tsp = NULL; 8860 } else { 8861 target_to_host_timespec(ts, arg3); 8862 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 8863 tsp = ts; 8864 } 8865 if (!arg2) 8866 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 8867 else { 8868 if (!(p = lock_user_string(arg2))) { 8869 ret = -TARGET_EFAULT; 8870 goto fail; 8871 } 8872 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 8873 unlock_user(p, arg2, 0); 8874 } 8875 } 8876 break; 8877 #endif 8878 case TARGET_NR_futex: 8879 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 8880 break; 8881 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 8882 case TARGET_NR_inotify_init: 8883 ret = get_errno(sys_inotify_init()); 8884 break; 8885 #endif 8886 #ifdef CONFIG_INOTIFY1 8887 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 8888 case TARGET_NR_inotify_init1: 8889 ret = get_errno(sys_inotify_init1(arg1)); 8890 break; 8891 #endif 8892 #endif 8893 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 8894 case TARGET_NR_inotify_add_watch: 8895 p = lock_user_string(arg2); 8896 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 8897 unlock_user(p, arg2, 0); 8898 break; 8899 #endif 8900 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 8901 case TARGET_NR_inotify_rm_watch: 8902 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 8903 break; 8904 #endif 8905 8906 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 8907 case TARGET_NR_mq_open: 8908 { 8909 struct mq_attr posix_mq_attr; 8910 8911 p = lock_user_string(arg1 - 1); 8912 if (arg4 != 0) 8913 copy_from_user_mq_attr (&posix_mq_attr, arg4); 8914 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 8915 unlock_user (p, arg1, 0); 8916 } 8917 break; 8918 8919 case TARGET_NR_mq_unlink: 8920 p = lock_user_string(arg1 - 1); 
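        /* The guest C library strips the leading '/' from the queue name
         * before making the syscall (it passes name + 1), so back up one
         * byte to recover a name the host mq functions will accept. */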
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /* break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }

        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if
defined(TARGET_MIPS) 9056 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9057 target_offset64(arg5, arg6), arg7)); 9058 #else 9059 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 9060 target_offset64(arg4, arg5), arg6)); 9061 #endif /* !TARGET_MIPS */ 9062 #else 9063 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 9064 #endif 9065 break; 9066 #endif 9067 #if defined(TARGET_NR_sync_file_range2) 9068 case TARGET_NR_sync_file_range2: 9069 /* This is like sync_file_range but the arguments are reordered */ 9070 #if TARGET_ABI_BITS == 32 9071 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9072 target_offset64(arg5, arg6), arg2)); 9073 #else 9074 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 9075 #endif 9076 break; 9077 #endif 9078 #endif 9079 #if defined(CONFIG_EPOLL) 9080 #if defined(TARGET_NR_epoll_create) 9081 case TARGET_NR_epoll_create: 9082 ret = get_errno(epoll_create(arg1)); 9083 break; 9084 #endif 9085 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 9086 case TARGET_NR_epoll_create1: 9087 ret = get_errno(epoll_create1(arg1)); 9088 break; 9089 #endif 9090 #if defined(TARGET_NR_epoll_ctl) 9091 case TARGET_NR_epoll_ctl: 9092 { 9093 struct epoll_event ep; 9094 struct epoll_event *epp = 0; 9095 if (arg4) { 9096 struct target_epoll_event *target_ep; 9097 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 9098 goto efault; 9099 } 9100 ep.events = tswap32(target_ep->events); 9101 /* The epoll_data_t union is just opaque data to the kernel, 9102 * so we transfer all 64 bits across and need not worry what 9103 * actual data type it is. 9104 */ 9105 ep.data.u64 = tswap64(target_ep->data.u64); 9106 unlock_user_struct(target_ep, arg4, 0); 9107 epp = &ep; 9108 } 9109 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 9110 break; 9111 } 9112 #endif 9113 9114 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 9115 #define IMPLEMENT_EPOLL_PWAIT 9116 #endif 9117 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 9118 #if defined(TARGET_NR_epoll_wait) 9119 case TARGET_NR_epoll_wait: 9120 #endif 9121 #if defined(IMPLEMENT_EPOLL_PWAIT) 9122 case TARGET_NR_epoll_pwait: 9123 #endif 9124 { 9125 struct target_epoll_event *target_ep; 9126 struct epoll_event *ep; 9127 int epfd = arg1; 9128 int maxevents = arg3; 9129 int timeout = arg4; 9130 9131 target_ep = lock_user(VERIFY_WRITE, arg2, 9132 maxevents * sizeof(struct target_epoll_event), 1); 9133 if (!target_ep) { 9134 goto efault; 9135 } 9136 9137 ep = alloca(maxevents * sizeof(struct epoll_event)); 9138 9139 switch (num) { 9140 #if defined(IMPLEMENT_EPOLL_PWAIT) 9141 case TARGET_NR_epoll_pwait: 9142 { 9143 target_sigset_t *target_set; 9144 sigset_t _set, *set = &_set; 9145 9146 if (arg5) { 9147 target_set = lock_user(VERIFY_READ, arg5, 9148 sizeof(target_sigset_t), 1); 9149 if (!target_set) { 9150 unlock_user(target_ep, arg2, 0); 9151 goto efault; 9152 } 9153 target_to_host_sigset(set, target_set); 9154 unlock_user(target_set, arg5, 0); 9155 } else { 9156 set = NULL; 9157 } 9158 9159 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 9160 break; 9161 } 9162 #endif 9163 #if defined(TARGET_NR_epoll_wait) 9164 case TARGET_NR_epoll_wait: 9165 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 9166 break; 9167 #endif 9168 default: 9169 ret = -TARGET_ENOSYS; 9170 } 9171 if (!is_error(ret)) { 9172 int i; 9173 for (i = 0; i < ret; i++) { 9174 target_ep[i].events = tswap32(ep[i].events); 9175 
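                /* The epoll data field is opaque to the kernel, so all 64 bits
                 * are copied back unchanged, mirroring the epoll_ctl case above. */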
target_ep[i].data.u64 = tswap64(ep[i].data.u64); 9176 } 9177 } 9178 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 9179 break; 9180 } 9181 #endif 9182 #endif 9183 #ifdef TARGET_NR_prlimit64 9184 case TARGET_NR_prlimit64: 9185 { 9186 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 9187 struct target_rlimit64 *target_rnew, *target_rold; 9188 struct host_rlimit64 rnew, rold, *rnewp = 0; 9189 if (arg3) { 9190 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 9191 goto efault; 9192 } 9193 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 9194 rnew.rlim_max = tswap64(target_rnew->rlim_max); 9195 unlock_user_struct(target_rnew, arg3, 0); 9196 rnewp = &rnew; 9197 } 9198 9199 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0)); 9200 if (!is_error(ret) && arg4) { 9201 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 9202 goto efault; 9203 } 9204 target_rold->rlim_cur = tswap64(rold.rlim_cur); 9205 target_rold->rlim_max = tswap64(rold.rlim_max); 9206 unlock_user_struct(target_rold, arg4, 1); 9207 } 9208 break; 9209 } 9210 #endif 9211 #ifdef TARGET_NR_gethostname 9212 case TARGET_NR_gethostname: 9213 { 9214 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9215 if (name) { 9216 ret = get_errno(gethostname(name, arg2)); 9217 unlock_user(name, arg1, arg2); 9218 } else { 9219 ret = -TARGET_EFAULT; 9220 } 9221 break; 9222 } 9223 #endif 9224 #ifdef TARGET_NR_atomic_cmpxchg_32 9225 case TARGET_NR_atomic_cmpxchg_32: 9226 { 9227 /* should use start_exclusive from main.c */ 9228 abi_ulong mem_value; 9229 if (get_user_u32(mem_value, arg6)) { 9230 target_siginfo_t info; 9231 info.si_signo = SIGSEGV; 9232 info.si_errno = 0; 9233 info.si_code = TARGET_SEGV_MAPERR; 9234 info._sifields._sigfault._addr = arg6; 9235 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 9236 ret = 0xdeadbeef; 9237 9238 } 9239 if (mem_value == arg2) 9240 put_user_u32(arg1, arg6); 9241 ret = mem_value; 9242 break; 9243 } 9244 #endif 9245 #ifdef TARGET_NR_atomic_barrier 9246 case TARGET_NR_atomic_barrier: 9247 { 9248 /* Like the kernel implementation and the qemu arm barrier, no-op this? 
 */
        break;
    }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
        struct target_sigevent *ptarget_sevp;
        struct target_timer_t *ptarget_timer;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
                    goto efault;
                }

                host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
                host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);

                phost_sevp = &host_sevp;
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
                    goto efault;
                }
                /* Hand the guest a cookie that encodes the host timer table
                 * index; the other timer cases below recover the index with
                 * arg1 &= 0xffff. */
                ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
                unlock_user_struct(ptarget_timer, arg3, 1);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        arg1 &= 0xffff;
        if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                    timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4) {
                host_to_target_itimerspec(arg4, &hspec_old);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        arg1 &= 0xffff;
        if (!arg2) {
            return -TARGET_EFAULT;
        } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_getoverrun(htimer));
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[arg1] = 0;
        }
        break;
    }
#endif

    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
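    /* Common exit path: the result in "ret" (a host value or a negative
     * target errno) is optionally traced and handed back to the caller;
     * the efault label below folds guest-memory faults into this path. */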
9380 #ifdef DEBUG 9381 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 9382 #endif 9383 if(do_strace) 9384 print_syscall_ret(num, ret); 9385 return ret; 9386 efault: 9387 ret = -TARGET_EFAULT; 9388 goto fail; 9389 } 9390
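/*
 * Illustrative sketch only (not compiled): the typical shape of a case
 * handled in the switch above, assuming a hypothetical TARGET_NR_xxx
 * that takes a guest path in arg1:
 *
 *     case TARGET_NR_xxx:
 *         if (!(p = lock_user_string(arg1)))
 *             goto efault;
 *         ret = get_errno(xxx(path(p)));
 *         unlock_user(p, arg1, 0);
 *         break;
 *
 * Host results are converted with get_errno() and checked with is_error();
 * guest memory is touched only through lock_user()/unlock_user() and the
 * put_user_xx()/get_user_xx() helpers, and fault paths funnel into the
 * shared efault label.
 */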