1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <stdarg.h> 23 #include <string.h> 24 #include <elf.h> 25 #include <endian.h> 26 #include <errno.h> 27 #include <unistd.h> 28 #include <fcntl.h> 29 #include <time.h> 30 #include <limits.h> 31 #include <sys/types.h> 32 #include <sys/ipc.h> 33 #include <sys/msg.h> 34 #include <sys/wait.h> 35 #include <sys/time.h> 36 #include <sys/stat.h> 37 #include <sys/mount.h> 38 #include <sys/prctl.h> 39 #include <sys/resource.h> 40 #include <sys/mman.h> 41 #include <sys/swap.h> 42 #include <signal.h> 43 #include <sched.h> 44 #ifdef __ia64__ 45 int __clone2(int (*fn)(void *), void *child_stack_base, 46 size_t stack_size, int flags, void *arg, ...); 47 #endif 48 #include <sys/socket.h> 49 #include <sys/un.h> 50 #include <sys/uio.h> 51 #include <sys/poll.h> 52 #include <sys/times.h> 53 #include <sys/shm.h> 54 #include <sys/sem.h> 55 #include <sys/statfs.h> 56 #include <utime.h> 57 #include <sys/sysinfo.h> 58 #include <sys/utsname.h> 59 //#include <sys/user.h> 60 #include <netinet/ip.h> 61 #include <netinet/tcp.h> 62 #include <linux/wireless.h> 63 #include <qemu-common.h> 64 #ifdef TARGET_GPROF 65 #include <sys/gmon.h> 66 #endif 67 #ifdef CONFIG_EVENTFD 68 #include 
<sys/eventfd.h> 69 #endif 70 #ifdef CONFIG_EPOLL 71 #include <sys/epoll.h> 72 #endif 73 #ifdef CONFIG_ATTR 74 #include <attr/xattr.h> 75 #endif 76 77 #define termios host_termios 78 #define winsize host_winsize 79 #define termio host_termio 80 #define sgttyb host_sgttyb /* same as target */ 81 #define tchars host_tchars /* same as target */ 82 #define ltchars host_ltchars /* same as target */ 83 84 #include <linux/termios.h> 85 #include <linux/unistd.h> 86 #include <linux/utsname.h> 87 #include <linux/cdrom.h> 88 #include <linux/hdreg.h> 89 #include <linux/soundcard.h> 90 #include <linux/kd.h> 91 #include <linux/mtio.h> 92 #include <linux/fs.h> 93 #if defined(CONFIG_FIEMAP) 94 #include <linux/fiemap.h> 95 #endif 96 #include <linux/fb.h> 97 #include <linux/vt.h> 98 #include "linux_loop.h" 99 #include "cpu-uname.h" 100 101 #include "qemu.h" 102 #include "qemu-common.h" 103 104 #if defined(CONFIG_USE_NPTL) 105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ 106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) 107 #else 108 /* XXX: Hardcode the above values. 
*/ 109 #define CLONE_NPTL_FLAGS2 0 110 #endif 111 112 //#define DEBUG 113 114 //#include <linux/msdos_fs.h> 115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 117 118 119 #undef _syscall0 120 #undef _syscall1 121 #undef _syscall2 122 #undef _syscall3 123 #undef _syscall4 124 #undef _syscall5 125 #undef _syscall6 126 127 #define _syscall0(type,name) \ 128 static type name (void) \ 129 { \ 130 return syscall(__NR_##name); \ 131 } 132 133 #define _syscall1(type,name,type1,arg1) \ 134 static type name (type1 arg1) \ 135 { \ 136 return syscall(__NR_##name, arg1); \ 137 } 138 139 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 140 static type name (type1 arg1,type2 arg2) \ 141 { \ 142 return syscall(__NR_##name, arg1, arg2); \ 143 } 144 145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 146 static type name (type1 arg1,type2 arg2,type3 arg3) \ 147 { \ 148 return syscall(__NR_##name, arg1, arg2, arg3); \ 149 } 150 151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 153 { \ 154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 155 } 156 157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 158 type5,arg5) \ 159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 160 { \ 161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 162 } 163 164 165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 166 type5,arg5,type6,arg6) \ 167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 168 type6 arg6) \ 169 { \ 170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 171 } 172 173 174 #define __NR_sys_uname __NR_uname 175 #define __NR_sys_faccessat __NR_faccessat 176 #define __NR_sys_fchmodat __NR_fchmodat 177 #define __NR_sys_fchownat 
__NR_fchownat 178 #define __NR_sys_fstatat64 __NR_fstatat64 179 #define __NR_sys_futimesat __NR_futimesat 180 #define __NR_sys_getcwd1 __NR_getcwd 181 #define __NR_sys_getdents __NR_getdents 182 #define __NR_sys_getdents64 __NR_getdents64 183 #define __NR_sys_getpriority __NR_getpriority 184 #define __NR_sys_linkat __NR_linkat 185 #define __NR_sys_mkdirat __NR_mkdirat 186 #define __NR_sys_mknodat __NR_mknodat 187 #define __NR_sys_newfstatat __NR_newfstatat 188 #define __NR_sys_openat __NR_openat 189 #define __NR_sys_readlinkat __NR_readlinkat 190 #define __NR_sys_renameat __NR_renameat 191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 192 #define __NR_sys_symlinkat __NR_symlinkat 193 #define __NR_sys_syslog __NR_syslog 194 #define __NR_sys_tgkill __NR_tgkill 195 #define __NR_sys_tkill __NR_tkill 196 #define __NR_sys_unlinkat __NR_unlinkat 197 #define __NR_sys_utimensat __NR_utimensat 198 #define __NR_sys_futex __NR_futex 199 #define __NR_sys_inotify_init __NR_inotify_init 200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 202 203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 204 defined(__s390x__) 205 #define __NR__llseek __NR_lseek 206 #endif 207 208 #ifdef __NR_gettid 209 _syscall0(int, gettid) 210 #else 211 /* This is a replacement for the host gettid() and must return a host 212 errno. 
*/ 213 static int gettid(void) { 214 return -ENOSYS; 215 } 216 #endif 217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 220 #endif 221 _syscall2(int, sys_getpriority, int, which, int, who); 222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 223 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 224 loff_t *, res, uint, wh); 225 #endif 226 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo) 227 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 229 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig) 230 #endif 231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 232 _syscall2(int,sys_tkill,int,tid,int,sig) 233 #endif 234 #ifdef __NR_exit_group 235 _syscall1(int,exit_group,int,error_code) 236 #endif 237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 238 _syscall1(int,set_tid_address,int *,tidptr) 239 #endif 240 #if defined(CONFIG_USE_NPTL) 241 #if defined(TARGET_NR_futex) && defined(__NR_futex) 242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 243 const struct timespec *,timeout,int *,uaddr2,int,val3) 244 #endif 245 #endif 246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 248 unsigned long *, user_mask_ptr); 249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 251 unsigned long *, user_mask_ptr); 252 253 static bitmask_transtbl fcntl_flags_tbl[] = { 254 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 255 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 256 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 257 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 258 { 
TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 259 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 260 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 261 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 262 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 263 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 264 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 265 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 266 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 267 #if defined(O_DIRECT) 268 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 269 #endif 270 { 0, 0, 0, 0 } 271 }; 272 273 #define COPY_UTSNAME_FIELD(dest, src) \ 274 do { \ 275 /* __NEW_UTS_LEN doesn't include terminating null */ \ 276 (void) strncpy((dest), (src), __NEW_UTS_LEN); \ 277 (dest)[__NEW_UTS_LEN] = '\0'; \ 278 } while (0) 279 280 static int sys_uname(struct new_utsname *buf) 281 { 282 struct utsname uts_buf; 283 284 if (uname(&uts_buf) < 0) 285 return (-1); 286 287 /* 288 * Just in case these have some differences, we 289 * translate utsname to new_utsname (which is the 290 * struct linux kernel uses). 291 */ 292 293 memset(buf, 0, sizeof(*buf)); 294 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname); 295 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename); 296 COPY_UTSNAME_FIELD(buf->release, uts_buf.release); 297 COPY_UTSNAME_FIELD(buf->version, uts_buf.version); 298 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine); 299 #ifdef _GNU_SOURCE 300 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname); 301 #endif 302 return (0); 303 304 #undef COPY_UTSNAME_FIELD 305 } 306 307 static int sys_getcwd1(char *buf, size_t size) 308 { 309 if (getcwd(buf, size) == NULL) { 310 /* getcwd() sets errno */ 311 return (-1); 312 } 313 return strlen(buf)+1; 314 } 315 316 #ifdef CONFIG_ATFILE 317 /* 318 * Host system seems to have atfile syscall stubs available. 
We 319 * now enable them one by one as specified by target syscall_nr.h. 320 */ 321 322 #ifdef TARGET_NR_faccessat 323 static int sys_faccessat(int dirfd, const char *pathname, int mode) 324 { 325 return (faccessat(dirfd, pathname, mode, 0)); 326 } 327 #endif 328 #ifdef TARGET_NR_fchmodat 329 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode) 330 { 331 return (fchmodat(dirfd, pathname, mode, 0)); 332 } 333 #endif 334 #if defined(TARGET_NR_fchownat) 335 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner, 336 gid_t group, int flags) 337 { 338 return (fchownat(dirfd, pathname, owner, group, flags)); 339 } 340 #endif 341 #ifdef __NR_fstatat64 342 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf, 343 int flags) 344 { 345 return (fstatat(dirfd, pathname, buf, flags)); 346 } 347 #endif 348 #ifdef __NR_newfstatat 349 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf, 350 int flags) 351 { 352 return (fstatat(dirfd, pathname, buf, flags)); 353 } 354 #endif 355 #ifdef TARGET_NR_futimesat 356 static int sys_futimesat(int dirfd, const char *pathname, 357 const struct timeval times[2]) 358 { 359 return (futimesat(dirfd, pathname, times)); 360 } 361 #endif 362 #ifdef TARGET_NR_linkat 363 static int sys_linkat(int olddirfd, const char *oldpath, 364 int newdirfd, const char *newpath, int flags) 365 { 366 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags)); 367 } 368 #endif 369 #ifdef TARGET_NR_mkdirat 370 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode) 371 { 372 return (mkdirat(dirfd, pathname, mode)); 373 } 374 #endif 375 #ifdef TARGET_NR_mknodat 376 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode, 377 dev_t dev) 378 { 379 return (mknodat(dirfd, pathname, mode, dev)); 380 } 381 #endif 382 #ifdef TARGET_NR_openat 383 static int sys_openat(int dirfd, const char *pathname, int flags, ...) 
384 { 385 /* 386 * open(2) has extra parameter 'mode' when called with 387 * flag O_CREAT. 388 */ 389 if ((flags & O_CREAT) != 0) { 390 va_list ap; 391 mode_t mode; 392 393 /* 394 * Get the 'mode' parameter and translate it to 395 * host bits. 396 */ 397 va_start(ap, flags); 398 mode = va_arg(ap, mode_t); 399 mode = target_to_host_bitmask(mode, fcntl_flags_tbl); 400 va_end(ap); 401 402 return (openat(dirfd, pathname, flags, mode)); 403 } 404 return (openat(dirfd, pathname, flags)); 405 } 406 #endif 407 #ifdef TARGET_NR_readlinkat 408 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz) 409 { 410 return (readlinkat(dirfd, pathname, buf, bufsiz)); 411 } 412 #endif 413 #ifdef TARGET_NR_renameat 414 static int sys_renameat(int olddirfd, const char *oldpath, 415 int newdirfd, const char *newpath) 416 { 417 return (renameat(olddirfd, oldpath, newdirfd, newpath)); 418 } 419 #endif 420 #ifdef TARGET_NR_symlinkat 421 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath) 422 { 423 return (symlinkat(oldpath, newdirfd, newpath)); 424 } 425 #endif 426 #ifdef TARGET_NR_unlinkat 427 static int sys_unlinkat(int dirfd, const char *pathname, int flags) 428 { 429 return (unlinkat(dirfd, pathname, flags)); 430 } 431 #endif 432 #else /* !CONFIG_ATFILE */ 433 434 /* 435 * Try direct syscalls instead 436 */ 437 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 438 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode) 439 #endif 440 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 441 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode) 442 #endif 443 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 444 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname, 445 uid_t,owner,gid_t,group,int,flags) 446 #endif 447 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 448 defined(__NR_fstatat64) 449 _syscall4(int,sys_fstatat64,int,dirfd,const char 
*,pathname, 450 struct stat *,buf,int,flags) 451 #endif 452 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 453 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname, 454 const struct timeval *,times) 455 #endif 456 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \ 457 defined(__NR_newfstatat) 458 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname, 459 struct stat *,buf,int,flags) 460 #endif 461 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 462 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath, 463 int,newdirfd,const char *,newpath,int,flags) 464 #endif 465 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 466 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode) 467 #endif 468 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 469 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname, 470 mode_t,mode,dev_t,dev) 471 #endif 472 #if defined(TARGET_NR_openat) && defined(__NR_openat) 473 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode) 474 #endif 475 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 476 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname, 477 char *,buf,size_t,bufsize) 478 #endif 479 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 480 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath, 481 int,newdirfd,const char *,newpath) 482 #endif 483 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 484 _syscall3(int,sys_symlinkat,const char *,oldpath, 485 int,newdirfd,const char *,newpath) 486 #endif 487 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 488 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags) 489 #endif 490 491 #endif /* CONFIG_ATFILE */ 492 493 #ifdef CONFIG_UTIMENSAT 494 static int sys_utimensat(int dirfd, const char *pathname, 495 const struct timespec times[2], int flags) 496 { 497 if (pathname == NULL) 498 return futimens(dirfd, times); 
499 else 500 return utimensat(dirfd, pathname, times, flags); 501 } 502 #else 503 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat) 504 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 505 const struct timespec *,tsp,int,flags) 506 #endif 507 #endif /* CONFIG_UTIMENSAT */ 508 509 #ifdef CONFIG_INOTIFY 510 #include <sys/inotify.h> 511 512 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 513 static int sys_inotify_init(void) 514 { 515 return (inotify_init()); 516 } 517 #endif 518 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 519 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 520 { 521 return (inotify_add_watch(fd, pathname, mask)); 522 } 523 #endif 524 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 525 static int sys_inotify_rm_watch(int fd, int32_t wd) 526 { 527 return (inotify_rm_watch(fd, wd)); 528 } 529 #endif 530 #ifdef CONFIG_INOTIFY1 531 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 532 static int sys_inotify_init1(int flags) 533 { 534 return (inotify_init1(flags)); 535 } 536 #endif 537 #endif 538 #else 539 /* Userspace can usually survive runtime without inotify */ 540 #undef TARGET_NR_inotify_init 541 #undef TARGET_NR_inotify_init1 542 #undef TARGET_NR_inotify_add_watch 543 #undef TARGET_NR_inotify_rm_watch 544 #endif /* CONFIG_INOTIFY */ 545 546 #if defined(TARGET_NR_ppoll) 547 #ifndef __NR_ppoll 548 # define __NR_ppoll -1 549 #endif 550 #define __NR_sys_ppoll __NR_ppoll 551 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds, 552 struct timespec *, timeout, const __sigset_t *, sigmask, 553 size_t, sigsetsize) 554 #endif 555 556 #if defined(TARGET_NR_pselect6) 557 #ifndef __NR_pselect6 558 # define __NR_pselect6 -1 559 #endif 560 #define __NR_sys_pselect6 __NR_pselect6 561 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, 562 fd_set *, exceptfds, struct timespec *, timeout, 
void *, sig); 563 #endif 564 565 #if defined(TARGET_NR_prlimit64) 566 #ifndef __NR_prlimit64 567 # define __NR_prlimit64 -1 568 #endif 569 #define __NR_sys_prlimit64 __NR_prlimit64 570 /* The glibc rlimit structure may not be that used by the underlying syscall */ 571 struct host_rlimit64 { 572 uint64_t rlim_cur; 573 uint64_t rlim_max; 574 }; 575 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 576 const struct host_rlimit64 *, new_limit, 577 struct host_rlimit64 *, old_limit) 578 #endif 579 580 extern int personality(int); 581 extern int flock(int, int); 582 extern int setfsuid(int); 583 extern int setfsgid(int); 584 extern int setgroups(int, gid_t *); 585 586 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ 587 #ifdef TARGET_ARM 588 static inline int regpairs_aligned(void *cpu_env) { 589 return ((((CPUARMState *)cpu_env)->eabi) == 1) ; 590 } 591 #elif defined(TARGET_MIPS) 592 static inline int regpairs_aligned(void *cpu_env) { return 1; } 593 #else 594 static inline int regpairs_aligned(void *cpu_env) { return 0; } 595 #endif 596 597 #define ERRNO_TABLE_SIZE 1200 598 599 /* target_to_host_errno_table[] is initialized from 600 * host_to_target_errno_table[] in syscall_init(). */ 601 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 602 }; 603 604 /* 605 * This list is the union of errno values overridden in asm-<arch>/errno.h 606 * minus the errnos that are not actually generic to all archs. 
607 */ 608 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 609 [EIDRM] = TARGET_EIDRM, 610 [ECHRNG] = TARGET_ECHRNG, 611 [EL2NSYNC] = TARGET_EL2NSYNC, 612 [EL3HLT] = TARGET_EL3HLT, 613 [EL3RST] = TARGET_EL3RST, 614 [ELNRNG] = TARGET_ELNRNG, 615 [EUNATCH] = TARGET_EUNATCH, 616 [ENOCSI] = TARGET_ENOCSI, 617 [EL2HLT] = TARGET_EL2HLT, 618 [EDEADLK] = TARGET_EDEADLK, 619 [ENOLCK] = TARGET_ENOLCK, 620 [EBADE] = TARGET_EBADE, 621 [EBADR] = TARGET_EBADR, 622 [EXFULL] = TARGET_EXFULL, 623 [ENOANO] = TARGET_ENOANO, 624 [EBADRQC] = TARGET_EBADRQC, 625 [EBADSLT] = TARGET_EBADSLT, 626 [EBFONT] = TARGET_EBFONT, 627 [ENOSTR] = TARGET_ENOSTR, 628 [ENODATA] = TARGET_ENODATA, 629 [ETIME] = TARGET_ETIME, 630 [ENOSR] = TARGET_ENOSR, 631 [ENONET] = TARGET_ENONET, 632 [ENOPKG] = TARGET_ENOPKG, 633 [EREMOTE] = TARGET_EREMOTE, 634 [ENOLINK] = TARGET_ENOLINK, 635 [EADV] = TARGET_EADV, 636 [ESRMNT] = TARGET_ESRMNT, 637 [ECOMM] = TARGET_ECOMM, 638 [EPROTO] = TARGET_EPROTO, 639 [EDOTDOT] = TARGET_EDOTDOT, 640 [EMULTIHOP] = TARGET_EMULTIHOP, 641 [EBADMSG] = TARGET_EBADMSG, 642 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 643 [EOVERFLOW] = TARGET_EOVERFLOW, 644 [ENOTUNIQ] = TARGET_ENOTUNIQ, 645 [EBADFD] = TARGET_EBADFD, 646 [EREMCHG] = TARGET_EREMCHG, 647 [ELIBACC] = TARGET_ELIBACC, 648 [ELIBBAD] = TARGET_ELIBBAD, 649 [ELIBSCN] = TARGET_ELIBSCN, 650 [ELIBMAX] = TARGET_ELIBMAX, 651 [ELIBEXEC] = TARGET_ELIBEXEC, 652 [EILSEQ] = TARGET_EILSEQ, 653 [ENOSYS] = TARGET_ENOSYS, 654 [ELOOP] = TARGET_ELOOP, 655 [ERESTART] = TARGET_ERESTART, 656 [ESTRPIPE] = TARGET_ESTRPIPE, 657 [ENOTEMPTY] = TARGET_ENOTEMPTY, 658 [EUSERS] = TARGET_EUSERS, 659 [ENOTSOCK] = TARGET_ENOTSOCK, 660 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 661 [EMSGSIZE] = TARGET_EMSGSIZE, 662 [EPROTOTYPE] = TARGET_EPROTOTYPE, 663 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 664 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 665 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 666 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 667 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 
668 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 669 [EADDRINUSE] = TARGET_EADDRINUSE, 670 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 671 [ENETDOWN] = TARGET_ENETDOWN, 672 [ENETUNREACH] = TARGET_ENETUNREACH, 673 [ENETRESET] = TARGET_ENETRESET, 674 [ECONNABORTED] = TARGET_ECONNABORTED, 675 [ECONNRESET] = TARGET_ECONNRESET, 676 [ENOBUFS] = TARGET_ENOBUFS, 677 [EISCONN] = TARGET_EISCONN, 678 [ENOTCONN] = TARGET_ENOTCONN, 679 [EUCLEAN] = TARGET_EUCLEAN, 680 [ENOTNAM] = TARGET_ENOTNAM, 681 [ENAVAIL] = TARGET_ENAVAIL, 682 [EISNAM] = TARGET_EISNAM, 683 [EREMOTEIO] = TARGET_EREMOTEIO, 684 [ESHUTDOWN] = TARGET_ESHUTDOWN, 685 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 686 [ETIMEDOUT] = TARGET_ETIMEDOUT, 687 [ECONNREFUSED] = TARGET_ECONNREFUSED, 688 [EHOSTDOWN] = TARGET_EHOSTDOWN, 689 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 690 [EALREADY] = TARGET_EALREADY, 691 [EINPROGRESS] = TARGET_EINPROGRESS, 692 [ESTALE] = TARGET_ESTALE, 693 [ECANCELED] = TARGET_ECANCELED, 694 [ENOMEDIUM] = TARGET_ENOMEDIUM, 695 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 696 #ifdef ENOKEY 697 [ENOKEY] = TARGET_ENOKEY, 698 #endif 699 #ifdef EKEYEXPIRED 700 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 701 #endif 702 #ifdef EKEYREVOKED 703 [EKEYREVOKED] = TARGET_EKEYREVOKED, 704 #endif 705 #ifdef EKEYREJECTED 706 [EKEYREJECTED] = TARGET_EKEYREJECTED, 707 #endif 708 #ifdef EOWNERDEAD 709 [EOWNERDEAD] = TARGET_EOWNERDEAD, 710 #endif 711 #ifdef ENOTRECOVERABLE 712 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 713 #endif 714 }; 715 716 static inline int host_to_target_errno(int err) 717 { 718 if(host_to_target_errno_table[err]) 719 return host_to_target_errno_table[err]; 720 return err; 721 } 722 723 static inline int target_to_host_errno(int err) 724 { 725 if (target_to_host_errno_table[err]) 726 return target_to_host_errno_table[err]; 727 return err; 728 } 729 730 static inline abi_long get_errno(abi_long ret) 731 { 732 if (ret == -1) 733 return -host_to_target_errno(errno); 734 else 735 return ret; 736 } 737 738 static inline int 
is_error(abi_long ret) 739 { 740 return (abi_ulong)ret >= (abi_ulong)(-4096); 741 } 742 743 char *target_strerror(int err) 744 { 745 return strerror(target_to_host_errno(err)); 746 } 747 748 static abi_ulong target_brk; 749 static abi_ulong target_original_brk; 750 static abi_ulong brk_page; 751 752 void target_set_brk(abi_ulong new_brk) 753 { 754 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); 755 brk_page = HOST_PAGE_ALIGN(target_brk); 756 } 757 758 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) 759 #define DEBUGF_BRK(message, args...) 760 761 /* do_brk() must return target values and target errnos. */ 762 abi_long do_brk(abi_ulong new_brk) 763 { 764 abi_long mapped_addr; 765 int new_alloc_size; 766 767 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk); 768 769 if (!new_brk) { 770 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk); 771 return target_brk; 772 } 773 if (new_brk < target_original_brk) { 774 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk); 775 return target_brk; 776 } 777 778 /* If the new brk is less than the highest page reserved to the 779 * target heap allocation, set it and we're almost done... */ 780 if (new_brk <= brk_page) { 781 /* Heap contents are initialized to zero, as for anonymous 782 * mapped pages. */ 783 if (new_brk > target_brk) { 784 memset(g2h(target_brk), 0, new_brk - target_brk); 785 } 786 target_brk = new_brk; 787 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk); 788 return target_brk; 789 } 790 791 /* We need to allocate more memory after the brk... Note that 792 * we don't use MAP_FIXED because that will map over the top of 793 * any existing mapping (like the one with the host libc or qemu 794 * itself); instead we treat "mapped but at wrong address" as 795 * a failure and unmap again. 
796 */ 797 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page); 798 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, 799 PROT_READ|PROT_WRITE, 800 MAP_ANON|MAP_PRIVATE, 0, 0)); 801 802 if (mapped_addr == brk_page) { 803 /* Heap contents are initialized to zero, as for anonymous 804 * mapped pages. Technically the new pages are already 805 * initialized to zero since they *are* anonymous mapped 806 * pages, however we have to take care with the contents that 807 * come from the remaining part of the previous page: it may 808 * contains garbage data due to a previous heap usage (grown 809 * then shrunken). */ 810 memset(g2h(target_brk), 0, brk_page - target_brk); 811 812 target_brk = new_brk; 813 brk_page = HOST_PAGE_ALIGN(target_brk); 814 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk); 815 return target_brk; 816 } else if (mapped_addr != -1) { 817 /* Mapped but at wrong address, meaning there wasn't actually 818 * enough space for this brk. 819 */ 820 target_munmap(mapped_addr, new_alloc_size); 821 mapped_addr = -1; 822 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk); 823 } 824 else { 825 DEBUGF_BRK("%#010x (otherwise)\n", target_brk); 826 } 827 828 #if defined(TARGET_ALPHA) 829 /* We (partially) emulate OSF/1 on Alpha, which requires we 830 return a proper errno, not an unchanged brk value. */ 831 return -TARGET_ENOMEM; 832 #endif 833 /* For everything else, return the previous break. 
*/ 834 return target_brk; 835 } 836 837 static inline abi_long copy_from_user_fdset(fd_set *fds, 838 abi_ulong target_fds_addr, 839 int n) 840 { 841 int i, nw, j, k; 842 abi_ulong b, *target_fds; 843 844 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 845 if (!(target_fds = lock_user(VERIFY_READ, 846 target_fds_addr, 847 sizeof(abi_ulong) * nw, 848 1))) 849 return -TARGET_EFAULT; 850 851 FD_ZERO(fds); 852 k = 0; 853 for (i = 0; i < nw; i++) { 854 /* grab the abi_ulong */ 855 __get_user(b, &target_fds[i]); 856 for (j = 0; j < TARGET_ABI_BITS; j++) { 857 /* check the bit inside the abi_ulong */ 858 if ((b >> j) & 1) 859 FD_SET(k, fds); 860 k++; 861 } 862 } 863 864 unlock_user(target_fds, target_fds_addr, 0); 865 866 return 0; 867 } 868 869 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr, 870 abi_ulong target_fds_addr, 871 int n) 872 { 873 if (target_fds_addr) { 874 if (copy_from_user_fdset(fds, target_fds_addr, n)) 875 return -TARGET_EFAULT; 876 *fds_ptr = fds; 877 } else { 878 *fds_ptr = NULL; 879 } 880 return 0; 881 } 882 883 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr, 884 const fd_set *fds, 885 int n) 886 { 887 int i, nw, j, k; 888 abi_long v; 889 abi_ulong *target_fds; 890 891 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 892 if (!(target_fds = lock_user(VERIFY_WRITE, 893 target_fds_addr, 894 sizeof(abi_ulong) * nw, 895 0))) 896 return -TARGET_EFAULT; 897 898 k = 0; 899 for (i = 0; i < nw; i++) { 900 v = 0; 901 for (j = 0; j < TARGET_ABI_BITS; j++) { 902 v |= ((FD_ISSET(k, fds) != 0) << j); 903 k++; 904 } 905 __put_user(v, &target_fds[i]); 906 } 907 908 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw); 909 910 return 0; 911 } 912 913 #if defined(__alpha__) 914 #define HOST_HZ 1024 915 #else 916 #define HOST_HZ 100 917 #endif 918 919 static inline abi_long host_to_target_clock_t(long ticks) 920 { 921 #if HOST_HZ == TARGET_HZ 922 return ticks; 923 #else 924 return ((int64_t)ticks * 
TARGET_HZ) / HOST_HZ; 925 #endif 926 } 927 928 static inline abi_long host_to_target_rusage(abi_ulong target_addr, 929 const struct rusage *rusage) 930 { 931 struct target_rusage *target_rusage; 932 933 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) 934 return -TARGET_EFAULT; 935 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec); 936 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec); 937 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec); 938 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec); 939 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss); 940 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss); 941 target_rusage->ru_idrss = tswapl(rusage->ru_idrss); 942 target_rusage->ru_isrss = tswapl(rusage->ru_isrss); 943 target_rusage->ru_minflt = tswapl(rusage->ru_minflt); 944 target_rusage->ru_majflt = tswapl(rusage->ru_majflt); 945 target_rusage->ru_nswap = tswapl(rusage->ru_nswap); 946 target_rusage->ru_inblock = tswapl(rusage->ru_inblock); 947 target_rusage->ru_oublock = tswapl(rusage->ru_oublock); 948 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd); 949 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv); 950 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals); 951 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw); 952 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw); 953 unlock_user_struct(target_rusage, target_addr, 1); 954 955 return 0; 956 } 957 958 static inline rlim_t target_to_host_rlim(target_ulong target_rlim) 959 { 960 target_ulong target_rlim_swap; 961 rlim_t result; 962 963 target_rlim_swap = tswapl(target_rlim); 964 if (target_rlim_swap == TARGET_RLIM_INFINITY || target_rlim_swap != (rlim_t)target_rlim_swap) 965 result = RLIM_INFINITY; 966 else 967 result = target_rlim_swap; 968 969 return result; 970 } 971 972 static inline target_ulong host_to_target_rlim(rlim_t rlim) 973 { 974 target_ulong target_rlim_swap; 975 target_ulong result; 976 977 if (rlim 
== RLIM_INFINITY || rlim != (target_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapl(target_rlim_swap);

    return result;
}

/* Translate a target RLIMIT_* resource code to the host constant.
 * Unknown codes are passed through unchanged so the host syscall can
 * reject them itself. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/* Copy a struct timeval in from target memory, byte-swapping fields.
 * Returns 0 on success, -TARGET_EFAULT if the address is unmapped. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/* Copy a struct timeval out to target memory (inverse of the above). */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Copy a POSIX message-queue attribute struct in from target memory,
 * byte-swapping each field. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Copy a POSIX message-queue attribute struct out to target memory
 * (inverse of the above). */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
*/ 1100 static abi_long do_select(int n, 1101 abi_ulong rfd_addr, abi_ulong wfd_addr, 1102 abi_ulong efd_addr, abi_ulong target_tv_addr) 1103 { 1104 fd_set rfds, wfds, efds; 1105 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 1106 struct timeval tv, *tv_ptr; 1107 abi_long ret; 1108 1109 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 1110 if (ret) { 1111 return ret; 1112 } 1113 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 1114 if (ret) { 1115 return ret; 1116 } 1117 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 1118 if (ret) { 1119 return ret; 1120 } 1121 1122 if (target_tv_addr) { 1123 if (copy_from_user_timeval(&tv, target_tv_addr)) 1124 return -TARGET_EFAULT; 1125 tv_ptr = &tv; 1126 } else { 1127 tv_ptr = NULL; 1128 } 1129 1130 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr)); 1131 1132 if (!is_error(ret)) { 1133 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1134 return -TARGET_EFAULT; 1135 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1136 return -TARGET_EFAULT; 1137 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1138 return -TARGET_EFAULT; 1139 1140 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv)) 1141 return -TARGET_EFAULT; 1142 } 1143 1144 return ret; 1145 } 1146 #endif 1147 1148 static abi_long do_pipe2(int host_pipe[], int flags) 1149 { 1150 #ifdef CONFIG_PIPE2 1151 return pipe2(host_pipe, flags); 1152 #else 1153 return -ENOSYS; 1154 #endif 1155 } 1156 1157 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, 1158 int flags, int is_pipe2) 1159 { 1160 int host_pipe[2]; 1161 abi_long ret; 1162 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); 1163 1164 if (is_error(ret)) 1165 return get_errno(ret); 1166 1167 /* Several targets have special calling conventions for the original 1168 pipe syscall, but didn't replicate this into the pipe2 syscall. 
*/ 1169 if (!is_pipe2) { 1170 #if defined(TARGET_ALPHA) 1171 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1]; 1172 return host_pipe[0]; 1173 #elif defined(TARGET_MIPS) 1174 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; 1175 return host_pipe[0]; 1176 #elif defined(TARGET_SH4) 1177 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; 1178 return host_pipe[0]; 1179 #endif 1180 } 1181 1182 if (put_user_s32(host_pipe[0], pipedes) 1183 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0]))) 1184 return -TARGET_EFAULT; 1185 return get_errno(ret); 1186 } 1187 1188 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn, 1189 abi_ulong target_addr, 1190 socklen_t len) 1191 { 1192 struct target_ip_mreqn *target_smreqn; 1193 1194 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1); 1195 if (!target_smreqn) 1196 return -TARGET_EFAULT; 1197 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr; 1198 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr; 1199 if (len == sizeof(struct target_ip_mreqn)) 1200 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex); 1201 unlock_user(target_smreqn, target_addr, 0); 1202 1203 return 0; 1204 } 1205 1206 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr, 1207 abi_ulong target_addr, 1208 socklen_t len) 1209 { 1210 const socklen_t unix_maxlen = sizeof (struct sockaddr_un); 1211 sa_family_t sa_family; 1212 struct target_sockaddr *target_saddr; 1213 1214 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 1215 if (!target_saddr) 1216 return -TARGET_EFAULT; 1217 1218 sa_family = tswap16(target_saddr->sa_family); 1219 1220 /* Oops. The caller might send a incomplete sun_path; sun_path 1221 * must be terminated by \0 (see the manual page), but 1222 * unfortunately it is quite common to specify sockaddr_un 1223 * length as "strlen(x->sun_path)" while it should be 1224 * "strlen(...) + 1". We'll fix that here if needed. 
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last supplied byte is non-NUL but the byte after it is:
             * extend len to include the terminator. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/* Copy a host sockaddr out to target memory, byte-swapping sa_family. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* ??? Should this also swap msgh->name? */
/* Convert the ancillary data (control messages) of a target msghdr to
 * host form, walking both cmsg chains in parallel.  SCM_RIGHTS payloads
 * (arrays of file descriptors) are swapped element-wise; anything else
 * is copied through verbatim with a warning.  On overflow of the host
 * control buffer the remaining messages are dropped. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length excludes the (target-aligned) header. */
        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        /* NOTE(review): cmsg_level now holds the swapped (host-side)
         * value but is compared against TARGET_SOL_SOCKET; on hosts
         * where SOL_SOCKET != TARGET_SOL_SOCKET this test looks wrong
         * -- confirm against upstream. */
        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* ??? Should this also swap msgh->name? */
/* Inverse of target_to_host_cmsg: convert host control messages back
 * into the target msghdr after a recvmsg. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        /* NOTE(review): as above, host cmsg_level compared against
         * TARGET_SOL_SOCKET -- confirm. */
        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.
*/ 1387 if (optlen < sizeof(uint32_t)) 1388 return -TARGET_EINVAL; 1389 1390 if (get_user_u32(val, optval_addr)) 1391 return -TARGET_EFAULT; 1392 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1393 break; 1394 case SOL_IP: 1395 switch(optname) { 1396 case IP_TOS: 1397 case IP_TTL: 1398 case IP_HDRINCL: 1399 case IP_ROUTER_ALERT: 1400 case IP_RECVOPTS: 1401 case IP_RETOPTS: 1402 case IP_PKTINFO: 1403 case IP_MTU_DISCOVER: 1404 case IP_RECVERR: 1405 case IP_RECVTOS: 1406 #ifdef IP_FREEBIND 1407 case IP_FREEBIND: 1408 #endif 1409 case IP_MULTICAST_TTL: 1410 case IP_MULTICAST_LOOP: 1411 val = 0; 1412 if (optlen >= sizeof(uint32_t)) { 1413 if (get_user_u32(val, optval_addr)) 1414 return -TARGET_EFAULT; 1415 } else if (optlen >= 1) { 1416 if (get_user_u8(val, optval_addr)) 1417 return -TARGET_EFAULT; 1418 } 1419 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1420 break; 1421 case IP_ADD_MEMBERSHIP: 1422 case IP_DROP_MEMBERSHIP: 1423 if (optlen < sizeof (struct target_ip_mreq) || 1424 optlen > sizeof (struct target_ip_mreqn)) 1425 return -TARGET_EINVAL; 1426 1427 ip_mreq = (struct ip_mreqn *) alloca(optlen); 1428 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1429 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1430 break; 1431 1432 case IP_BLOCK_SOURCE: 1433 case IP_UNBLOCK_SOURCE: 1434 case IP_ADD_SOURCE_MEMBERSHIP: 1435 case IP_DROP_SOURCE_MEMBERSHIP: 1436 if (optlen != sizeof (struct target_ip_mreq_source)) 1437 return -TARGET_EINVAL; 1438 1439 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1440 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1441 unlock_user (ip_mreq_source, optval_addr, 0); 1442 break; 1443 1444 default: 1445 goto unimplemented; 1446 } 1447 break; 1448 case TARGET_SOL_SOCKET: 1449 switch (optname) { 1450 /* Options with 'int' argument. 
*/ 1451 case TARGET_SO_DEBUG: 1452 optname = SO_DEBUG; 1453 break; 1454 case TARGET_SO_REUSEADDR: 1455 optname = SO_REUSEADDR; 1456 break; 1457 case TARGET_SO_TYPE: 1458 optname = SO_TYPE; 1459 break; 1460 case TARGET_SO_ERROR: 1461 optname = SO_ERROR; 1462 break; 1463 case TARGET_SO_DONTROUTE: 1464 optname = SO_DONTROUTE; 1465 break; 1466 case TARGET_SO_BROADCAST: 1467 optname = SO_BROADCAST; 1468 break; 1469 case TARGET_SO_SNDBUF: 1470 optname = SO_SNDBUF; 1471 break; 1472 case TARGET_SO_RCVBUF: 1473 optname = SO_RCVBUF; 1474 break; 1475 case TARGET_SO_KEEPALIVE: 1476 optname = SO_KEEPALIVE; 1477 break; 1478 case TARGET_SO_OOBINLINE: 1479 optname = SO_OOBINLINE; 1480 break; 1481 case TARGET_SO_NO_CHECK: 1482 optname = SO_NO_CHECK; 1483 break; 1484 case TARGET_SO_PRIORITY: 1485 optname = SO_PRIORITY; 1486 break; 1487 #ifdef SO_BSDCOMPAT 1488 case TARGET_SO_BSDCOMPAT: 1489 optname = SO_BSDCOMPAT; 1490 break; 1491 #endif 1492 case TARGET_SO_PASSCRED: 1493 optname = SO_PASSCRED; 1494 break; 1495 case TARGET_SO_TIMESTAMP: 1496 optname = SO_TIMESTAMP; 1497 break; 1498 case TARGET_SO_RCVLOWAT: 1499 optname = SO_RCVLOWAT; 1500 break; 1501 case TARGET_SO_RCVTIMEO: 1502 optname = SO_RCVTIMEO; 1503 break; 1504 case TARGET_SO_SNDTIMEO: 1505 optname = SO_SNDTIMEO; 1506 break; 1507 break; 1508 default: 1509 goto unimplemented; 1510 } 1511 if (optlen < sizeof(uint32_t)) 1512 return -TARGET_EINVAL; 1513 1514 if (get_user_u32(val, optval_addr)) 1515 return -TARGET_EFAULT; 1516 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 1517 break; 1518 default: 1519 unimplemented: 1520 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname); 1521 ret = -TARGET_ENOPROTOOPT; 1522 } 1523 return ret; 1524 } 1525 1526 /* do_getsockopt() Must return target values and target errnos. 
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            /* Unknown names are passed through unmapped. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): lv is seeded with sizeof(lv) (socklen_t), which
         * matches sizeof(val) on common hosts but reads as if
         * sizeof(val) were intended -- confirm. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* The caller asked for less than an int: return one byte
             * when the value fits, mirroring kernel behaviour. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* FIXME
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
1679 */ 1680 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr, 1681 int count, int copy) 1682 { 1683 struct target_iovec *target_vec; 1684 abi_ulong base; 1685 int i; 1686 1687 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1); 1688 if (!target_vec) 1689 return -TARGET_EFAULT; 1690 for(i = 0;i < count; i++) { 1691 base = tswapl(target_vec[i].iov_base); 1692 vec[i].iov_len = tswapl(target_vec[i].iov_len); 1693 if (vec[i].iov_len != 0) { 1694 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy); 1695 /* Don't check lock_user return value. We must call writev even 1696 if a element has invalid base address. */ 1697 } else { 1698 /* zero length pointer is ignored */ 1699 vec[i].iov_base = NULL; 1700 } 1701 } 1702 unlock_user (target_vec, target_addr, 0); 1703 return 0; 1704 } 1705 1706 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr, 1707 int count, int copy) 1708 { 1709 struct target_iovec *target_vec; 1710 abi_ulong base; 1711 int i; 1712 1713 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1); 1714 if (!target_vec) 1715 return -TARGET_EFAULT; 1716 for(i = 0;i < count; i++) { 1717 if (target_vec[i].iov_base) { 1718 base = tswapl(target_vec[i].iov_base); 1719 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); 1720 } 1721 } 1722 unlock_user (target_vec, target_addr, 0); 1723 1724 return 0; 1725 } 1726 1727 /* do_socket() Must return target values and target errnos. 
*/ 1728 static abi_long do_socket(int domain, int type, int protocol) 1729 { 1730 #if defined(TARGET_MIPS) 1731 switch(type) { 1732 case TARGET_SOCK_DGRAM: 1733 type = SOCK_DGRAM; 1734 break; 1735 case TARGET_SOCK_STREAM: 1736 type = SOCK_STREAM; 1737 break; 1738 case TARGET_SOCK_RAW: 1739 type = SOCK_RAW; 1740 break; 1741 case TARGET_SOCK_RDM: 1742 type = SOCK_RDM; 1743 break; 1744 case TARGET_SOCK_SEQPACKET: 1745 type = SOCK_SEQPACKET; 1746 break; 1747 case TARGET_SOCK_PACKET: 1748 type = SOCK_PACKET; 1749 break; 1750 } 1751 #endif 1752 if (domain == PF_NETLINK) 1753 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */ 1754 return get_errno(socket(domain, type, protocol)); 1755 } 1756 1757 /* do_bind() Must return target values and target errnos. */ 1758 static abi_long do_bind(int sockfd, abi_ulong target_addr, 1759 socklen_t addrlen) 1760 { 1761 void *addr; 1762 abi_long ret; 1763 1764 if ((int)addrlen < 0) { 1765 return -TARGET_EINVAL; 1766 } 1767 1768 addr = alloca(addrlen+1); 1769 1770 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1771 if (ret) 1772 return ret; 1773 1774 return get_errno(bind(sockfd, addr, addrlen)); 1775 } 1776 1777 /* do_connect() Must return target values and target errnos. */ 1778 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1779 socklen_t addrlen) 1780 { 1781 void *addr; 1782 abi_long ret; 1783 1784 if ((int)addrlen < 0) { 1785 return -TARGET_EINVAL; 1786 } 1787 1788 addr = alloca(addrlen); 1789 1790 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1791 if (ret) 1792 return ret; 1793 1794 return get_errno(connect(sockfd, addr, addrlen)); 1795 } 1796 1797 /* do_sendrecvmsg() Must return target values and target errnos. 
 */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* FIXME */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        /* NOTE(review): unlike do_bind(), no +1 slack here for the
         * AF_UNIX sun_path fixup in target_to_host_sockaddr -- confirm
         * whether msg_namelen can hit that path. */
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* 2x: host cmsg headers can be larger than the target's. */
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count; host_to_target_cmsg returns 0. */
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    /* NULL peer-address pointer: plain accept. */
    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_sendto() Must return target values and target errnos.
*/ 1964 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 1965 abi_ulong target_addr, socklen_t addrlen) 1966 { 1967 void *addr; 1968 void *host_msg; 1969 abi_long ret; 1970 1971 if ((int)addrlen < 0) { 1972 return -TARGET_EINVAL; 1973 } 1974 1975 host_msg = lock_user(VERIFY_READ, msg, len, 1); 1976 if (!host_msg) 1977 return -TARGET_EFAULT; 1978 if (target_addr) { 1979 addr = alloca(addrlen); 1980 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1981 if (ret) { 1982 unlock_user(host_msg, msg, 0); 1983 return ret; 1984 } 1985 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 1986 } else { 1987 ret = get_errno(send(fd, host_msg, len, flags)); 1988 } 1989 unlock_user(host_msg, msg, 0); 1990 return ret; 1991 } 1992 1993 /* do_recvfrom() Must return target values and target errnos. */ 1994 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 1995 abi_ulong target_addr, 1996 abi_ulong target_addrlen) 1997 { 1998 socklen_t addrlen; 1999 void *addr; 2000 void *host_msg; 2001 abi_long ret; 2002 2003 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2004 if (!host_msg) 2005 return -TARGET_EFAULT; 2006 if (target_addr) { 2007 if (get_user_u32(addrlen, target_addrlen)) { 2008 ret = -TARGET_EFAULT; 2009 goto fail; 2010 } 2011 if ((int)addrlen < 0) { 2012 ret = -TARGET_EINVAL; 2013 goto fail; 2014 } 2015 addr = alloca(addrlen); 2016 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2017 } else { 2018 addr = NULL; /* To keep compiler quiet. 
*/ 2019 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2020 } 2021 if (!is_error(ret)) { 2022 if (target_addr) { 2023 host_to_target_sockaddr(target_addr, addr, addrlen); 2024 if (put_user_u32(addrlen, target_addrlen)) { 2025 ret = -TARGET_EFAULT; 2026 goto fail; 2027 } 2028 } 2029 unlock_user(host_msg, msg, len); 2030 } else { 2031 fail: 2032 unlock_user(host_msg, msg, 0); 2033 } 2034 return ret; 2035 } 2036 2037 #ifdef TARGET_NR_socketcall 2038 /* do_socketcall() Must return target values and target errnos. */ 2039 static abi_long do_socketcall(int num, abi_ulong vptr) 2040 { 2041 abi_long ret; 2042 const int n = sizeof(abi_ulong); 2043 2044 switch(num) { 2045 case SOCKOP_socket: 2046 { 2047 abi_ulong domain, type, protocol; 2048 2049 if (get_user_ual(domain, vptr) 2050 || get_user_ual(type, vptr + n) 2051 || get_user_ual(protocol, vptr + 2 * n)) 2052 return -TARGET_EFAULT; 2053 2054 ret = do_socket(domain, type, protocol); 2055 } 2056 break; 2057 case SOCKOP_bind: 2058 { 2059 abi_ulong sockfd; 2060 abi_ulong target_addr; 2061 socklen_t addrlen; 2062 2063 if (get_user_ual(sockfd, vptr) 2064 || get_user_ual(target_addr, vptr + n) 2065 || get_user_ual(addrlen, vptr + 2 * n)) 2066 return -TARGET_EFAULT; 2067 2068 ret = do_bind(sockfd, target_addr, addrlen); 2069 } 2070 break; 2071 case SOCKOP_connect: 2072 { 2073 abi_ulong sockfd; 2074 abi_ulong target_addr; 2075 socklen_t addrlen; 2076 2077 if (get_user_ual(sockfd, vptr) 2078 || get_user_ual(target_addr, vptr + n) 2079 || get_user_ual(addrlen, vptr + 2 * n)) 2080 return -TARGET_EFAULT; 2081 2082 ret = do_connect(sockfd, target_addr, addrlen); 2083 } 2084 break; 2085 case SOCKOP_listen: 2086 { 2087 abi_ulong sockfd, backlog; 2088 2089 if (get_user_ual(sockfd, vptr) 2090 || get_user_ual(backlog, vptr + n)) 2091 return -TARGET_EFAULT; 2092 2093 ret = get_errno(listen(sockfd, backlog)); 2094 } 2095 break; 2096 case SOCKOP_accept: 2097 { 2098 abi_ulong sockfd; 2099 abi_ulong target_addr, target_addrlen; 2100 2101 
            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            /* send == sendto with a NULL destination address */
            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            /* recv == recvfrom with a NULL source address */
            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            abi_ulong fd;
            abi_ulong target_msg;
            abi_ulong flags;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif

#define N_SHM_REGIONS 32

/* Book-keeping for shmat() mappings so shmdt() can find their size. */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];

/* Target-ABI layout of struct ipc_perm (SysV IPC permissions). */
struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

/* Target-ABI layout of struct semid_ds (SysV semaphore set state). */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};

/* Read the sem_perm field of a target semid_ds at target_addr into a
 * host struct ipc_perm, byte-swapping each member. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid =
tswapl(target_ip->uid); 2338 host_ip->gid = tswapl(target_ip->gid); 2339 host_ip->cuid = tswapl(target_ip->cuid); 2340 host_ip->cgid = tswapl(target_ip->cgid); 2341 host_ip->mode = tswapl(target_ip->mode); 2342 unlock_user_struct(target_sd, target_addr, 0); 2343 return 0; 2344 } 2345 2346 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2347 struct ipc_perm *host_ip) 2348 { 2349 struct target_ipc_perm *target_ip; 2350 struct target_semid_ds *target_sd; 2351 2352 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2353 return -TARGET_EFAULT; 2354 target_ip = &(target_sd->sem_perm); 2355 target_ip->__key = tswapl(host_ip->__key); 2356 target_ip->uid = tswapl(host_ip->uid); 2357 target_ip->gid = tswapl(host_ip->gid); 2358 target_ip->cuid = tswapl(host_ip->cuid); 2359 target_ip->cgid = tswapl(host_ip->cgid); 2360 target_ip->mode = tswapl(host_ip->mode); 2361 unlock_user_struct(target_sd, target_addr, 1); 2362 return 0; 2363 } 2364 2365 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2366 abi_ulong target_addr) 2367 { 2368 struct target_semid_ds *target_sd; 2369 2370 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2371 return -TARGET_EFAULT; 2372 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2373 return -TARGET_EFAULT; 2374 host_sd->sem_nsems = tswapl(target_sd->sem_nsems); 2375 host_sd->sem_otime = tswapl(target_sd->sem_otime); 2376 host_sd->sem_ctime = tswapl(target_sd->sem_ctime); 2377 unlock_user_struct(target_sd, target_addr, 0); 2378 return 0; 2379 } 2380 2381 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2382 struct semid_ds *host_sd) 2383 { 2384 struct target_semid_ds *target_sd; 2385 2386 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2387 return -TARGET_EFAULT; 2388 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2389 return -TARGET_EFAULT;; 2390 target_sd->sem_nsems = tswapl(host_sd->sem_nsems); 2391 
target_sd->sem_otime = tswapl(host_sd->sem_otime); 2392 target_sd->sem_ctime = tswapl(host_sd->sem_ctime); 2393 unlock_user_struct(target_sd, target_addr, 1); 2394 return 0; 2395 } 2396 2397 struct target_seminfo { 2398 int semmap; 2399 int semmni; 2400 int semmns; 2401 int semmnu; 2402 int semmsl; 2403 int semopm; 2404 int semume; 2405 int semusz; 2406 int semvmx; 2407 int semaem; 2408 }; 2409 2410 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2411 struct seminfo *host_seminfo) 2412 { 2413 struct target_seminfo *target_seminfo; 2414 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2415 return -TARGET_EFAULT; 2416 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2417 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2418 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2419 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2420 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2421 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2422 __put_user(host_seminfo->semume, &target_seminfo->semume); 2423 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2424 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2425 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2426 unlock_user_struct(target_seminfo, target_addr, 1); 2427 return 0; 2428 } 2429 2430 union semun { 2431 int val; 2432 struct semid_ds *buf; 2433 unsigned short *array; 2434 struct seminfo *__buf; 2435 }; 2436 2437 union target_semun { 2438 int val; 2439 abi_ulong buf; 2440 abi_ulong array; 2441 abi_ulong __buf; 2442 }; 2443 2444 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2445 abi_ulong target_addr) 2446 { 2447 int nsems; 2448 unsigned short *array; 2449 union semun semun; 2450 struct semid_ds semid_ds; 2451 int i, ret; 2452 2453 semun.buf = &semid_ds; 2454 2455 ret = semctl(semid, 0, IPC_STAT, semun); 2456 if (ret == -1) 2457 return 
get_errno(ret); 2458 2459 nsems = semid_ds.sem_nsems; 2460 2461 *host_array = malloc(nsems*sizeof(unsigned short)); 2462 array = lock_user(VERIFY_READ, target_addr, 2463 nsems*sizeof(unsigned short), 1); 2464 if (!array) 2465 return -TARGET_EFAULT; 2466 2467 for(i=0; i<nsems; i++) { 2468 __get_user((*host_array)[i], &array[i]); 2469 } 2470 unlock_user(array, target_addr, 0); 2471 2472 return 0; 2473 } 2474 2475 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2476 unsigned short **host_array) 2477 { 2478 int nsems; 2479 unsigned short *array; 2480 union semun semun; 2481 struct semid_ds semid_ds; 2482 int i, ret; 2483 2484 semun.buf = &semid_ds; 2485 2486 ret = semctl(semid, 0, IPC_STAT, semun); 2487 if (ret == -1) 2488 return get_errno(ret); 2489 2490 nsems = semid_ds.sem_nsems; 2491 2492 array = lock_user(VERIFY_WRITE, target_addr, 2493 nsems*sizeof(unsigned short), 0); 2494 if (!array) 2495 return -TARGET_EFAULT; 2496 2497 for(i=0; i<nsems; i++) { 2498 __put_user((*host_array)[i], &array[i]); 2499 } 2500 free(*host_array); 2501 unlock_user(array, target_addr, 1); 2502 2503 return 0; 2504 } 2505 2506 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2507 union target_semun target_su) 2508 { 2509 union semun arg; 2510 struct semid_ds dsarg; 2511 unsigned short *array = NULL; 2512 struct seminfo seminfo; 2513 abi_long ret = -TARGET_EINVAL; 2514 abi_long err; 2515 cmd &= 0xff; 2516 2517 switch( cmd ) { 2518 case GETVAL: 2519 case SETVAL: 2520 arg.val = tswapl(target_su.val); 2521 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2522 target_su.val = tswapl(arg.val); 2523 break; 2524 case GETALL: 2525 case SETALL: 2526 err = target_to_host_semarray(semid, &array, target_su.array); 2527 if (err) 2528 return err; 2529 arg.array = array; 2530 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2531 err = host_to_target_semarray(semid, target_su.array, &array); 2532 if (err) 2533 return err; 2534 break; 2535 case IPC_STAT: 
2536 case IPC_SET: 2537 case SEM_STAT: 2538 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2539 if (err) 2540 return err; 2541 arg.buf = &dsarg; 2542 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2543 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2544 if (err) 2545 return err; 2546 break; 2547 case IPC_INFO: 2548 case SEM_INFO: 2549 arg.__buf = &seminfo; 2550 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2551 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2552 if (err) 2553 return err; 2554 break; 2555 case IPC_RMID: 2556 case GETPID: 2557 case GETNCNT: 2558 case GETZCNT: 2559 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2560 break; 2561 } 2562 2563 return ret; 2564 } 2565 2566 struct target_sembuf { 2567 unsigned short sem_num; 2568 short sem_op; 2569 short sem_flg; 2570 }; 2571 2572 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2573 abi_ulong target_addr, 2574 unsigned nsops) 2575 { 2576 struct target_sembuf *target_sembuf; 2577 int i; 2578 2579 target_sembuf = lock_user(VERIFY_READ, target_addr, 2580 nsops*sizeof(struct target_sembuf), 1); 2581 if (!target_sembuf) 2582 return -TARGET_EFAULT; 2583 2584 for(i=0; i<nsops; i++) { 2585 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2586 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2587 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2588 } 2589 2590 unlock_user(target_sembuf, target_addr, 0); 2591 2592 return 0; 2593 } 2594 2595 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2596 { 2597 struct sembuf sops[nsops]; 2598 2599 if (target_to_host_sembuf(sops, ptr, nsops)) 2600 return -TARGET_EFAULT; 2601 2602 return semop(semid, sops, nsops); 2603 } 2604 2605 struct target_msqid_ds 2606 { 2607 struct target_ipc_perm msg_perm; 2608 abi_ulong msg_stime; 2609 #if TARGET_ABI_BITS == 32 2610 abi_ulong __unused1; 2611 #endif 2612 abi_ulong msg_rtime; 2613 #if TARGET_ABI_BITS == 32 2614 
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Convert a guest msqid_ds at target_addr into *host_md, swapping each
 * field individually.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    /* msg_perm sits first in the struct; the perm helper re-locks the
     * same guest address. */
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Write *host_md back to the guest msqid_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

/* struct msginfo as laid out in the target (guest) ABI. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy *host_msginfo into the guest struct msginfo at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

/* Emulate msgctl(2): convert the guest argument at ptr as required by
 * cmd, run the host msgctl, and convert results back.
 * Returns a target errno. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns msginfo through the msqid_ds pointer for
         * these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}

/* struct msgbuf as laid out in the target (guest) ABI; mtext is a
 * variable-length payload following mtype. */
struct target_msgbuf {
2733 abi_long mtype; 2734 char mtext[1]; 2735 }; 2736 2737 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2738 unsigned int msgsz, int msgflg) 2739 { 2740 struct target_msgbuf *target_mb; 2741 struct msgbuf *host_mb; 2742 abi_long ret = 0; 2743 2744 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2745 return -TARGET_EFAULT; 2746 host_mb = malloc(msgsz+sizeof(long)); 2747 host_mb->mtype = (abi_long) tswapl(target_mb->mtype); 2748 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2749 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2750 free(host_mb); 2751 unlock_user_struct(target_mb, msgp, 0); 2752 2753 return ret; 2754 } 2755 2756 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2757 unsigned int msgsz, abi_long msgtyp, 2758 int msgflg) 2759 { 2760 struct target_msgbuf *target_mb; 2761 char *target_mtext; 2762 struct msgbuf *host_mb; 2763 abi_long ret = 0; 2764 2765 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2766 return -TARGET_EFAULT; 2767 2768 host_mb = malloc(msgsz+sizeof(long)); 2769 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg)); 2770 2771 if (ret > 0) { 2772 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2773 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2774 if (!target_mtext) { 2775 ret = -TARGET_EFAULT; 2776 goto end; 2777 } 2778 memcpy(target_mb->mtext, host_mb->mtext, ret); 2779 unlock_user(target_mtext, target_mtext_addr, ret); 2780 } 2781 2782 target_mb->mtype = tswapl(host_mb->mtype); 2783 free(host_mb); 2784 2785 end: 2786 if (target_mb) 2787 unlock_user_struct(target_mb, msgp, 1); 2788 return ret; 2789 } 2790 2791 struct target_shmid_ds 2792 { 2793 struct target_ipc_perm shm_perm; 2794 abi_ulong shm_segsz; 2795 abi_ulong shm_atime; 2796 #if TARGET_ABI_BITS == 32 2797 abi_ulong __unused1; 2798 #endif 2799 abi_ulong shm_dtime; 2800 #if TARGET_ABI_BITS == 32 2801 abi_ulong __unused2; 2802 #endif 2803 abi_ulong shm_ctime; 2804 #if TARGET_ABI_BITS == 
32
    abi_ulong __unused3;
#endif
    int shm_cpid;
    int shm_lpid;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
};

/* Convert a guest shmid_ds at target_addr into *host_sd.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* shm_perm sits first; the perm helper re-locks the same address. */
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Write *host_sd back to the guest shmid_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* struct shminfo as laid out in the target (guest) ABI. */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/* Copy *host_shminfo into the guest struct shminfo at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

/* struct shm_info as laid out in the target (guest) ABI. */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Copy *host_shm_info into the guest struct shm_info at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

/* Emulate shmctl(2): convert the guest argument at buf as required by
 * cmd, run the host shmctl, convert results back.
 * Returns a target errno. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case
SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel returns shminfo through the shmid_ds pointer for
         * IPC_INFO, hence the cast. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No argument conversion needed for these commands. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

/* Emulate shmat(2): attach the segment on the host, pick a guest
 * address if the guest passed none, mark the guest pages valid and
 * remember the mapping so do_shmdt() can undo it.
 * Returns the guest attach address or a target errno. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* Let the guest mmap allocator choose a free address range. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace whatever mmap_find_vma reserved. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Pages become readable, and writable unless attached read-only. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the mapping in the first free slot (start == 0 is free);
     * if the table is full the attach is silently not tracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}

/* Emulate shmdt(2): clear the page flags recorded by do_shmat() for
 * this address (if tracked) and detach on the host.
 * Returns a target errno. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}

#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* The ipc(2) multiplexer encodes an interface version in the high
     * 16 bits of the call number. */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes msgp and msgtyp indirectly
                 * through this kludge structure in guest memory. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret =
do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            /* NOTE(review): raddr should already be a target errno here;
             * the extra get_errno() wrap looks redundant - confirm. */
            if (is_error(raddr))
                return get_errno(raddr);
            /* Attach address is returned through the guest pointer in
             * 'third'; ret stays 0 on success. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * tags for every described kernel structure. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk type-description array per structure;
 * SPECIAL structs are converted by hand elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a hand-written ioctl handler (IOCTL_SPECIAL entries). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

/* One row of the ioctl translation table (see ioctls.h). */
struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* non-NULL for special-cased ioctls */
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the on-stack conversion buffer used by do_ioctl(). */
#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Copy in and convert the fixed-size fiemap header. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject extent counts whose buffer size would overflow 32 bits. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif

/* Special-case handler for SIOCGIFCONF: the ifconf struct contains an
 * embedded pointer to an ifreq array, both of which need separate
 * guest<->host conversion. */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, abi_long cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    /* Copy in and convert the guest's struct ifconf. */
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr,
arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    /* Stash the guest's own length and buffer pointer before the struct
     * is repointed at host storage. */
    target_ifc_len = host_ifconf->ifc_len;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    /* Guest and host ifreq sizes may differ; derive the host buffer
     * length from the number of guest entries requested. */
    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        host_ifconf = malloc(outbufsz);
        if (!host_ifconf) {
            return -TARGET_ENOMEM;
        }
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
        free_buf = 1;
    }
    /* The ifreq array lives directly behind the ifconf header. */
    host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
	/* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

	/* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

	/* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

	/* copy ifreq[] to target user */

        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        for (i = 0; i < nb_ifreq ; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        }
        unlock_user(argptr, target_ifc_buf, target_ifc_len);
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}

/* Translation table mapping target ioctl numbers to host ioctls; rows
 * are generated from ioctls.h and terminated by a zero target_cmd. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};

/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the translation table. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Hand off to the special-case handler for this ioctl. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    /* Generic path: convert the argument according to its thunk type
     * description and the declared access direction. */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argment */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes only: run the ioctl, then copy out. */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads only: copy in, then run the ioctl. */
            argptr =
lock_user(VERIFY_READ, arg, target_size, 1); 3393 if (!argptr) 3394 return -TARGET_EFAULT; 3395 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3396 unlock_user(argptr, arg, 0); 3397 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3398 break; 3399 default: 3400 case IOC_RW: 3401 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3402 if (!argptr) 3403 return -TARGET_EFAULT; 3404 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3405 unlock_user(argptr, arg, 0); 3406 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3407 if (!is_error(ret)) { 3408 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3409 if (!argptr) 3410 return -TARGET_EFAULT; 3411 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3412 unlock_user(argptr, arg, target_size); 3413 } 3414 break; 3415 } 3416 break; 3417 default: 3418 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3419 (long)cmd, arg_type[0]); 3420 ret = -TARGET_ENOSYS; 3421 break; 3422 } 3423 return ret; 3424 } 3425 3426 static const bitmask_transtbl iflag_tbl[] = { 3427 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3428 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3429 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3430 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3431 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3432 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3433 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3434 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3435 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3436 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3437 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3438 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3439 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3440 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3441 { 0, 0, 0, 0 } 3442 }; 3443 3444 static const bitmask_transtbl oflag_tbl[] = { 3445 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3446 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3447 { TARGET_ONLCR, 
TARGET_ONLCR, ONLCR, ONLCR }, 3448 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3449 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 3450 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3451 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3452 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3453 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3454 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3455 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3456 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3457 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3458 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3459 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3460 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3461 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3462 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3463 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3464 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3465 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3466 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3467 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3468 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3469 { 0, 0, 0, 0 } 3470 }; 3471 3472 static const bitmask_transtbl cflag_tbl[] = { 3473 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3474 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3475 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3476 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3477 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3478 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3479 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3480 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3481 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3482 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3483 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3484 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3485 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3486 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3487 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3488 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3489 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3490 { 
TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3491 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3492 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3493 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3494 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3495 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3496 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3497 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3498 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3499 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3500 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3501 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3502 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3503 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3504 { 0, 0, 0, 0 } 3505 }; 3506 3507 static const bitmask_transtbl lflag_tbl[] = { 3508 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3509 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3510 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3511 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3512 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3513 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3514 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3515 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3516 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3517 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3518 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3519 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3520 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3521 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3522 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3523 { 0, 0, 0, 0 } 3524 }; 3525 3526 static void target_to_host_termios (void *dst, const void *src) 3527 { 3528 struct host_termios *host = dst; 3529 const struct target_termios *target = src; 3530 3531 host->c_iflag = 3532 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3533 host->c_oflag = 3534 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3535 
host->c_cflag = 3536 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3537 host->c_lflag = 3538 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3539 host->c_line = target->c_line; 3540 3541 memset(host->c_cc, 0, sizeof(host->c_cc)); 3542 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3543 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3544 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3545 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3546 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3547 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3548 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3549 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3550 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3551 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3552 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3553 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3554 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3555 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3556 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3557 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3558 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3559 } 3560 3561 static void host_to_target_termios (void *dst, const void *src) 3562 { 3563 struct target_termios *target = dst; 3564 const struct host_termios *host = src; 3565 3566 target->c_iflag = 3567 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3568 target->c_oflag = 3569 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3570 target->c_cflag = 3571 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3572 target->c_lflag = 3573 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3574 target->c_line = host->c_line; 3575 3576 memset(target->c_cc, 0, sizeof(target->c_cc)); 3577 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3578 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3579 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3580 target->c_cc[TARGET_VKILL] = 
host->c_cc[VKILL]; 3581 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3582 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3583 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3584 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3585 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3586 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3587 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3588 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3589 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3590 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3591 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3592 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3593 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3594 } 3595 3596 static const StructEntry struct_termios_def = { 3597 .convert = { host_to_target_termios, target_to_host_termios }, 3598 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3599 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3600 }; 3601 3602 static bitmask_transtbl mmap_flags_tbl[] = { 3603 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3604 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3605 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 3606 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 3607 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 3608 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 3609 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 3610 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 3611 { 0, 0, 0, 0 } 3612 }; 3613 3614 #if defined(TARGET_I386) 3615 3616 /* NOTE: there is really one LDT for all the threads */ 3617 static uint8_t *ldt_table; 3618 3619 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 3620 { 3621 int size; 3622 void *p; 3623 3624 if 
(!ldt_table) 3625 return 0; 3626 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 3627 if (size > bytecount) 3628 size = bytecount; 3629 p = lock_user(VERIFY_WRITE, ptr, size, 0); 3630 if (!p) 3631 return -TARGET_EFAULT; 3632 /* ??? Should this by byteswapped? */ 3633 memcpy(p, ldt_table, size); 3634 unlock_user(p, ptr, size); 3635 return size; 3636 } 3637 3638 /* XXX: add locking support */ 3639 static abi_long write_ldt(CPUX86State *env, 3640 abi_ulong ptr, unsigned long bytecount, int oldmode) 3641 { 3642 struct target_modify_ldt_ldt_s ldt_info; 3643 struct target_modify_ldt_ldt_s *target_ldt_info; 3644 int seg_32bit, contents, read_exec_only, limit_in_pages; 3645 int seg_not_present, useable, lm; 3646 uint32_t *lp, entry_1, entry_2; 3647 3648 if (bytecount != sizeof(ldt_info)) 3649 return -TARGET_EINVAL; 3650 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 3651 return -TARGET_EFAULT; 3652 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3653 ldt_info.base_addr = tswapl(target_ldt_info->base_addr); 3654 ldt_info.limit = tswap32(target_ldt_info->limit); 3655 ldt_info.flags = tswap32(target_ldt_info->flags); 3656 unlock_user_struct(target_ldt_info, ptr, 0); 3657 3658 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 3659 return -TARGET_EINVAL; 3660 seg_32bit = ldt_info.flags & 1; 3661 contents = (ldt_info.flags >> 1) & 3; 3662 read_exec_only = (ldt_info.flags >> 3) & 1; 3663 limit_in_pages = (ldt_info.flags >> 4) & 1; 3664 seg_not_present = (ldt_info.flags >> 5) & 1; 3665 useable = (ldt_info.flags >> 6) & 1; 3666 #ifdef TARGET_ABI32 3667 lm = 0; 3668 #else 3669 lm = (ldt_info.flags >> 7) & 1; 3670 #endif 3671 if (contents == 3) { 3672 if (oldmode) 3673 return -TARGET_EINVAL; 3674 if (seg_not_present == 0) 3675 return -TARGET_EINVAL; 3676 } 3677 /* allocate the LDT */ 3678 if (!ldt_table) { 3679 env->ldt.base = target_mmap(0, 3680 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 3681 PROT_READ|PROT_WRITE, 3682 MAP_ANONYMOUS|MAP_PRIVATE, -1, 
0); 3683 if (env->ldt.base == -1) 3684 return -TARGET_ENOMEM; 3685 memset(g2h(env->ldt.base), 0, 3686 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 3687 env->ldt.limit = 0xffff; 3688 ldt_table = g2h(env->ldt.base); 3689 } 3690 3691 /* NOTE: same code as Linux kernel */ 3692 /* Allow LDTs to be cleared by the user. */ 3693 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3694 if (oldmode || 3695 (contents == 0 && 3696 read_exec_only == 1 && 3697 seg_32bit == 0 && 3698 limit_in_pages == 0 && 3699 seg_not_present == 1 && 3700 useable == 0 )) { 3701 entry_1 = 0; 3702 entry_2 = 0; 3703 goto install; 3704 } 3705 } 3706 3707 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3708 (ldt_info.limit & 0x0ffff); 3709 entry_2 = (ldt_info.base_addr & 0xff000000) | 3710 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3711 (ldt_info.limit & 0xf0000) | 3712 ((read_exec_only ^ 1) << 9) | 3713 (contents << 10) | 3714 ((seg_not_present ^ 1) << 15) | 3715 (seg_32bit << 22) | 3716 (limit_in_pages << 23) | 3717 (lm << 21) | 3718 0x7000; 3719 if (!oldmode) 3720 entry_2 |= (useable << 20); 3721 3722 /* Install the new entry ... 
*/ 3723 install: 3724 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 3725 lp[0] = tswap32(entry_1); 3726 lp[1] = tswap32(entry_2); 3727 return 0; 3728 } 3729 3730 /* specific and weird i386 syscalls */ 3731 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 3732 unsigned long bytecount) 3733 { 3734 abi_long ret; 3735 3736 switch (func) { 3737 case 0: 3738 ret = read_ldt(ptr, bytecount); 3739 break; 3740 case 1: 3741 ret = write_ldt(env, ptr, bytecount, 1); 3742 break; 3743 case 0x11: 3744 ret = write_ldt(env, ptr, bytecount, 0); 3745 break; 3746 default: 3747 ret = -TARGET_ENOSYS; 3748 break; 3749 } 3750 return ret; 3751 } 3752 3753 #if defined(TARGET_I386) && defined(TARGET_ABI32) 3754 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 3755 { 3756 uint64_t *gdt_table = g2h(env->gdt.base); 3757 struct target_modify_ldt_ldt_s ldt_info; 3758 struct target_modify_ldt_ldt_s *target_ldt_info; 3759 int seg_32bit, contents, read_exec_only, limit_in_pages; 3760 int seg_not_present, useable, lm; 3761 uint32_t *lp, entry_1, entry_2; 3762 int i; 3763 3764 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 3765 if (!target_ldt_info) 3766 return -TARGET_EFAULT; 3767 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3768 ldt_info.base_addr = tswapl(target_ldt_info->base_addr); 3769 ldt_info.limit = tswap32(target_ldt_info->limit); 3770 ldt_info.flags = tswap32(target_ldt_info->flags); 3771 if (ldt_info.entry_number == -1) { 3772 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 3773 if (gdt_table[i] == 0) { 3774 ldt_info.entry_number = i; 3775 target_ldt_info->entry_number = tswap32(i); 3776 break; 3777 } 3778 } 3779 } 3780 unlock_user_struct(target_ldt_info, ptr, 1); 3781 3782 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 3783 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 3784 return -TARGET_EINVAL; 3785 seg_32bit = ldt_info.flags & 1; 3786 contents = (ldt_info.flags >> 1) 
& 3; 3787 read_exec_only = (ldt_info.flags >> 3) & 1; 3788 limit_in_pages = (ldt_info.flags >> 4) & 1; 3789 seg_not_present = (ldt_info.flags >> 5) & 1; 3790 useable = (ldt_info.flags >> 6) & 1; 3791 #ifdef TARGET_ABI32 3792 lm = 0; 3793 #else 3794 lm = (ldt_info.flags >> 7) & 1; 3795 #endif 3796 3797 if (contents == 3) { 3798 if (seg_not_present == 0) 3799 return -TARGET_EINVAL; 3800 } 3801 3802 /* NOTE: same code as Linux kernel */ 3803 /* Allow LDTs to be cleared by the user. */ 3804 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3805 if ((contents == 0 && 3806 read_exec_only == 1 && 3807 seg_32bit == 0 && 3808 limit_in_pages == 0 && 3809 seg_not_present == 1 && 3810 useable == 0 )) { 3811 entry_1 = 0; 3812 entry_2 = 0; 3813 goto install; 3814 } 3815 } 3816 3817 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3818 (ldt_info.limit & 0x0ffff); 3819 entry_2 = (ldt_info.base_addr & 0xff000000) | 3820 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3821 (ldt_info.limit & 0xf0000) | 3822 ((read_exec_only ^ 1) << 9) | 3823 (contents << 10) | 3824 ((seg_not_present ^ 1) << 15) | 3825 (seg_32bit << 22) | 3826 (limit_in_pages << 23) | 3827 (useable << 20) | 3828 (lm << 21) | 3829 0x7000; 3830 3831 /* Install the new entry ... 
*/ 3832 install: 3833 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 3834 lp[0] = tswap32(entry_1); 3835 lp[1] = tswap32(entry_2); 3836 return 0; 3837 } 3838 3839 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 3840 { 3841 struct target_modify_ldt_ldt_s *target_ldt_info; 3842 uint64_t *gdt_table = g2h(env->gdt.base); 3843 uint32_t base_addr, limit, flags; 3844 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 3845 int seg_not_present, useable, lm; 3846 uint32_t *lp, entry_1, entry_2; 3847 3848 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 3849 if (!target_ldt_info) 3850 return -TARGET_EFAULT; 3851 idx = tswap32(target_ldt_info->entry_number); 3852 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 3853 idx > TARGET_GDT_ENTRY_TLS_MAX) { 3854 unlock_user_struct(target_ldt_info, ptr, 1); 3855 return -TARGET_EINVAL; 3856 } 3857 lp = (uint32_t *)(gdt_table + idx); 3858 entry_1 = tswap32(lp[0]); 3859 entry_2 = tswap32(lp[1]); 3860 3861 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 3862 contents = (entry_2 >> 10) & 3; 3863 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 3864 seg_32bit = (entry_2 >> 22) & 1; 3865 limit_in_pages = (entry_2 >> 23) & 1; 3866 useable = (entry_2 >> 20) & 1; 3867 #ifdef TARGET_ABI32 3868 lm = 0; 3869 #else 3870 lm = (entry_2 >> 21) & 1; 3871 #endif 3872 flags = (seg_32bit << 0) | (contents << 1) | 3873 (read_exec_only << 3) | (limit_in_pages << 4) | 3874 (seg_not_present << 5) | (useable << 6) | (lm << 7); 3875 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 3876 base_addr = (entry_1 >> 16) | 3877 (entry_2 & 0xff000000) | 3878 ((entry_2 & 0xff) << 16); 3879 target_ldt_info->base_addr = tswapl(base_addr); 3880 target_ldt_info->limit = tswap32(limit); 3881 target_ldt_info->flags = tswap32(flags); 3882 unlock_user_struct(target_ldt_info, ptr, 1); 3883 return 0; 3884 } 3885 #endif /* TARGET_I386 && TARGET_ABI32 */ 3886 3887 #ifndef TARGET_ABI32 3888 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong 
addr) 3889 { 3890 abi_long ret = 0; 3891 abi_ulong val; 3892 int idx; 3893 3894 switch(code) { 3895 case TARGET_ARCH_SET_GS: 3896 case TARGET_ARCH_SET_FS: 3897 if (code == TARGET_ARCH_SET_GS) 3898 idx = R_GS; 3899 else 3900 idx = R_FS; 3901 cpu_x86_load_seg(env, idx, 0); 3902 env->segs[idx].base = addr; 3903 break; 3904 case TARGET_ARCH_GET_GS: 3905 case TARGET_ARCH_GET_FS: 3906 if (code == TARGET_ARCH_GET_GS) 3907 idx = R_GS; 3908 else 3909 idx = R_FS; 3910 val = env->segs[idx].base; 3911 if (put_user(val, addr, abi_ulong)) 3912 ret = -TARGET_EFAULT; 3913 break; 3914 default: 3915 ret = -TARGET_EINVAL; 3916 break; 3917 } 3918 return ret; 3919 } 3920 #endif 3921 3922 #endif /* defined(TARGET_I386) */ 3923 3924 #define NEW_STACK_SIZE 0x40000 3925 3926 #if defined(CONFIG_USE_NPTL) 3927 3928 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 3929 typedef struct { 3930 CPUState *env; 3931 pthread_mutex_t mutex; 3932 pthread_cond_t cond; 3933 pthread_t thread; 3934 uint32_t tid; 3935 abi_ulong child_tidptr; 3936 abi_ulong parent_tidptr; 3937 sigset_t sigmask; 3938 } new_thread_info; 3939 3940 static void *clone_func(void *arg) 3941 { 3942 new_thread_info *info = arg; 3943 CPUState *env; 3944 TaskState *ts; 3945 3946 env = info->env; 3947 thread_env = env; 3948 ts = (TaskState *)thread_env->opaque; 3949 info->tid = gettid(); 3950 env->host_tid = info->tid; 3951 task_settid(ts); 3952 if (info->child_tidptr) 3953 put_user_u32(info->tid, info->child_tidptr); 3954 if (info->parent_tidptr) 3955 put_user_u32(info->tid, info->parent_tidptr); 3956 /* Enable signals. */ 3957 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 3958 /* Signal to the parent that we're ready. */ 3959 pthread_mutex_lock(&info->mutex); 3960 pthread_cond_broadcast(&info->cond); 3961 pthread_mutex_unlock(&info->mutex); 3962 /* Wait until the parent has finshed initializing the tls state. 
*/ 3963 pthread_mutex_lock(&clone_lock); 3964 pthread_mutex_unlock(&clone_lock); 3965 cpu_loop(env); 3966 /* never exits */ 3967 return NULL; 3968 } 3969 #else 3970 3971 static int clone_func(void *arg) 3972 { 3973 CPUState *env = arg; 3974 cpu_loop(env); 3975 /* never exits */ 3976 return 0; 3977 } 3978 #endif 3979 3980 /* do_fork() Must return host values and target errnos (unlike most 3981 do_*() functions). */ 3982 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp, 3983 abi_ulong parent_tidptr, target_ulong newtls, 3984 abi_ulong child_tidptr) 3985 { 3986 int ret; 3987 TaskState *ts; 3988 CPUState *new_env; 3989 #if defined(CONFIG_USE_NPTL) 3990 unsigned int nptl_flags; 3991 sigset_t sigmask; 3992 #else 3993 uint8_t *new_stack; 3994 #endif 3995 3996 /* Emulate vfork() with fork() */ 3997 if (flags & CLONE_VFORK) 3998 flags &= ~(CLONE_VFORK | CLONE_VM); 3999 4000 if (flags & CLONE_VM) { 4001 TaskState *parent_ts = (TaskState *)env->opaque; 4002 #if defined(CONFIG_USE_NPTL) 4003 new_thread_info info; 4004 pthread_attr_t attr; 4005 #endif 4006 ts = g_malloc0(sizeof(TaskState)); 4007 init_task_state(ts); 4008 /* we create a new CPU instance. */ 4009 new_env = cpu_copy(env); 4010 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC) 4011 cpu_reset(new_env); 4012 #endif 4013 /* Init regs that differ from the parent. */ 4014 cpu_clone_regs(new_env, newsp); 4015 new_env->opaque = ts; 4016 ts->bprm = parent_ts->bprm; 4017 ts->info = parent_ts->info; 4018 #if defined(CONFIG_USE_NPTL) 4019 nptl_flags = flags; 4020 flags &= ~CLONE_NPTL_FLAGS2; 4021 4022 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4023 ts->child_tidptr = child_tidptr; 4024 } 4025 4026 if (nptl_flags & CLONE_SETTLS) 4027 cpu_set_tls (new_env, newtls); 4028 4029 /* Grab a mutex so that thread setup appears atomic. 
*/ 4030 pthread_mutex_lock(&clone_lock); 4031 4032 memset(&info, 0, sizeof(info)); 4033 pthread_mutex_init(&info.mutex, NULL); 4034 pthread_mutex_lock(&info.mutex); 4035 pthread_cond_init(&info.cond, NULL); 4036 info.env = new_env; 4037 if (nptl_flags & CLONE_CHILD_SETTID) 4038 info.child_tidptr = child_tidptr; 4039 if (nptl_flags & CLONE_PARENT_SETTID) 4040 info.parent_tidptr = parent_tidptr; 4041 4042 ret = pthread_attr_init(&attr); 4043 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4044 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4045 /* It is not safe to deliver signals until the child has finished 4046 initializing, so temporarily block all signals. */ 4047 sigfillset(&sigmask); 4048 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4049 4050 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4051 /* TODO: Free new CPU state if thread creation failed. */ 4052 4053 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4054 pthread_attr_destroy(&attr); 4055 if (ret == 0) { 4056 /* Wait for the child to initialize. */ 4057 pthread_cond_wait(&info.cond, &info.mutex); 4058 ret = info.tid; 4059 if (flags & CLONE_PARENT_SETTID) 4060 put_user_u32(ret, parent_tidptr); 4061 } else { 4062 ret = -1; 4063 } 4064 pthread_mutex_unlock(&info.mutex); 4065 pthread_cond_destroy(&info.cond); 4066 pthread_mutex_destroy(&info.mutex); 4067 pthread_mutex_unlock(&clone_lock); 4068 #else 4069 if (flags & CLONE_NPTL_FLAGS2) 4070 return -EINVAL; 4071 /* This is probably going to die very quickly, but do it anyway. 
*/ 4072 new_stack = g_malloc0 (NEW_STACK_SIZE); 4073 #ifdef __ia64__ 4074 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env); 4075 #else 4076 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env); 4077 #endif 4078 #endif 4079 } else { 4080 /* if no CLONE_VM, we consider it is a fork */ 4081 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4082 return -EINVAL; 4083 fork_start(); 4084 ret = fork(); 4085 if (ret == 0) { 4086 /* Child Process. */ 4087 cpu_clone_regs(env, newsp); 4088 fork_end(1); 4089 #if defined(CONFIG_USE_NPTL) 4090 /* There is a race condition here. The parent process could 4091 theoretically read the TID in the child process before the child 4092 tid is set. This would require using either ptrace 4093 (not implemented) or having *_tidptr to point at a shared memory 4094 mapping. We can't repeat the spinlock hack used above because 4095 the child process gets its own copy of the lock. */ 4096 if (flags & CLONE_CHILD_SETTID) 4097 put_user_u32(gettid(), child_tidptr); 4098 if (flags & CLONE_PARENT_SETTID) 4099 put_user_u32(gettid(), parent_tidptr); 4100 ts = (TaskState *)env->opaque; 4101 if (flags & CLONE_SETTLS) 4102 cpu_set_tls (env, newtls); 4103 if (flags & CLONE_CHILD_CLEARTID) 4104 ts->child_tidptr = child_tidptr; 4105 #endif 4106 } else { 4107 fork_end(0); 4108 } 4109 } 4110 return ret; 4111 } 4112 4113 /* warning : doesn't handle linux specific flags... 
*/ 4114 static int target_to_host_fcntl_cmd(int cmd) 4115 { 4116 switch(cmd) { 4117 case TARGET_F_DUPFD: 4118 case TARGET_F_GETFD: 4119 case TARGET_F_SETFD: 4120 case TARGET_F_GETFL: 4121 case TARGET_F_SETFL: 4122 return cmd; 4123 case TARGET_F_GETLK: 4124 return F_GETLK; 4125 case TARGET_F_SETLK: 4126 return F_SETLK; 4127 case TARGET_F_SETLKW: 4128 return F_SETLKW; 4129 case TARGET_F_GETOWN: 4130 return F_GETOWN; 4131 case TARGET_F_SETOWN: 4132 return F_SETOWN; 4133 case TARGET_F_GETSIG: 4134 return F_GETSIG; 4135 case TARGET_F_SETSIG: 4136 return F_SETSIG; 4137 #if TARGET_ABI_BITS == 32 4138 case TARGET_F_GETLK64: 4139 return F_GETLK64; 4140 case TARGET_F_SETLK64: 4141 return F_SETLK64; 4142 case TARGET_F_SETLKW64: 4143 return F_SETLKW64; 4144 #endif 4145 case TARGET_F_SETLEASE: 4146 return F_SETLEASE; 4147 case TARGET_F_GETLEASE: 4148 return F_GETLEASE; 4149 #ifdef F_DUPFD_CLOEXEC 4150 case TARGET_F_DUPFD_CLOEXEC: 4151 return F_DUPFD_CLOEXEC; 4152 #endif 4153 case TARGET_F_NOTIFY: 4154 return F_NOTIFY; 4155 default: 4156 return -TARGET_EINVAL; 4157 } 4158 return -TARGET_EINVAL; 4159 } 4160 4161 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4162 { 4163 struct flock fl; 4164 struct target_flock *target_fl; 4165 struct flock64 fl64; 4166 struct target_flock64 *target_fl64; 4167 abi_long ret; 4168 int host_cmd = target_to_host_fcntl_cmd(cmd); 4169 4170 if (host_cmd == -TARGET_EINVAL) 4171 return host_cmd; 4172 4173 switch(cmd) { 4174 case TARGET_F_GETLK: 4175 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4176 return -TARGET_EFAULT; 4177 fl.l_type = tswap16(target_fl->l_type); 4178 fl.l_whence = tswap16(target_fl->l_whence); 4179 fl.l_start = tswapl(target_fl->l_start); 4180 fl.l_len = tswapl(target_fl->l_len); 4181 fl.l_pid = tswap32(target_fl->l_pid); 4182 unlock_user_struct(target_fl, arg, 0); 4183 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4184 if (ret == 0) { 4185 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4186 return 
-TARGET_EFAULT; 4187 target_fl->l_type = tswap16(fl.l_type); 4188 target_fl->l_whence = tswap16(fl.l_whence); 4189 target_fl->l_start = tswapl(fl.l_start); 4190 target_fl->l_len = tswapl(fl.l_len); 4191 target_fl->l_pid = tswap32(fl.l_pid); 4192 unlock_user_struct(target_fl, arg, 1); 4193 } 4194 break; 4195 4196 case TARGET_F_SETLK: 4197 case TARGET_F_SETLKW: 4198 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4199 return -TARGET_EFAULT; 4200 fl.l_type = tswap16(target_fl->l_type); 4201 fl.l_whence = tswap16(target_fl->l_whence); 4202 fl.l_start = tswapl(target_fl->l_start); 4203 fl.l_len = tswapl(target_fl->l_len); 4204 fl.l_pid = tswap32(target_fl->l_pid); 4205 unlock_user_struct(target_fl, arg, 0); 4206 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4207 break; 4208 4209 case TARGET_F_GETLK64: 4210 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4211 return -TARGET_EFAULT; 4212 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4213 fl64.l_whence = tswap16(target_fl64->l_whence); 4214 fl64.l_start = tswapl(target_fl64->l_start); 4215 fl64.l_len = tswapl(target_fl64->l_len); 4216 fl64.l_pid = tswap32(target_fl64->l_pid); 4217 unlock_user_struct(target_fl64, arg, 0); 4218 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4219 if (ret == 0) { 4220 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4221 return -TARGET_EFAULT; 4222 target_fl64->l_type = tswap16(fl64.l_type) >> 1; 4223 target_fl64->l_whence = tswap16(fl64.l_whence); 4224 target_fl64->l_start = tswapl(fl64.l_start); 4225 target_fl64->l_len = tswapl(fl64.l_len); 4226 target_fl64->l_pid = tswap32(fl64.l_pid); 4227 unlock_user_struct(target_fl64, arg, 1); 4228 } 4229 break; 4230 case TARGET_F_SETLK64: 4231 case TARGET_F_SETLKW64: 4232 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4233 return -TARGET_EFAULT; 4234 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4235 fl64.l_whence = tswap16(target_fl64->l_whence); 4236 fl64.l_start = tswapl(target_fl64->l_start); 4237 fl64.l_len = 
tswapl(target_fl64->l_len); 4238 fl64.l_pid = tswap32(target_fl64->l_pid); 4239 unlock_user_struct(target_fl64, arg, 0); 4240 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4241 break; 4242 4243 case TARGET_F_GETFL: 4244 ret = get_errno(fcntl(fd, host_cmd, arg)); 4245 if (ret >= 0) { 4246 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4247 } 4248 break; 4249 4250 case TARGET_F_SETFL: 4251 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4252 break; 4253 4254 case TARGET_F_SETOWN: 4255 case TARGET_F_GETOWN: 4256 case TARGET_F_SETSIG: 4257 case TARGET_F_GETSIG: 4258 case TARGET_F_SETLEASE: 4259 case TARGET_F_GETLEASE: 4260 ret = get_errno(fcntl(fd, host_cmd, arg)); 4261 break; 4262 4263 default: 4264 ret = get_errno(fcntl(fd, cmd, arg)); 4265 break; 4266 } 4267 return ret; 4268 } 4269 4270 #ifdef USE_UID16 4271 4272 static inline int high2lowuid(int uid) 4273 { 4274 if (uid > 65535) 4275 return 65534; 4276 else 4277 return uid; 4278 } 4279 4280 static inline int high2lowgid(int gid) 4281 { 4282 if (gid > 65535) 4283 return 65534; 4284 else 4285 return gid; 4286 } 4287 4288 static inline int low2highuid(int uid) 4289 { 4290 if ((int16_t)uid == -1) 4291 return -1; 4292 else 4293 return uid; 4294 } 4295 4296 static inline int low2highgid(int gid) 4297 { 4298 if ((int16_t)gid == -1) 4299 return -1; 4300 else 4301 return gid; 4302 } 4303 static inline int tswapid(int id) 4304 { 4305 return tswap16(id); 4306 } 4307 #else /* !USE_UID16 */ 4308 static inline int high2lowuid(int uid) 4309 { 4310 return uid; 4311 } 4312 static inline int high2lowgid(int gid) 4313 { 4314 return gid; 4315 } 4316 static inline int low2highuid(int uid) 4317 { 4318 return uid; 4319 } 4320 static inline int low2highgid(int gid) 4321 { 4322 return gid; 4323 } 4324 static inline int tswapid(int id) 4325 { 4326 return tswap32(id); 4327 } 4328 #endif /* USE_UID16 */ 4329 4330 void syscall_init(void) 4331 { 4332 IOCTLEntry *ie; 4333 const argtype *arg_type; 4334 int 
size; 4335 int i; 4336 4337 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4338 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4339 #include "syscall_types.h" 4340 #undef STRUCT 4341 #undef STRUCT_SPECIAL 4342 4343 /* we patch the ioctl size if necessary. We rely on the fact that 4344 no ioctl has all the bits at '1' in the size field */ 4345 ie = ioctl_entries; 4346 while (ie->target_cmd != 0) { 4347 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4348 TARGET_IOC_SIZEMASK) { 4349 arg_type = ie->arg_type; 4350 if (arg_type[0] != TYPE_PTR) { 4351 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4352 ie->target_cmd); 4353 exit(1); 4354 } 4355 arg_type++; 4356 size = thunk_type_size(arg_type, 0); 4357 ie->target_cmd = (ie->target_cmd & 4358 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4359 (size << TARGET_IOC_SIZESHIFT); 4360 } 4361 4362 /* Build target_to_host_errno_table[] table from 4363 * host_to_target_errno_table[]. 
*/ 4364 for (i=0; i < ERRNO_TABLE_SIZE; i++) 4365 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4366 4367 /* automatic consistency check if same arch */ 4368 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4369 (defined(__x86_64__) && defined(TARGET_X86_64)) 4370 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4371 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4372 ie->name, ie->target_cmd, ie->host_cmd); 4373 } 4374 #endif 4375 ie++; 4376 } 4377 } 4378 4379 #if TARGET_ABI_BITS == 32 4380 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4381 { 4382 #ifdef TARGET_WORDS_BIGENDIAN 4383 return ((uint64_t)word0 << 32) | word1; 4384 #else 4385 return ((uint64_t)word1 << 32) | word0; 4386 #endif 4387 } 4388 #else /* TARGET_ABI_BITS == 32 */ 4389 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4390 { 4391 return word0; 4392 } 4393 #endif /* TARGET_ABI_BITS != 32 */ 4394 4395 #ifdef TARGET_NR_truncate64 4396 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4397 abi_long arg2, 4398 abi_long arg3, 4399 abi_long arg4) 4400 { 4401 if (regpairs_aligned(cpu_env)) { 4402 arg2 = arg3; 4403 arg3 = arg4; 4404 } 4405 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4406 } 4407 #endif 4408 4409 #ifdef TARGET_NR_ftruncate64 4410 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4411 abi_long arg2, 4412 abi_long arg3, 4413 abi_long arg4) 4414 { 4415 if (regpairs_aligned(cpu_env)) { 4416 arg2 = arg3; 4417 arg3 = arg4; 4418 } 4419 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4420 } 4421 #endif 4422 4423 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4424 abi_ulong target_addr) 4425 { 4426 struct target_timespec *target_ts; 4427 4428 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4429 return -TARGET_EFAULT; 4430 host_ts->tv_sec = tswapl(target_ts->tv_sec); 4431 
host_ts->tv_nsec = tswapl(target_ts->tv_nsec); 4432 unlock_user_struct(target_ts, target_addr, 0); 4433 return 0; 4434 } 4435 4436 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4437 struct timespec *host_ts) 4438 { 4439 struct target_timespec *target_ts; 4440 4441 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4442 return -TARGET_EFAULT; 4443 target_ts->tv_sec = tswapl(host_ts->tv_sec); 4444 target_ts->tv_nsec = tswapl(host_ts->tv_nsec); 4445 unlock_user_struct(target_ts, target_addr, 1); 4446 return 0; 4447 } 4448 4449 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4450 static inline abi_long host_to_target_stat64(void *cpu_env, 4451 abi_ulong target_addr, 4452 struct stat *host_st) 4453 { 4454 #ifdef TARGET_ARM 4455 if (((CPUARMState *)cpu_env)->eabi) { 4456 struct target_eabi_stat64 *target_st; 4457 4458 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4459 return -TARGET_EFAULT; 4460 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4461 __put_user(host_st->st_dev, &target_st->st_dev); 4462 __put_user(host_st->st_ino, &target_st->st_ino); 4463 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4464 __put_user(host_st->st_ino, &target_st->__st_ino); 4465 #endif 4466 __put_user(host_st->st_mode, &target_st->st_mode); 4467 __put_user(host_st->st_nlink, &target_st->st_nlink); 4468 __put_user(host_st->st_uid, &target_st->st_uid); 4469 __put_user(host_st->st_gid, &target_st->st_gid); 4470 __put_user(host_st->st_rdev, &target_st->st_rdev); 4471 __put_user(host_st->st_size, &target_st->st_size); 4472 __put_user(host_st->st_blksize, &target_st->st_blksize); 4473 __put_user(host_st->st_blocks, &target_st->st_blocks); 4474 __put_user(host_st->st_atime, &target_st->target_st_atime); 4475 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4476 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4477 unlock_user_struct(target_st, target_addr, 1); 4478 } else 4479 #endif 4480 { 4481 #if 
TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA) 4482 struct target_stat *target_st; 4483 #else 4484 struct target_stat64 *target_st; 4485 #endif 4486 4487 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4488 return -TARGET_EFAULT; 4489 memset(target_st, 0, sizeof(*target_st)); 4490 __put_user(host_st->st_dev, &target_st->st_dev); 4491 __put_user(host_st->st_ino, &target_st->st_ino); 4492 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4493 __put_user(host_st->st_ino, &target_st->__st_ino); 4494 #endif 4495 __put_user(host_st->st_mode, &target_st->st_mode); 4496 __put_user(host_st->st_nlink, &target_st->st_nlink); 4497 __put_user(host_st->st_uid, &target_st->st_uid); 4498 __put_user(host_st->st_gid, &target_st->st_gid); 4499 __put_user(host_st->st_rdev, &target_st->st_rdev); 4500 /* XXX: better use of kernel struct */ 4501 __put_user(host_st->st_size, &target_st->st_size); 4502 __put_user(host_st->st_blksize, &target_st->st_blksize); 4503 __put_user(host_st->st_blocks, &target_st->st_blocks); 4504 __put_user(host_st->st_atime, &target_st->target_st_atime); 4505 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4506 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4507 unlock_user_struct(target_st, target_addr, 1); 4508 } 4509 4510 return 0; 4511 } 4512 #endif 4513 4514 #if defined(CONFIG_USE_NPTL) 4515 /* ??? Using host futex calls even when target atomic operations 4516 are not really atomic probably breaks things. However implementing 4517 futexes locally would make futexes shared between multiple processes 4518 tricky. However they're probably useless because guest atomic 4519 operations won't work either. */ 4520 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4521 target_ulong uaddr2, int val3) 4522 { 4523 struct timespec ts, *pts; 4524 int base_op; 4525 4526 /* ??? We assume FUTEX_* constants are the same on both host 4527 and target. 
*/ 4528 #ifdef FUTEX_CMD_MASK 4529 base_op = op & FUTEX_CMD_MASK; 4530 #else 4531 base_op = op; 4532 #endif 4533 switch (base_op) { 4534 case FUTEX_WAIT: 4535 if (timeout) { 4536 pts = &ts; 4537 target_to_host_timespec(pts, timeout); 4538 } else { 4539 pts = NULL; 4540 } 4541 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4542 pts, NULL, 0)); 4543 case FUTEX_WAKE: 4544 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4545 case FUTEX_FD: 4546 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4547 case FUTEX_REQUEUE: 4548 case FUTEX_CMP_REQUEUE: 4549 case FUTEX_WAKE_OP: 4550 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4551 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4552 But the prototype takes a `struct timespec *'; insert casts 4553 to satisfy the compiler. We do not need to tswap TIMEOUT 4554 since it's not compared to guest memory. */ 4555 pts = (struct timespec *)(uintptr_t) timeout; 4556 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4557 g2h(uaddr2), 4558 (base_op == FUTEX_CMP_REQUEUE 4559 ? tswap32(val3) 4560 : val3))); 4561 default: 4562 return -TARGET_ENOSYS; 4563 } 4564 } 4565 #endif 4566 4567 /* Map host to target signal numbers for the wait family of syscalls. 4568 Assume all other status bits are the same. 
*/ 4569 static int host_to_target_waitstatus(int status) 4570 { 4571 if (WIFSIGNALED(status)) { 4572 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4573 } 4574 if (WIFSTOPPED(status)) { 4575 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4576 | (status & 0xff); 4577 } 4578 return status; 4579 } 4580 4581 int get_osversion(void) 4582 { 4583 static int osversion; 4584 struct new_utsname buf; 4585 const char *s; 4586 int i, n, tmp; 4587 if (osversion) 4588 return osversion; 4589 if (qemu_uname_release && *qemu_uname_release) { 4590 s = qemu_uname_release; 4591 } else { 4592 if (sys_uname(&buf)) 4593 return 0; 4594 s = buf.release; 4595 } 4596 tmp = 0; 4597 for (i = 0; i < 3; i++) { 4598 n = 0; 4599 while (*s >= '0' && *s <= '9') { 4600 n *= 10; 4601 n += *s - '0'; 4602 s++; 4603 } 4604 tmp = (tmp << 8) + n; 4605 if (*s == '.') 4606 s++; 4607 } 4608 osversion = tmp; 4609 return osversion; 4610 } 4611 4612 /* do_syscall() should always have a single exit point at the end so 4613 that actions, such as logging of syscall results, can be performed. 4614 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 4615 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 4616 abi_long arg2, abi_long arg3, abi_long arg4, 4617 abi_long arg5, abi_long arg6, abi_long arg7, 4618 abi_long arg8) 4619 { 4620 abi_long ret; 4621 struct stat st; 4622 struct statfs stfs; 4623 void *p; 4624 4625 #ifdef DEBUG 4626 gemu_log("syscall %d", num); 4627 #endif 4628 if(do_strace) 4629 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 4630 4631 switch(num) { 4632 case TARGET_NR_exit: 4633 #ifdef CONFIG_USE_NPTL 4634 /* In old applications this may be used to implement _exit(2). 4635 However in threaded applictions it is used for thread termination, 4636 and _exit_group is used for application termination. 4637 Do thread termination if we have more then one thread. */ 4638 /* FIXME: This probably breaks if a signal arrives. 
We should probably 4639 be disabling signals. */ 4640 if (first_cpu->next_cpu) { 4641 TaskState *ts; 4642 CPUState **lastp; 4643 CPUState *p; 4644 4645 cpu_list_lock(); 4646 lastp = &first_cpu; 4647 p = first_cpu; 4648 while (p && p != (CPUState *)cpu_env) { 4649 lastp = &p->next_cpu; 4650 p = p->next_cpu; 4651 } 4652 /* If we didn't find the CPU for this thread then something is 4653 horribly wrong. */ 4654 if (!p) 4655 abort(); 4656 /* Remove the CPU from the list. */ 4657 *lastp = p->next_cpu; 4658 cpu_list_unlock(); 4659 ts = ((CPUState *)cpu_env)->opaque; 4660 if (ts->child_tidptr) { 4661 put_user_u32(0, ts->child_tidptr); 4662 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 4663 NULL, NULL, 0); 4664 } 4665 thread_env = NULL; 4666 g_free(cpu_env); 4667 g_free(ts); 4668 pthread_exit(NULL); 4669 } 4670 #endif 4671 #ifdef TARGET_GPROF 4672 _mcleanup(); 4673 #endif 4674 gdb_exit(cpu_env, arg1); 4675 _exit(arg1); 4676 ret = 0; /* avoid warning */ 4677 break; 4678 case TARGET_NR_read: 4679 if (arg3 == 0) 4680 ret = 0; 4681 else { 4682 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 4683 goto efault; 4684 ret = get_errno(read(arg1, p, arg3)); 4685 unlock_user(p, arg2, ret); 4686 } 4687 break; 4688 case TARGET_NR_write: 4689 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 4690 goto efault; 4691 ret = get_errno(write(arg1, p, arg3)); 4692 unlock_user(p, arg2, 0); 4693 break; 4694 case TARGET_NR_open: 4695 if (!(p = lock_user_string(arg1))) 4696 goto efault; 4697 ret = get_errno(open(path(p), 4698 target_to_host_bitmask(arg2, fcntl_flags_tbl), 4699 arg3)); 4700 unlock_user(p, arg1, 0); 4701 break; 4702 #if defined(TARGET_NR_openat) && defined(__NR_openat) 4703 case TARGET_NR_openat: 4704 if (!(p = lock_user_string(arg2))) 4705 goto efault; 4706 ret = get_errno(sys_openat(arg1, 4707 path(p), 4708 target_to_host_bitmask(arg3, fcntl_flags_tbl), 4709 arg4)); 4710 unlock_user(p, arg2, 0); 4711 break; 4712 #endif 4713 case TARGET_NR_close: 4714 ret = 
get_errno(close(arg1)); 4715 break; 4716 case TARGET_NR_brk: 4717 ret = do_brk(arg1); 4718 break; 4719 case TARGET_NR_fork: 4720 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 4721 break; 4722 #ifdef TARGET_NR_waitpid 4723 case TARGET_NR_waitpid: 4724 { 4725 int status; 4726 ret = get_errno(waitpid(arg1, &status, arg3)); 4727 if (!is_error(ret) && arg2 4728 && put_user_s32(host_to_target_waitstatus(status), arg2)) 4729 goto efault; 4730 } 4731 break; 4732 #endif 4733 #ifdef TARGET_NR_waitid 4734 case TARGET_NR_waitid: 4735 { 4736 siginfo_t info; 4737 info.si_pid = 0; 4738 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 4739 if (!is_error(ret) && arg3 && info.si_pid != 0) { 4740 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 4741 goto efault; 4742 host_to_target_siginfo(p, &info); 4743 unlock_user(p, arg3, sizeof(target_siginfo_t)); 4744 } 4745 } 4746 break; 4747 #endif 4748 #ifdef TARGET_NR_creat /* not on alpha */ 4749 case TARGET_NR_creat: 4750 if (!(p = lock_user_string(arg1))) 4751 goto efault; 4752 ret = get_errno(creat(p, arg2)); 4753 unlock_user(p, arg1, 0); 4754 break; 4755 #endif 4756 case TARGET_NR_link: 4757 { 4758 void * p2; 4759 p = lock_user_string(arg1); 4760 p2 = lock_user_string(arg2); 4761 if (!p || !p2) 4762 ret = -TARGET_EFAULT; 4763 else 4764 ret = get_errno(link(p, p2)); 4765 unlock_user(p2, arg2, 0); 4766 unlock_user(p, arg1, 0); 4767 } 4768 break; 4769 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 4770 case TARGET_NR_linkat: 4771 { 4772 void * p2 = NULL; 4773 if (!arg2 || !arg4) 4774 goto efault; 4775 p = lock_user_string(arg2); 4776 p2 = lock_user_string(arg4); 4777 if (!p || !p2) 4778 ret = -TARGET_EFAULT; 4779 else 4780 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); 4781 unlock_user(p, arg2, 0); 4782 unlock_user(p2, arg4, 0); 4783 } 4784 break; 4785 #endif 4786 case TARGET_NR_unlink: 4787 if (!(p = lock_user_string(arg1))) 4788 goto efault; 4789 ret = get_errno(unlink(p)); 4790 
unlock_user(p, arg1, 0); 4791 break; 4792 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 4793 case TARGET_NR_unlinkat: 4794 if (!(p = lock_user_string(arg2))) 4795 goto efault; 4796 ret = get_errno(sys_unlinkat(arg1, p, arg3)); 4797 unlock_user(p, arg2, 0); 4798 break; 4799 #endif 4800 case TARGET_NR_execve: 4801 { 4802 char **argp, **envp; 4803 int argc, envc; 4804 abi_ulong gp; 4805 abi_ulong guest_argp; 4806 abi_ulong guest_envp; 4807 abi_ulong addr; 4808 char **q; 4809 4810 argc = 0; 4811 guest_argp = arg2; 4812 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 4813 if (get_user_ual(addr, gp)) 4814 goto efault; 4815 if (!addr) 4816 break; 4817 argc++; 4818 } 4819 envc = 0; 4820 guest_envp = arg3; 4821 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 4822 if (get_user_ual(addr, gp)) 4823 goto efault; 4824 if (!addr) 4825 break; 4826 envc++; 4827 } 4828 4829 argp = alloca((argc + 1) * sizeof(void *)); 4830 envp = alloca((envc + 1) * sizeof(void *)); 4831 4832 for (gp = guest_argp, q = argp; gp; 4833 gp += sizeof(abi_ulong), q++) { 4834 if (get_user_ual(addr, gp)) 4835 goto execve_efault; 4836 if (!addr) 4837 break; 4838 if (!(*q = lock_user_string(addr))) 4839 goto execve_efault; 4840 } 4841 *q = NULL; 4842 4843 for (gp = guest_envp, q = envp; gp; 4844 gp += sizeof(abi_ulong), q++) { 4845 if (get_user_ual(addr, gp)) 4846 goto execve_efault; 4847 if (!addr) 4848 break; 4849 if (!(*q = lock_user_string(addr))) 4850 goto execve_efault; 4851 } 4852 *q = NULL; 4853 4854 if (!(p = lock_user_string(arg1))) 4855 goto execve_efault; 4856 ret = get_errno(execve(p, argp, envp)); 4857 unlock_user(p, arg1, 0); 4858 4859 goto execve_end; 4860 4861 execve_efault: 4862 ret = -TARGET_EFAULT; 4863 4864 execve_end: 4865 for (gp = guest_argp, q = argp; *q; 4866 gp += sizeof(abi_ulong), q++) { 4867 if (get_user_ual(addr, gp) 4868 || !addr) 4869 break; 4870 unlock_user(*q, addr, 0); 4871 } 4872 for (gp = guest_envp, q = envp; *q; 4873 gp += sizeof(abi_ulong), q++) { 
4874 if (get_user_ual(addr, gp) 4875 || !addr) 4876 break; 4877 unlock_user(*q, addr, 0); 4878 } 4879 } 4880 break; 4881 case TARGET_NR_chdir: 4882 if (!(p = lock_user_string(arg1))) 4883 goto efault; 4884 ret = get_errno(chdir(p)); 4885 unlock_user(p, arg1, 0); 4886 break; 4887 #ifdef TARGET_NR_time 4888 case TARGET_NR_time: 4889 { 4890 time_t host_time; 4891 ret = get_errno(time(&host_time)); 4892 if (!is_error(ret) 4893 && arg1 4894 && put_user_sal(host_time, arg1)) 4895 goto efault; 4896 } 4897 break; 4898 #endif 4899 case TARGET_NR_mknod: 4900 if (!(p = lock_user_string(arg1))) 4901 goto efault; 4902 ret = get_errno(mknod(p, arg2, arg3)); 4903 unlock_user(p, arg1, 0); 4904 break; 4905 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 4906 case TARGET_NR_mknodat: 4907 if (!(p = lock_user_string(arg2))) 4908 goto efault; 4909 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); 4910 unlock_user(p, arg2, 0); 4911 break; 4912 #endif 4913 case TARGET_NR_chmod: 4914 if (!(p = lock_user_string(arg1))) 4915 goto efault; 4916 ret = get_errno(chmod(p, arg2)); 4917 unlock_user(p, arg1, 0); 4918 break; 4919 #ifdef TARGET_NR_break 4920 case TARGET_NR_break: 4921 goto unimplemented; 4922 #endif 4923 #ifdef TARGET_NR_oldstat 4924 case TARGET_NR_oldstat: 4925 goto unimplemented; 4926 #endif 4927 case TARGET_NR_lseek: 4928 ret = get_errno(lseek(arg1, arg2, arg3)); 4929 break; 4930 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 4931 /* Alpha specific */ 4932 case TARGET_NR_getxpid: 4933 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 4934 ret = get_errno(getpid()); 4935 break; 4936 #endif 4937 #ifdef TARGET_NR_getpid 4938 case TARGET_NR_getpid: 4939 ret = get_errno(getpid()); 4940 break; 4941 #endif 4942 case TARGET_NR_mount: 4943 { 4944 /* need to look at the data field */ 4945 void *p2, *p3; 4946 p = lock_user_string(arg1); 4947 p2 = lock_user_string(arg2); 4948 p3 = lock_user_string(arg3); 4949 if (!p || !p2 || !p3) 4950 ret = -TARGET_EFAULT; 4951 else { 4952 
/* FIXME - arg5 should be locked, but it isn't clear how to 4953 * do that since it's not guaranteed to be a NULL-terminated 4954 * string. 4955 */ 4956 if ( ! arg5 ) 4957 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 4958 else 4959 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 4960 } 4961 unlock_user(p, arg1, 0); 4962 unlock_user(p2, arg2, 0); 4963 unlock_user(p3, arg3, 0); 4964 break; 4965 } 4966 #ifdef TARGET_NR_umount 4967 case TARGET_NR_umount: 4968 if (!(p = lock_user_string(arg1))) 4969 goto efault; 4970 ret = get_errno(umount(p)); 4971 unlock_user(p, arg1, 0); 4972 break; 4973 #endif 4974 #ifdef TARGET_NR_stime /* not on alpha */ 4975 case TARGET_NR_stime: 4976 { 4977 time_t host_time; 4978 if (get_user_sal(host_time, arg1)) 4979 goto efault; 4980 ret = get_errno(stime(&host_time)); 4981 } 4982 break; 4983 #endif 4984 case TARGET_NR_ptrace: 4985 goto unimplemented; 4986 #ifdef TARGET_NR_alarm /* not on alpha */ 4987 case TARGET_NR_alarm: 4988 ret = alarm(arg1); 4989 break; 4990 #endif 4991 #ifdef TARGET_NR_oldfstat 4992 case TARGET_NR_oldfstat: 4993 goto unimplemented; 4994 #endif 4995 #ifdef TARGET_NR_pause /* not on alpha */ 4996 case TARGET_NR_pause: 4997 ret = get_errno(pause()); 4998 break; 4999 #endif 5000 #ifdef TARGET_NR_utime 5001 case TARGET_NR_utime: 5002 { 5003 struct utimbuf tbuf, *host_tbuf; 5004 struct target_utimbuf *target_tbuf; 5005 if (arg2) { 5006 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5007 goto efault; 5008 tbuf.actime = tswapl(target_tbuf->actime); 5009 tbuf.modtime = tswapl(target_tbuf->modtime); 5010 unlock_user_struct(target_tbuf, arg2, 0); 5011 host_tbuf = &tbuf; 5012 } else { 5013 host_tbuf = NULL; 5014 } 5015 if (!(p = lock_user_string(arg1))) 5016 goto efault; 5017 ret = get_errno(utime(p, host_tbuf)); 5018 unlock_user(p, arg1, 0); 5019 } 5020 break; 5021 #endif 5022 case TARGET_NR_utimes: 5023 { 5024 struct timeval *tvp, tv[2]; 5025 if (arg2) { 5026 if 
(copy_from_user_timeval(&tv[0], arg2) 5027 || copy_from_user_timeval(&tv[1], 5028 arg2 + sizeof(struct target_timeval))) 5029 goto efault; 5030 tvp = tv; 5031 } else { 5032 tvp = NULL; 5033 } 5034 if (!(p = lock_user_string(arg1))) 5035 goto efault; 5036 ret = get_errno(utimes(p, tvp)); 5037 unlock_user(p, arg1, 0); 5038 } 5039 break; 5040 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 5041 case TARGET_NR_futimesat: 5042 { 5043 struct timeval *tvp, tv[2]; 5044 if (arg3) { 5045 if (copy_from_user_timeval(&tv[0], arg3) 5046 || copy_from_user_timeval(&tv[1], 5047 arg3 + sizeof(struct target_timeval))) 5048 goto efault; 5049 tvp = tv; 5050 } else { 5051 tvp = NULL; 5052 } 5053 if (!(p = lock_user_string(arg2))) 5054 goto efault; 5055 ret = get_errno(sys_futimesat(arg1, path(p), tvp)); 5056 unlock_user(p, arg2, 0); 5057 } 5058 break; 5059 #endif 5060 #ifdef TARGET_NR_stty 5061 case TARGET_NR_stty: 5062 goto unimplemented; 5063 #endif 5064 #ifdef TARGET_NR_gtty 5065 case TARGET_NR_gtty: 5066 goto unimplemented; 5067 #endif 5068 case TARGET_NR_access: 5069 if (!(p = lock_user_string(arg1))) 5070 goto efault; 5071 ret = get_errno(access(path(p), arg2)); 5072 unlock_user(p, arg1, 0); 5073 break; 5074 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5075 case TARGET_NR_faccessat: 5076 if (!(p = lock_user_string(arg2))) 5077 goto efault; 5078 ret = get_errno(sys_faccessat(arg1, p, arg3)); 5079 unlock_user(p, arg2, 0); 5080 break; 5081 #endif 5082 #ifdef TARGET_NR_nice /* not on alpha */ 5083 case TARGET_NR_nice: 5084 ret = get_errno(nice(arg1)); 5085 break; 5086 #endif 5087 #ifdef TARGET_NR_ftime 5088 case TARGET_NR_ftime: 5089 goto unimplemented; 5090 #endif 5091 case TARGET_NR_sync: 5092 sync(); 5093 ret = 0; 5094 break; 5095 case TARGET_NR_kill: 5096 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5097 break; 5098 case TARGET_NR_rename: 5099 { 5100 void *p2; 5101 p = lock_user_string(arg1); 5102 p2 = lock_user_string(arg2); 5103 if (!p || 
!p2) 5104 ret = -TARGET_EFAULT; 5105 else 5106 ret = get_errno(rename(p, p2)); 5107 unlock_user(p2, arg2, 0); 5108 unlock_user(p, arg1, 0); 5109 } 5110 break; 5111 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 5112 case TARGET_NR_renameat: 5113 { 5114 void *p2; 5115 p = lock_user_string(arg2); 5116 p2 = lock_user_string(arg4); 5117 if (!p || !p2) 5118 ret = -TARGET_EFAULT; 5119 else 5120 ret = get_errno(sys_renameat(arg1, p, arg3, p2)); 5121 unlock_user(p2, arg4, 0); 5122 unlock_user(p, arg2, 0); 5123 } 5124 break; 5125 #endif 5126 case TARGET_NR_mkdir: 5127 if (!(p = lock_user_string(arg1))) 5128 goto efault; 5129 ret = get_errno(mkdir(p, arg2)); 5130 unlock_user(p, arg1, 0); 5131 break; 5132 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 5133 case TARGET_NR_mkdirat: 5134 if (!(p = lock_user_string(arg2))) 5135 goto efault; 5136 ret = get_errno(sys_mkdirat(arg1, p, arg3)); 5137 unlock_user(p, arg2, 0); 5138 break; 5139 #endif 5140 case TARGET_NR_rmdir: 5141 if (!(p = lock_user_string(arg1))) 5142 goto efault; 5143 ret = get_errno(rmdir(p)); 5144 unlock_user(p, arg1, 0); 5145 break; 5146 case TARGET_NR_dup: 5147 ret = get_errno(dup(arg1)); 5148 break; 5149 case TARGET_NR_pipe: 5150 ret = do_pipe(cpu_env, arg1, 0, 0); 5151 break; 5152 #ifdef TARGET_NR_pipe2 5153 case TARGET_NR_pipe2: 5154 ret = do_pipe(cpu_env, arg1, arg2, 1); 5155 break; 5156 #endif 5157 case TARGET_NR_times: 5158 { 5159 struct target_tms *tmsp; 5160 struct tms tms; 5161 ret = get_errno(times(&tms)); 5162 if (arg1) { 5163 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5164 if (!tmsp) 5165 goto efault; 5166 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime)); 5167 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime)); 5168 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime)); 5169 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime)); 5170 } 5171 if (!is_error(ret)) 5172 ret = host_to_target_clock_t(ret); 5173 } 
5174 break; 5175 #ifdef TARGET_NR_prof 5176 case TARGET_NR_prof: 5177 goto unimplemented; 5178 #endif 5179 #ifdef TARGET_NR_signal 5180 case TARGET_NR_signal: 5181 goto unimplemented; 5182 #endif 5183 case TARGET_NR_acct: 5184 if (arg1 == 0) { 5185 ret = get_errno(acct(NULL)); 5186 } else { 5187 if (!(p = lock_user_string(arg1))) 5188 goto efault; 5189 ret = get_errno(acct(path(p))); 5190 unlock_user(p, arg1, 0); 5191 } 5192 break; 5193 #ifdef TARGET_NR_umount2 /* not on alpha */ 5194 case TARGET_NR_umount2: 5195 if (!(p = lock_user_string(arg1))) 5196 goto efault; 5197 ret = get_errno(umount2(p, arg2)); 5198 unlock_user(p, arg1, 0); 5199 break; 5200 #endif 5201 #ifdef TARGET_NR_lock 5202 case TARGET_NR_lock: 5203 goto unimplemented; 5204 #endif 5205 case TARGET_NR_ioctl: 5206 ret = do_ioctl(arg1, arg2, arg3); 5207 break; 5208 case TARGET_NR_fcntl: 5209 ret = do_fcntl(arg1, arg2, arg3); 5210 break; 5211 #ifdef TARGET_NR_mpx 5212 case TARGET_NR_mpx: 5213 goto unimplemented; 5214 #endif 5215 case TARGET_NR_setpgid: 5216 ret = get_errno(setpgid(arg1, arg2)); 5217 break; 5218 #ifdef TARGET_NR_ulimit 5219 case TARGET_NR_ulimit: 5220 goto unimplemented; 5221 #endif 5222 #ifdef TARGET_NR_oldolduname 5223 case TARGET_NR_oldolduname: 5224 goto unimplemented; 5225 #endif 5226 case TARGET_NR_umask: 5227 ret = get_errno(umask(arg1)); 5228 break; 5229 case TARGET_NR_chroot: 5230 if (!(p = lock_user_string(arg1))) 5231 goto efault; 5232 ret = get_errno(chroot(p)); 5233 unlock_user(p, arg1, 0); 5234 break; 5235 case TARGET_NR_ustat: 5236 goto unimplemented; 5237 case TARGET_NR_dup2: 5238 ret = get_errno(dup2(arg1, arg2)); 5239 break; 5240 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5241 case TARGET_NR_dup3: 5242 ret = get_errno(dup3(arg1, arg2, arg3)); 5243 break; 5244 #endif 5245 #ifdef TARGET_NR_getppid /* not on alpha */ 5246 case TARGET_NR_getppid: 5247 ret = get_errno(getppid()); 5248 break; 5249 #endif 5250 case TARGET_NR_getpgrp: 5251 ret = get_errno(getpgrp()); 
5252 break; 5253 case TARGET_NR_setsid: 5254 ret = get_errno(setsid()); 5255 break; 5256 #ifdef TARGET_NR_sigaction 5257 case TARGET_NR_sigaction: 5258 { 5259 #if defined(TARGET_ALPHA) 5260 struct target_sigaction act, oact, *pact = 0; 5261 struct target_old_sigaction *old_act; 5262 if (arg2) { 5263 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5264 goto efault; 5265 act._sa_handler = old_act->_sa_handler; 5266 target_siginitset(&act.sa_mask, old_act->sa_mask); 5267 act.sa_flags = old_act->sa_flags; 5268 act.sa_restorer = 0; 5269 unlock_user_struct(old_act, arg2, 0); 5270 pact = &act; 5271 } 5272 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5273 if (!is_error(ret) && arg3) { 5274 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5275 goto efault; 5276 old_act->_sa_handler = oact._sa_handler; 5277 old_act->sa_mask = oact.sa_mask.sig[0]; 5278 old_act->sa_flags = oact.sa_flags; 5279 unlock_user_struct(old_act, arg3, 1); 5280 } 5281 #elif defined(TARGET_MIPS) 5282 struct target_sigaction act, oact, *pact, *old_act; 5283 5284 if (arg2) { 5285 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5286 goto efault; 5287 act._sa_handler = old_act->_sa_handler; 5288 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5289 act.sa_flags = old_act->sa_flags; 5290 unlock_user_struct(old_act, arg2, 0); 5291 pact = &act; 5292 } else { 5293 pact = NULL; 5294 } 5295 5296 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5297 5298 if (!is_error(ret) && arg3) { 5299 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5300 goto efault; 5301 old_act->_sa_handler = oact._sa_handler; 5302 old_act->sa_flags = oact.sa_flags; 5303 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5304 old_act->sa_mask.sig[1] = 0; 5305 old_act->sa_mask.sig[2] = 0; 5306 old_act->sa_mask.sig[3] = 0; 5307 unlock_user_struct(old_act, arg3, 1); 5308 } 5309 #else 5310 struct target_old_sigaction *old_act; 5311 struct target_sigaction act, oact, *pact; 5312 if (arg2) { 5313 if 
(!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5314 goto efault; 5315 act._sa_handler = old_act->_sa_handler; 5316 target_siginitset(&act.sa_mask, old_act->sa_mask); 5317 act.sa_flags = old_act->sa_flags; 5318 act.sa_restorer = old_act->sa_restorer; 5319 unlock_user_struct(old_act, arg2, 0); 5320 pact = &act; 5321 } else { 5322 pact = NULL; 5323 } 5324 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5325 if (!is_error(ret) && arg3) { 5326 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5327 goto efault; 5328 old_act->_sa_handler = oact._sa_handler; 5329 old_act->sa_mask = oact.sa_mask.sig[0]; 5330 old_act->sa_flags = oact.sa_flags; 5331 old_act->sa_restorer = oact.sa_restorer; 5332 unlock_user_struct(old_act, arg3, 1); 5333 } 5334 #endif 5335 } 5336 break; 5337 #endif 5338 case TARGET_NR_rt_sigaction: 5339 { 5340 #if defined(TARGET_ALPHA) 5341 struct target_sigaction act, oact, *pact = 0; 5342 struct target_rt_sigaction *rt_act; 5343 /* ??? arg4 == sizeof(sigset_t). */ 5344 if (arg2) { 5345 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5346 goto efault; 5347 act._sa_handler = rt_act->_sa_handler; 5348 act.sa_mask = rt_act->sa_mask; 5349 act.sa_flags = rt_act->sa_flags; 5350 act.sa_restorer = arg5; 5351 unlock_user_struct(rt_act, arg2, 0); 5352 pact = &act; 5353 } 5354 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5355 if (!is_error(ret) && arg3) { 5356 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5357 goto efault; 5358 rt_act->_sa_handler = oact._sa_handler; 5359 rt_act->sa_mask = oact.sa_mask; 5360 rt_act->sa_flags = oact.sa_flags; 5361 unlock_user_struct(rt_act, arg3, 1); 5362 } 5363 #else 5364 struct target_sigaction *act; 5365 struct target_sigaction *oact; 5366 5367 if (arg2) { 5368 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5369 goto efault; 5370 } else 5371 act = NULL; 5372 if (arg3) { 5373 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5374 ret = -TARGET_EFAULT; 5375 goto rt_sigaction_fail; 5376 } 5377 } else 
5378 oact = NULL; 5379 ret = get_errno(do_sigaction(arg1, act, oact)); 5380 rt_sigaction_fail: 5381 if (act) 5382 unlock_user_struct(act, arg2, 0); 5383 if (oact) 5384 unlock_user_struct(oact, arg3, 1); 5385 #endif 5386 } 5387 break; 5388 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5389 case TARGET_NR_sgetmask: 5390 { 5391 sigset_t cur_set; 5392 abi_ulong target_set; 5393 sigprocmask(0, NULL, &cur_set); 5394 host_to_target_old_sigset(&target_set, &cur_set); 5395 ret = target_set; 5396 } 5397 break; 5398 #endif 5399 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5400 case TARGET_NR_ssetmask: 5401 { 5402 sigset_t set, oset, cur_set; 5403 abi_ulong target_set = arg1; 5404 sigprocmask(0, NULL, &cur_set); 5405 target_to_host_old_sigset(&set, &target_set); 5406 sigorset(&set, &set, &cur_set); 5407 sigprocmask(SIG_SETMASK, &set, &oset); 5408 host_to_target_old_sigset(&target_set, &oset); 5409 ret = target_set; 5410 } 5411 break; 5412 #endif 5413 #ifdef TARGET_NR_sigprocmask 5414 case TARGET_NR_sigprocmask: 5415 { 5416 #if defined(TARGET_ALPHA) 5417 sigset_t set, oldset; 5418 abi_ulong mask; 5419 int how; 5420 5421 switch (arg1) { 5422 case TARGET_SIG_BLOCK: 5423 how = SIG_BLOCK; 5424 break; 5425 case TARGET_SIG_UNBLOCK: 5426 how = SIG_UNBLOCK; 5427 break; 5428 case TARGET_SIG_SETMASK: 5429 how = SIG_SETMASK; 5430 break; 5431 default: 5432 ret = -TARGET_EINVAL; 5433 goto fail; 5434 } 5435 mask = arg2; 5436 target_to_host_old_sigset(&set, &mask); 5437 5438 ret = get_errno(sigprocmask(how, &set, &oldset)); 5439 5440 if (!is_error(ret)) { 5441 host_to_target_old_sigset(&mask, &oldset); 5442 ret = mask; 5443 ((CPUAlphaState *)cpu_env)->[IR_V0] = 0; /* force no error */ 5444 } 5445 #else 5446 sigset_t set, oldset, *set_ptr; 5447 int how; 5448 5449 if (arg2) { 5450 switch (arg1) { 5451 case TARGET_SIG_BLOCK: 5452 how = SIG_BLOCK; 5453 break; 5454 case TARGET_SIG_UNBLOCK: 5455 how = SIG_UNBLOCK; 5456 break; 5457 case TARGET_SIG_SETMASK: 5458 how = SIG_SETMASK; 5459 break; 5460 
default: 5461 ret = -TARGET_EINVAL; 5462 goto fail; 5463 } 5464 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5465 goto efault; 5466 target_to_host_old_sigset(&set, p); 5467 unlock_user(p, arg2, 0); 5468 set_ptr = &set; 5469 } else { 5470 how = 0; 5471 set_ptr = NULL; 5472 } 5473 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 5474 if (!is_error(ret) && arg3) { 5475 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 5476 goto efault; 5477 host_to_target_old_sigset(p, &oldset); 5478 unlock_user(p, arg3, sizeof(target_sigset_t)); 5479 } 5480 #endif 5481 } 5482 break; 5483 #endif 5484 case TARGET_NR_rt_sigprocmask: 5485 { 5486 int how = arg1; 5487 sigset_t set, oldset, *set_ptr; 5488 5489 if (arg2) { 5490 switch(how) { 5491 case TARGET_SIG_BLOCK: 5492 how = SIG_BLOCK; 5493 break; 5494 case TARGET_SIG_UNBLOCK: 5495 how = SIG_UNBLOCK; 5496 break; 5497 case TARGET_SIG_SETMASK: 5498 how = SIG_SETMASK; 5499 break; 5500 default: 5501 ret = -TARGET_EINVAL; 5502 goto fail; 5503 } 5504 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5505 goto efault; 5506 target_to_host_sigset(&set, p); 5507 unlock_user(p, arg2, 0); 5508 set_ptr = &set; 5509 } else { 5510 how = 0; 5511 set_ptr = NULL; 5512 } 5513 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 5514 if (!is_error(ret) && arg3) { 5515 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 5516 goto efault; 5517 host_to_target_sigset(p, &oldset); 5518 unlock_user(p, arg3, sizeof(target_sigset_t)); 5519 } 5520 } 5521 break; 5522 #ifdef TARGET_NR_sigpending 5523 case TARGET_NR_sigpending: 5524 { 5525 sigset_t set; 5526 ret = get_errno(sigpending(&set)); 5527 if (!is_error(ret)) { 5528 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 5529 goto efault; 5530 host_to_target_old_sigset(p, &set); 5531 unlock_user(p, arg1, sizeof(target_sigset_t)); 5532 } 5533 } 5534 break; 5535 #endif 5536 case TARGET_NR_rt_sigpending: 5537 { 5538 
sigset_t set; 5539 ret = get_errno(sigpending(&set)); 5540 if (!is_error(ret)) { 5541 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 5542 goto efault; 5543 host_to_target_sigset(p, &set); 5544 unlock_user(p, arg1, sizeof(target_sigset_t)); 5545 } 5546 } 5547 break; 5548 #ifdef TARGET_NR_sigsuspend 5549 case TARGET_NR_sigsuspend: 5550 { 5551 sigset_t set; 5552 #if defined(TARGET_ALPHA) 5553 abi_ulong mask = arg1; 5554 target_to_host_old_sigset(&set, &mask); 5555 #else 5556 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5557 goto efault; 5558 target_to_host_old_sigset(&set, p); 5559 unlock_user(p, arg1, 0); 5560 #endif 5561 ret = get_errno(sigsuspend(&set)); 5562 } 5563 break; 5564 #endif 5565 case TARGET_NR_rt_sigsuspend: 5566 { 5567 sigset_t set; 5568 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5569 goto efault; 5570 target_to_host_sigset(&set, p); 5571 unlock_user(p, arg1, 0); 5572 ret = get_errno(sigsuspend(&set)); 5573 } 5574 break; 5575 case TARGET_NR_rt_sigtimedwait: 5576 { 5577 sigset_t set; 5578 struct timespec uts, *puts; 5579 siginfo_t uinfo; 5580 5581 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5582 goto efault; 5583 target_to_host_sigset(&set, p); 5584 unlock_user(p, arg1, 0); 5585 if (arg3) { 5586 puts = &uts; 5587 target_to_host_timespec(puts, arg3); 5588 } else { 5589 puts = NULL; 5590 } 5591 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 5592 if (!is_error(ret) && arg2) { 5593 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 5594 goto efault; 5595 host_to_target_siginfo(p, &uinfo); 5596 unlock_user(p, arg2, sizeof(target_siginfo_t)); 5597 } 5598 } 5599 break; 5600 case TARGET_NR_rt_sigqueueinfo: 5601 { 5602 siginfo_t uinfo; 5603 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 5604 goto efault; 5605 target_to_host_siginfo(&uinfo, p); 5606 unlock_user(p, arg1, 0); 5607 ret = get_errno(sys_rt_sigqueueinfo(arg1, 
arg2, &uinfo)); 5608 } 5609 break; 5610 #ifdef TARGET_NR_sigreturn 5611 case TARGET_NR_sigreturn: 5612 /* NOTE: ret is eax, so not transcoding must be done */ 5613 ret = do_sigreturn(cpu_env); 5614 break; 5615 #endif 5616 case TARGET_NR_rt_sigreturn: 5617 /* NOTE: ret is eax, so not transcoding must be done */ 5618 ret = do_rt_sigreturn(cpu_env); 5619 break; 5620 case TARGET_NR_sethostname: 5621 if (!(p = lock_user_string(arg1))) 5622 goto efault; 5623 ret = get_errno(sethostname(p, arg2)); 5624 unlock_user(p, arg1, 0); 5625 break; 5626 case TARGET_NR_setrlimit: 5627 { 5628 int resource = target_to_host_resource(arg1); 5629 struct target_rlimit *target_rlim; 5630 struct rlimit rlim; 5631 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 5632 goto efault; 5633 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 5634 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 5635 unlock_user_struct(target_rlim, arg2, 0); 5636 ret = get_errno(setrlimit(resource, &rlim)); 5637 } 5638 break; 5639 case TARGET_NR_getrlimit: 5640 { 5641 int resource = target_to_host_resource(arg1); 5642 struct target_rlimit *target_rlim; 5643 struct rlimit rlim; 5644 5645 ret = get_errno(getrlimit(resource, &rlim)); 5646 if (!is_error(ret)) { 5647 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 5648 goto efault; 5649 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 5650 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 5651 unlock_user_struct(target_rlim, arg2, 1); 5652 } 5653 } 5654 break; 5655 case TARGET_NR_getrusage: 5656 { 5657 struct rusage rusage; 5658 ret = get_errno(getrusage(arg1, &rusage)); 5659 if (!is_error(ret)) { 5660 host_to_target_rusage(arg2, &rusage); 5661 } 5662 } 5663 break; 5664 case TARGET_NR_gettimeofday: 5665 { 5666 struct timeval tv; 5667 ret = get_errno(gettimeofday(&tv, NULL)); 5668 if (!is_error(ret)) { 5669 if (copy_to_user_timeval(arg1, &tv)) 5670 goto efault; 5671 } 5672 } 5673 break; 5674 case 
TARGET_NR_settimeofday: 5675 { 5676 struct timeval tv; 5677 if (copy_from_user_timeval(&tv, arg1)) 5678 goto efault; 5679 ret = get_errno(settimeofday(&tv, NULL)); 5680 } 5681 break; 5682 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390) 5683 case TARGET_NR_select: 5684 { 5685 struct target_sel_arg_struct *sel; 5686 abi_ulong inp, outp, exp, tvp; 5687 long nsel; 5688 5689 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 5690 goto efault; 5691 nsel = tswapl(sel->n); 5692 inp = tswapl(sel->inp); 5693 outp = tswapl(sel->outp); 5694 exp = tswapl(sel->exp); 5695 tvp = tswapl(sel->tvp); 5696 unlock_user_struct(sel, arg1, 0); 5697 ret = do_select(nsel, inp, outp, exp, tvp); 5698 } 5699 break; 5700 #endif 5701 #ifdef TARGET_NR_pselect6 5702 case TARGET_NR_pselect6: 5703 { 5704 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 5705 fd_set rfds, wfds, efds; 5706 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 5707 struct timespec ts, *ts_ptr; 5708 5709 /* 5710 * The 6th arg is actually two args smashed together, 5711 * so we cannot use the C library. 5712 */ 5713 sigset_t set; 5714 struct { 5715 sigset_t *set; 5716 size_t size; 5717 } sig, *sig_ptr; 5718 5719 abi_ulong arg_sigset, arg_sigsize, *arg7; 5720 target_sigset_t *target_sigset; 5721 5722 n = arg1; 5723 rfd_addr = arg2; 5724 wfd_addr = arg3; 5725 efd_addr = arg4; 5726 ts_addr = arg5; 5727 5728 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 5729 if (ret) { 5730 goto fail; 5731 } 5732 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 5733 if (ret) { 5734 goto fail; 5735 } 5736 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 5737 if (ret) { 5738 goto fail; 5739 } 5740 5741 /* 5742 * This takes a timespec, and not a timeval, so we cannot 5743 * use the do_select() helper ... 
5744 */ 5745 if (ts_addr) { 5746 if (target_to_host_timespec(&ts, ts_addr)) { 5747 goto efault; 5748 } 5749 ts_ptr = &ts; 5750 } else { 5751 ts_ptr = NULL; 5752 } 5753 5754 /* Extract the two packed args for the sigset */ 5755 if (arg6) { 5756 sig_ptr = &sig; 5757 sig.size = _NSIG / 8; 5758 5759 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 5760 if (!arg7) { 5761 goto efault; 5762 } 5763 arg_sigset = tswapl(arg7[0]); 5764 arg_sigsize = tswapl(arg7[1]); 5765 unlock_user(arg7, arg6, 0); 5766 5767 if (arg_sigset) { 5768 sig.set = &set; 5769 if (arg_sigsize != sizeof(*target_sigset)) { 5770 /* Like the kernel, we enforce correct size sigsets */ 5771 ret = -TARGET_EINVAL; 5772 goto fail; 5773 } 5774 target_sigset = lock_user(VERIFY_READ, arg_sigset, 5775 sizeof(*target_sigset), 1); 5776 if (!target_sigset) { 5777 goto efault; 5778 } 5779 target_to_host_sigset(&set, target_sigset); 5780 unlock_user(target_sigset, arg_sigset, 0); 5781 } else { 5782 sig.set = NULL; 5783 } 5784 } else { 5785 sig_ptr = NULL; 5786 } 5787 5788 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 5789 ts_ptr, sig_ptr)); 5790 5791 if (!is_error(ret)) { 5792 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 5793 goto efault; 5794 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 5795 goto efault; 5796 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 5797 goto efault; 5798 5799 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 5800 goto efault; 5801 } 5802 } 5803 break; 5804 #endif 5805 case TARGET_NR_symlink: 5806 { 5807 void *p2; 5808 p = lock_user_string(arg1); 5809 p2 = lock_user_string(arg2); 5810 if (!p || !p2) 5811 ret = -TARGET_EFAULT; 5812 else 5813 ret = get_errno(symlink(p, p2)); 5814 unlock_user(p2, arg2, 0); 5815 unlock_user(p, arg1, 0); 5816 } 5817 break; 5818 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 5819 case TARGET_NR_symlinkat: 5820 { 5821 void *p2; 5822 p = lock_user_string(arg1); 5823 p2 = lock_user_string(arg3); 
5824 if (!p || !p2) 5825 ret = -TARGET_EFAULT; 5826 else 5827 ret = get_errno(sys_symlinkat(p, arg2, p2)); 5828 unlock_user(p2, arg3, 0); 5829 unlock_user(p, arg1, 0); 5830 } 5831 break; 5832 #endif 5833 #ifdef TARGET_NR_oldlstat 5834 case TARGET_NR_oldlstat: 5835 goto unimplemented; 5836 #endif 5837 case TARGET_NR_readlink: 5838 { 5839 void *p2, *temp; 5840 p = lock_user_string(arg1); 5841 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 5842 if (!p || !p2) 5843 ret = -TARGET_EFAULT; 5844 else { 5845 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) { 5846 char real[PATH_MAX]; 5847 temp = realpath(exec_path,real); 5848 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ; 5849 snprintf((char *)p2, arg3, "%s", real); 5850 } 5851 else 5852 ret = get_errno(readlink(path(p), p2, arg3)); 5853 } 5854 unlock_user(p2, arg2, ret); 5855 unlock_user(p, arg1, 0); 5856 } 5857 break; 5858 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 5859 case TARGET_NR_readlinkat: 5860 { 5861 void *p2; 5862 p = lock_user_string(arg2); 5863 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 5864 if (!p || !p2) 5865 ret = -TARGET_EFAULT; 5866 else 5867 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); 5868 unlock_user(p2, arg3, ret); 5869 unlock_user(p, arg2, 0); 5870 } 5871 break; 5872 #endif 5873 #ifdef TARGET_NR_uselib 5874 case TARGET_NR_uselib: 5875 goto unimplemented; 5876 #endif 5877 #ifdef TARGET_NR_swapon 5878 case TARGET_NR_swapon: 5879 if (!(p = lock_user_string(arg1))) 5880 goto efault; 5881 ret = get_errno(swapon(p, arg2)); 5882 unlock_user(p, arg1, 0); 5883 break; 5884 #endif 5885 case TARGET_NR_reboot: 5886 goto unimplemented; 5887 #ifdef TARGET_NR_readdir 5888 case TARGET_NR_readdir: 5889 goto unimplemented; 5890 #endif 5891 #ifdef TARGET_NR_mmap 5892 case TARGET_NR_mmap: 5893 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \ 5894 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 5895 || 
defined(TARGET_S390X) 5896 { 5897 abi_ulong *v; 5898 abi_ulong v1, v2, v3, v4, v5, v6; 5899 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 5900 goto efault; 5901 v1 = tswapl(v[0]); 5902 v2 = tswapl(v[1]); 5903 v3 = tswapl(v[2]); 5904 v4 = tswapl(v[3]); 5905 v5 = tswapl(v[4]); 5906 v6 = tswapl(v[5]); 5907 unlock_user(v, arg1, 0); 5908 ret = get_errno(target_mmap(v1, v2, v3, 5909 target_to_host_bitmask(v4, mmap_flags_tbl), 5910 v5, v6)); 5911 } 5912 #else 5913 ret = get_errno(target_mmap(arg1, arg2, arg3, 5914 target_to_host_bitmask(arg4, mmap_flags_tbl), 5915 arg5, 5916 arg6)); 5917 #endif 5918 break; 5919 #endif 5920 #ifdef TARGET_NR_mmap2 5921 case TARGET_NR_mmap2: 5922 #ifndef MMAP_SHIFT 5923 #define MMAP_SHIFT 12 5924 #endif 5925 ret = get_errno(target_mmap(arg1, arg2, arg3, 5926 target_to_host_bitmask(arg4, mmap_flags_tbl), 5927 arg5, 5928 arg6 << MMAP_SHIFT)); 5929 break; 5930 #endif 5931 case TARGET_NR_munmap: 5932 ret = get_errno(target_munmap(arg1, arg2)); 5933 break; 5934 case TARGET_NR_mprotect: 5935 { 5936 TaskState *ts = ((CPUState *)cpu_env)->opaque; 5937 /* Special hack to detect libc making the stack executable. */ 5938 if ((arg3 & PROT_GROWSDOWN) 5939 && arg1 >= ts->info->stack_limit 5940 && arg1 <= ts->info->start_stack) { 5941 arg3 &= ~PROT_GROWSDOWN; 5942 arg2 = arg2 + arg1 - ts->info->stack_limit; 5943 arg1 = ts->info->stack_limit; 5944 } 5945 } 5946 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 5947 break; 5948 #ifdef TARGET_NR_mremap 5949 case TARGET_NR_mremap: 5950 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 5951 break; 5952 #endif 5953 /* ??? msync/mlock/munlock are broken for softmmu. 
*/ 5954 #ifdef TARGET_NR_msync 5955 case TARGET_NR_msync: 5956 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 5957 break; 5958 #endif 5959 #ifdef TARGET_NR_mlock 5960 case TARGET_NR_mlock: 5961 ret = get_errno(mlock(g2h(arg1), arg2)); 5962 break; 5963 #endif 5964 #ifdef TARGET_NR_munlock 5965 case TARGET_NR_munlock: 5966 ret = get_errno(munlock(g2h(arg1), arg2)); 5967 break; 5968 #endif 5969 #ifdef TARGET_NR_mlockall 5970 case TARGET_NR_mlockall: 5971 ret = get_errno(mlockall(arg1)); 5972 break; 5973 #endif 5974 #ifdef TARGET_NR_munlockall 5975 case TARGET_NR_munlockall: 5976 ret = get_errno(munlockall()); 5977 break; 5978 #endif 5979 case TARGET_NR_truncate: 5980 if (!(p = lock_user_string(arg1))) 5981 goto efault; 5982 ret = get_errno(truncate(p, arg2)); 5983 unlock_user(p, arg1, 0); 5984 break; 5985 case TARGET_NR_ftruncate: 5986 ret = get_errno(ftruncate(arg1, arg2)); 5987 break; 5988 case TARGET_NR_fchmod: 5989 ret = get_errno(fchmod(arg1, arg2)); 5990 break; 5991 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 5992 case TARGET_NR_fchmodat: 5993 if (!(p = lock_user_string(arg2))) 5994 goto efault; 5995 ret = get_errno(sys_fchmodat(arg1, p, arg3)); 5996 unlock_user(p, arg2, 0); 5997 break; 5998 #endif 5999 case TARGET_NR_getpriority: 6000 /* libc does special remapping of the return value of 6001 * sys_getpriority() so it's just easiest to call 6002 * sys_getpriority() directly rather than through libc. 
*/ 6003 ret = get_errno(sys_getpriority(arg1, arg2)); 6004 break; 6005 case TARGET_NR_setpriority: 6006 ret = get_errno(setpriority(arg1, arg2, arg3)); 6007 break; 6008 #ifdef TARGET_NR_profil 6009 case TARGET_NR_profil: 6010 goto unimplemented; 6011 #endif 6012 case TARGET_NR_statfs: 6013 if (!(p = lock_user_string(arg1))) 6014 goto efault; 6015 ret = get_errno(statfs(path(p), &stfs)); 6016 unlock_user(p, arg1, 0); 6017 convert_statfs: 6018 if (!is_error(ret)) { 6019 struct target_statfs *target_stfs; 6020 6021 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6022 goto efault; 6023 __put_user(stfs.f_type, &target_stfs->f_type); 6024 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6025 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6026 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6027 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6028 __put_user(stfs.f_files, &target_stfs->f_files); 6029 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6030 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6031 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6032 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6033 unlock_user_struct(target_stfs, arg2, 1); 6034 } 6035 break; 6036 case TARGET_NR_fstatfs: 6037 ret = get_errno(fstatfs(arg1, &stfs)); 6038 goto convert_statfs; 6039 #ifdef TARGET_NR_statfs64 6040 case TARGET_NR_statfs64: 6041 if (!(p = lock_user_string(arg1))) 6042 goto efault; 6043 ret = get_errno(statfs(path(p), &stfs)); 6044 unlock_user(p, arg1, 0); 6045 convert_statfs64: 6046 if (!is_error(ret)) { 6047 struct target_statfs64 *target_stfs; 6048 6049 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6050 goto efault; 6051 __put_user(stfs.f_type, &target_stfs->f_type); 6052 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6053 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6054 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6055 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6056 
__put_user(stfs.f_files, &target_stfs->f_files); 6057 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6058 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6059 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6060 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6061 unlock_user_struct(target_stfs, arg3, 1); 6062 } 6063 break; 6064 case TARGET_NR_fstatfs64: 6065 ret = get_errno(fstatfs(arg1, &stfs)); 6066 goto convert_statfs64; 6067 #endif 6068 #ifdef TARGET_NR_ioperm 6069 case TARGET_NR_ioperm: 6070 goto unimplemented; 6071 #endif 6072 #ifdef TARGET_NR_socketcall 6073 case TARGET_NR_socketcall: 6074 ret = do_socketcall(arg1, arg2); 6075 break; 6076 #endif 6077 #ifdef TARGET_NR_accept 6078 case TARGET_NR_accept: 6079 ret = do_accept(arg1, arg2, arg3); 6080 break; 6081 #endif 6082 #ifdef TARGET_NR_bind 6083 case TARGET_NR_bind: 6084 ret = do_bind(arg1, arg2, arg3); 6085 break; 6086 #endif 6087 #ifdef TARGET_NR_connect 6088 case TARGET_NR_connect: 6089 ret = do_connect(arg1, arg2, arg3); 6090 break; 6091 #endif 6092 #ifdef TARGET_NR_getpeername 6093 case TARGET_NR_getpeername: 6094 ret = do_getpeername(arg1, arg2, arg3); 6095 break; 6096 #endif 6097 #ifdef TARGET_NR_getsockname 6098 case TARGET_NR_getsockname: 6099 ret = do_getsockname(arg1, arg2, arg3); 6100 break; 6101 #endif 6102 #ifdef TARGET_NR_getsockopt 6103 case TARGET_NR_getsockopt: 6104 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6105 break; 6106 #endif 6107 #ifdef TARGET_NR_listen 6108 case TARGET_NR_listen: 6109 ret = get_errno(listen(arg1, arg2)); 6110 break; 6111 #endif 6112 #ifdef TARGET_NR_recv 6113 case TARGET_NR_recv: 6114 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6115 break; 6116 #endif 6117 #ifdef TARGET_NR_recvfrom 6118 case TARGET_NR_recvfrom: 6119 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6120 break; 6121 #endif 6122 #ifdef TARGET_NR_recvmsg 6123 case TARGET_NR_recvmsg: 6124 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6125 break; 6126 
#endif 6127 #ifdef TARGET_NR_send 6128 case TARGET_NR_send: 6129 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6130 break; 6131 #endif 6132 #ifdef TARGET_NR_sendmsg 6133 case TARGET_NR_sendmsg: 6134 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6135 break; 6136 #endif 6137 #ifdef TARGET_NR_sendto 6138 case TARGET_NR_sendto: 6139 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6140 break; 6141 #endif 6142 #ifdef TARGET_NR_shutdown 6143 case TARGET_NR_shutdown: 6144 ret = get_errno(shutdown(arg1, arg2)); 6145 break; 6146 #endif 6147 #ifdef TARGET_NR_socket 6148 case TARGET_NR_socket: 6149 ret = do_socket(arg1, arg2, arg3); 6150 break; 6151 #endif 6152 #ifdef TARGET_NR_socketpair 6153 case TARGET_NR_socketpair: 6154 ret = do_socketpair(arg1, arg2, arg3, arg4); 6155 break; 6156 #endif 6157 #ifdef TARGET_NR_setsockopt 6158 case TARGET_NR_setsockopt: 6159 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6160 break; 6161 #endif 6162 6163 case TARGET_NR_syslog: 6164 if (!(p = lock_user_string(arg2))) 6165 goto efault; 6166 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6167 unlock_user(p, arg2, 0); 6168 break; 6169 6170 case TARGET_NR_setitimer: 6171 { 6172 struct itimerval value, ovalue, *pvalue; 6173 6174 if (arg2) { 6175 pvalue = &value; 6176 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6177 || copy_from_user_timeval(&pvalue->it_value, 6178 arg2 + sizeof(struct target_timeval))) 6179 goto efault; 6180 } else { 6181 pvalue = NULL; 6182 } 6183 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6184 if (!is_error(ret) && arg3) { 6185 if (copy_to_user_timeval(arg3, 6186 &ovalue.it_interval) 6187 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6188 &ovalue.it_value)) 6189 goto efault; 6190 } 6191 } 6192 break; 6193 case TARGET_NR_getitimer: 6194 { 6195 struct itimerval value; 6196 6197 ret = get_errno(getitimer(arg1, &value)); 6198 if (!is_error(ret) && arg2) { 6199 if (copy_to_user_timeval(arg2, 6200 &value.it_interval) 6201 
|| copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6202 &value.it_value)) 6203 goto efault; 6204 } 6205 } 6206 break; 6207 case TARGET_NR_stat: 6208 if (!(p = lock_user_string(arg1))) 6209 goto efault; 6210 ret = get_errno(stat(path(p), &st)); 6211 unlock_user(p, arg1, 0); 6212 goto do_stat; 6213 case TARGET_NR_lstat: 6214 if (!(p = lock_user_string(arg1))) 6215 goto efault; 6216 ret = get_errno(lstat(path(p), &st)); 6217 unlock_user(p, arg1, 0); 6218 goto do_stat; 6219 case TARGET_NR_fstat: 6220 { 6221 ret = get_errno(fstat(arg1, &st)); 6222 do_stat: 6223 if (!is_error(ret)) { 6224 struct target_stat *target_st; 6225 6226 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6227 goto efault; 6228 memset(target_st, 0, sizeof(*target_st)); 6229 __put_user(st.st_dev, &target_st->st_dev); 6230 __put_user(st.st_ino, &target_st->st_ino); 6231 __put_user(st.st_mode, &target_st->st_mode); 6232 __put_user(st.st_uid, &target_st->st_uid); 6233 __put_user(st.st_gid, &target_st->st_gid); 6234 __put_user(st.st_nlink, &target_st->st_nlink); 6235 __put_user(st.st_rdev, &target_st->st_rdev); 6236 __put_user(st.st_size, &target_st->st_size); 6237 __put_user(st.st_blksize, &target_st->st_blksize); 6238 __put_user(st.st_blocks, &target_st->st_blocks); 6239 __put_user(st.st_atime, &target_st->target_st_atime); 6240 __put_user(st.st_mtime, &target_st->target_st_mtime); 6241 __put_user(st.st_ctime, &target_st->target_st_ctime); 6242 unlock_user_struct(target_st, arg2, 1); 6243 } 6244 } 6245 break; 6246 #ifdef TARGET_NR_olduname 6247 case TARGET_NR_olduname: 6248 goto unimplemented; 6249 #endif 6250 #ifdef TARGET_NR_iopl 6251 case TARGET_NR_iopl: 6252 goto unimplemented; 6253 #endif 6254 case TARGET_NR_vhangup: 6255 ret = get_errno(vhangup()); 6256 break; 6257 #ifdef TARGET_NR_idle 6258 case TARGET_NR_idle: 6259 goto unimplemented; 6260 #endif 6261 #ifdef TARGET_NR_syscall 6262 case TARGET_NR_syscall: 6263 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 
6264 arg6, arg7, arg8, 0); 6265 break; 6266 #endif 6267 case TARGET_NR_wait4: 6268 { 6269 int status; 6270 abi_long status_ptr = arg2; 6271 struct rusage rusage, *rusage_ptr; 6272 abi_ulong target_rusage = arg4; 6273 if (target_rusage) 6274 rusage_ptr = &rusage; 6275 else 6276 rusage_ptr = NULL; 6277 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6278 if (!is_error(ret)) { 6279 if (status_ptr) { 6280 status = host_to_target_waitstatus(status); 6281 if (put_user_s32(status, status_ptr)) 6282 goto efault; 6283 } 6284 if (target_rusage) 6285 host_to_target_rusage(target_rusage, &rusage); 6286 } 6287 } 6288 break; 6289 #ifdef TARGET_NR_swapoff 6290 case TARGET_NR_swapoff: 6291 if (!(p = lock_user_string(arg1))) 6292 goto efault; 6293 ret = get_errno(swapoff(p)); 6294 unlock_user(p, arg1, 0); 6295 break; 6296 #endif 6297 case TARGET_NR_sysinfo: 6298 { 6299 struct target_sysinfo *target_value; 6300 struct sysinfo value; 6301 ret = get_errno(sysinfo(&value)); 6302 if (!is_error(ret) && arg1) 6303 { 6304 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6305 goto efault; 6306 __put_user(value.uptime, &target_value->uptime); 6307 __put_user(value.loads[0], &target_value->loads[0]); 6308 __put_user(value.loads[1], &target_value->loads[1]); 6309 __put_user(value.loads[2], &target_value->loads[2]); 6310 __put_user(value.totalram, &target_value->totalram); 6311 __put_user(value.freeram, &target_value->freeram); 6312 __put_user(value.sharedram, &target_value->sharedram); 6313 __put_user(value.bufferram, &target_value->bufferram); 6314 __put_user(value.totalswap, &target_value->totalswap); 6315 __put_user(value.freeswap, &target_value->freeswap); 6316 __put_user(value.procs, &target_value->procs); 6317 __put_user(value.totalhigh, &target_value->totalhigh); 6318 __put_user(value.freehigh, &target_value->freehigh); 6319 __put_user(value.mem_unit, &target_value->mem_unit); 6320 unlock_user_struct(target_value, arg1, 1); 6321 } 6322 } 6323 break; 6324 #ifdef 
TARGET_NR_ipc 6325 case TARGET_NR_ipc: 6326 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6327 break; 6328 #endif 6329 #ifdef TARGET_NR_semget 6330 case TARGET_NR_semget: 6331 ret = get_errno(semget(arg1, arg2, arg3)); 6332 break; 6333 #endif 6334 #ifdef TARGET_NR_semop 6335 case TARGET_NR_semop: 6336 ret = get_errno(do_semop(arg1, arg2, arg3)); 6337 break; 6338 #endif 6339 #ifdef TARGET_NR_semctl 6340 case TARGET_NR_semctl: 6341 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 6342 break; 6343 #endif 6344 #ifdef TARGET_NR_msgctl 6345 case TARGET_NR_msgctl: 6346 ret = do_msgctl(arg1, arg2, arg3); 6347 break; 6348 #endif 6349 #ifdef TARGET_NR_msgget 6350 case TARGET_NR_msgget: 6351 ret = get_errno(msgget(arg1, arg2)); 6352 break; 6353 #endif 6354 #ifdef TARGET_NR_msgrcv 6355 case TARGET_NR_msgrcv: 6356 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 6357 break; 6358 #endif 6359 #ifdef TARGET_NR_msgsnd 6360 case TARGET_NR_msgsnd: 6361 ret = do_msgsnd(arg1, arg2, arg3, arg4); 6362 break; 6363 #endif 6364 #ifdef TARGET_NR_shmget 6365 case TARGET_NR_shmget: 6366 ret = get_errno(shmget(arg1, arg2, arg3)); 6367 break; 6368 #endif 6369 #ifdef TARGET_NR_shmctl 6370 case TARGET_NR_shmctl: 6371 ret = do_shmctl(arg1, arg2, arg3); 6372 break; 6373 #endif 6374 #ifdef TARGET_NR_shmat 6375 case TARGET_NR_shmat: 6376 ret = do_shmat(arg1, arg2, arg3); 6377 break; 6378 #endif 6379 #ifdef TARGET_NR_shmdt 6380 case TARGET_NR_shmdt: 6381 ret = do_shmdt(arg1); 6382 break; 6383 #endif 6384 case TARGET_NR_fsync: 6385 ret = get_errno(fsync(arg1)); 6386 break; 6387 case TARGET_NR_clone: 6388 #if defined(TARGET_SH4) || defined(TARGET_ALPHA) 6389 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 6390 #elif defined(TARGET_CRIS) 6391 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); 6392 #elif defined(TARGET_S390X) 6393 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 6394 #else 6395 ret = get_errno(do_fork(cpu_env, arg1, 
arg2, arg3, arg4, arg5)); 6396 #endif 6397 break; 6398 #ifdef __NR_exit_group 6399 /* new thread calls */ 6400 case TARGET_NR_exit_group: 6401 #ifdef TARGET_GPROF 6402 _mcleanup(); 6403 #endif 6404 gdb_exit(cpu_env, arg1); 6405 ret = get_errno(exit_group(arg1)); 6406 break; 6407 #endif 6408 case TARGET_NR_setdomainname: 6409 if (!(p = lock_user_string(arg1))) 6410 goto efault; 6411 ret = get_errno(setdomainname(p, arg2)); 6412 unlock_user(p, arg1, 0); 6413 break; 6414 case TARGET_NR_uname: 6415 /* no need to transcode because we use the linux syscall */ 6416 { 6417 struct new_utsname * buf; 6418 6419 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 6420 goto efault; 6421 ret = get_errno(sys_uname(buf)); 6422 if (!is_error(ret)) { 6423 /* Overrite the native machine name with whatever is being 6424 emulated. */ 6425 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 6426 /* Allow the user to override the reported release. */ 6427 if (qemu_uname_release && *qemu_uname_release) 6428 strcpy (buf->release, qemu_uname_release); 6429 } 6430 unlock_user_struct(buf, arg1, 1); 6431 } 6432 break; 6433 #ifdef TARGET_I386 6434 case TARGET_NR_modify_ldt: 6435 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 6436 break; 6437 #if !defined(TARGET_X86_64) 6438 case TARGET_NR_vm86old: 6439 goto unimplemented; 6440 case TARGET_NR_vm86: 6441 ret = do_vm86(cpu_env, arg1, arg2); 6442 break; 6443 #endif 6444 #endif 6445 case TARGET_NR_adjtimex: 6446 goto unimplemented; 6447 #ifdef TARGET_NR_create_module 6448 case TARGET_NR_create_module: 6449 #endif 6450 case TARGET_NR_init_module: 6451 case TARGET_NR_delete_module: 6452 #ifdef TARGET_NR_get_kernel_syms 6453 case TARGET_NR_get_kernel_syms: 6454 #endif 6455 goto unimplemented; 6456 case TARGET_NR_quotactl: 6457 goto unimplemented; 6458 case TARGET_NR_getpgid: 6459 ret = get_errno(getpgid(arg1)); 6460 break; 6461 case TARGET_NR_fchdir: 6462 ret = get_errno(fchdir(arg1)); 6463 break; 6464 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 
6465 case TARGET_NR_bdflush: 6466 goto unimplemented; 6467 #endif 6468 #ifdef TARGET_NR_sysfs 6469 case TARGET_NR_sysfs: 6470 goto unimplemented; 6471 #endif 6472 case TARGET_NR_personality: 6473 ret = get_errno(personality(arg1)); 6474 break; 6475 #ifdef TARGET_NR_afs_syscall 6476 case TARGET_NR_afs_syscall: 6477 goto unimplemented; 6478 #endif 6479 #ifdef TARGET_NR__llseek /* Not on alpha */ 6480 case TARGET_NR__llseek: 6481 { 6482 int64_t res; 6483 #if !defined(__NR_llseek) 6484 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 6485 if (res == -1) { 6486 ret = get_errno(res); 6487 } else { 6488 ret = 0; 6489 } 6490 #else 6491 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 6492 #endif 6493 if ((ret == 0) && put_user_s64(res, arg4)) { 6494 goto efault; 6495 } 6496 } 6497 break; 6498 #endif 6499 case TARGET_NR_getdents: 6500 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 6501 { 6502 struct target_dirent *target_dirp; 6503 struct linux_dirent *dirp; 6504 abi_long count = arg3; 6505 6506 dirp = malloc(count); 6507 if (!dirp) { 6508 ret = -TARGET_ENOMEM; 6509 goto fail; 6510 } 6511 6512 ret = get_errno(sys_getdents(arg1, dirp, count)); 6513 if (!is_error(ret)) { 6514 struct linux_dirent *de; 6515 struct target_dirent *tde; 6516 int len = ret; 6517 int reclen, treclen; 6518 int count1, tnamelen; 6519 6520 count1 = 0; 6521 de = dirp; 6522 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6523 goto efault; 6524 tde = target_dirp; 6525 while (len > 0) { 6526 reclen = de->d_reclen; 6527 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long))); 6528 tde->d_reclen = tswap16(treclen); 6529 tde->d_ino = tswapl(de->d_ino); 6530 tde->d_off = tswapl(de->d_off); 6531 tnamelen = treclen - (2 * sizeof(abi_long) + 2); 6532 if (tnamelen > 256) 6533 tnamelen = 256; 6534 /* XXX: may not be correct */ 6535 pstrcpy(tde->d_name, tnamelen, de->d_name); 6536 de = (struct linux_dirent *)((char *)de + reclen); 6537 len -= reclen; 6538 tde = (struct target_dirent 
*)((char *)tde + treclen); 6539 count1 += treclen; 6540 } 6541 ret = count1; 6542 unlock_user(target_dirp, arg2, ret); 6543 } 6544 free(dirp); 6545 } 6546 #else 6547 { 6548 struct linux_dirent *dirp; 6549 abi_long count = arg3; 6550 6551 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6552 goto efault; 6553 ret = get_errno(sys_getdents(arg1, dirp, count)); 6554 if (!is_error(ret)) { 6555 struct linux_dirent *de; 6556 int len = ret; 6557 int reclen; 6558 de = dirp; 6559 while (len > 0) { 6560 reclen = de->d_reclen; 6561 if (reclen > len) 6562 break; 6563 de->d_reclen = tswap16(reclen); 6564 tswapls(&de->d_ino); 6565 tswapls(&de->d_off); 6566 de = (struct linux_dirent *)((char *)de + reclen); 6567 len -= reclen; 6568 } 6569 } 6570 unlock_user(dirp, arg2, ret); 6571 } 6572 #endif 6573 break; 6574 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 6575 case TARGET_NR_getdents64: 6576 { 6577 struct linux_dirent64 *dirp; 6578 abi_long count = arg3; 6579 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6580 goto efault; 6581 ret = get_errno(sys_getdents64(arg1, dirp, count)); 6582 if (!is_error(ret)) { 6583 struct linux_dirent64 *de; 6584 int len = ret; 6585 int reclen; 6586 de = dirp; 6587 while (len > 0) { 6588 reclen = de->d_reclen; 6589 if (reclen > len) 6590 break; 6591 de->d_reclen = tswap16(reclen); 6592 tswap64s((uint64_t *)&de->d_ino); 6593 tswap64s((uint64_t *)&de->d_off); 6594 de = (struct linux_dirent64 *)((char *)de + reclen); 6595 len -= reclen; 6596 } 6597 } 6598 unlock_user(dirp, arg2, ret); 6599 } 6600 break; 6601 #endif /* TARGET_NR_getdents64 */ 6602 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X) 6603 #ifdef TARGET_S390X 6604 case TARGET_NR_select: 6605 #else 6606 case TARGET_NR__newselect: 6607 #endif 6608 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6609 break; 6610 #endif 6611 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 6612 # ifdef TARGET_NR_poll 6613 case TARGET_NR_poll: 6614 # endif 6615 # ifdef 
TARGET_NR_ppoll 6616 case TARGET_NR_ppoll: 6617 # endif 6618 { 6619 struct target_pollfd *target_pfd; 6620 unsigned int nfds = arg2; 6621 int timeout = arg3; 6622 struct pollfd *pfd; 6623 unsigned int i; 6624 6625 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 6626 if (!target_pfd) 6627 goto efault; 6628 6629 pfd = alloca(sizeof(struct pollfd) * nfds); 6630 for(i = 0; i < nfds; i++) { 6631 pfd[i].fd = tswap32(target_pfd[i].fd); 6632 pfd[i].events = tswap16(target_pfd[i].events); 6633 } 6634 6635 # ifdef TARGET_NR_ppoll 6636 if (num == TARGET_NR_ppoll) { 6637 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 6638 target_sigset_t *target_set; 6639 sigset_t _set, *set = &_set; 6640 6641 if (arg3) { 6642 if (target_to_host_timespec(timeout_ts, arg3)) { 6643 unlock_user(target_pfd, arg1, 0); 6644 goto efault; 6645 } 6646 } else { 6647 timeout_ts = NULL; 6648 } 6649 6650 if (arg4) { 6651 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 6652 if (!target_set) { 6653 unlock_user(target_pfd, arg1, 0); 6654 goto efault; 6655 } 6656 target_to_host_sigset(set, target_set); 6657 } else { 6658 set = NULL; 6659 } 6660 6661 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 6662 6663 if (!is_error(ret) && arg3) { 6664 host_to_target_timespec(arg3, timeout_ts); 6665 } 6666 if (arg4) { 6667 unlock_user(target_set, arg4, 0); 6668 } 6669 } else 6670 # endif 6671 ret = get_errno(poll(pfd, nfds, timeout)); 6672 6673 if (!is_error(ret)) { 6674 for(i = 0; i < nfds; i++) { 6675 target_pfd[i].revents = tswap16(pfd[i].revents); 6676 } 6677 } 6678 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 6679 } 6680 break; 6681 #endif 6682 case TARGET_NR_flock: 6683 /* NOTE: the flock constant seems to be the same for every 6684 Linux platform */ 6685 ret = get_errno(flock(arg1, arg2)); 6686 break; 6687 case TARGET_NR_readv: 6688 { 6689 int count = arg3; 6690 struct iovec *vec; 6691 6692 vec = 
alloca(count * sizeof(struct iovec)); 6693 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0) 6694 goto efault; 6695 ret = get_errno(readv(arg1, vec, count)); 6696 unlock_iovec(vec, arg2, count, 1); 6697 } 6698 break; 6699 case TARGET_NR_writev: 6700 { 6701 int count = arg3; 6702 struct iovec *vec; 6703 6704 vec = alloca(count * sizeof(struct iovec)); 6705 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) 6706 goto efault; 6707 ret = get_errno(writev(arg1, vec, count)); 6708 unlock_iovec(vec, arg2, count, 0); 6709 } 6710 break; 6711 case TARGET_NR_getsid: 6712 ret = get_errno(getsid(arg1)); 6713 break; 6714 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 6715 case TARGET_NR_fdatasync: 6716 ret = get_errno(fdatasync(arg1)); 6717 break; 6718 #endif 6719 case TARGET_NR__sysctl: 6720 /* We don't implement this, but ENOTDIR is always a safe 6721 return value. */ 6722 ret = -TARGET_ENOTDIR; 6723 break; 6724 case TARGET_NR_sched_getaffinity: 6725 { 6726 unsigned int mask_size; 6727 unsigned long *mask; 6728 6729 /* 6730 * sched_getaffinity needs multiples of ulong, so need to take 6731 * care of mismatches between target ulong and host ulong sizes. 6732 */ 6733 if (arg2 & (sizeof(abi_ulong) - 1)) { 6734 ret = -TARGET_EINVAL; 6735 break; 6736 } 6737 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 6738 6739 mask = alloca(mask_size); 6740 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 6741 6742 if (!is_error(ret)) { 6743 if (copy_to_user(arg3, mask, ret)) { 6744 goto efault; 6745 } 6746 } 6747 } 6748 break; 6749 case TARGET_NR_sched_setaffinity: 6750 { 6751 unsigned int mask_size; 6752 unsigned long *mask; 6753 6754 /* 6755 * sched_setaffinity needs multiples of ulong, so need to take 6756 * care of mismatches between target ulong and host ulong sizes. 
6757 */ 6758 if (arg2 & (sizeof(abi_ulong) - 1)) { 6759 ret = -TARGET_EINVAL; 6760 break; 6761 } 6762 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 6763 6764 mask = alloca(mask_size); 6765 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 6766 goto efault; 6767 } 6768 memcpy(mask, p, arg2); 6769 unlock_user_struct(p, arg2, 0); 6770 6771 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 6772 } 6773 break; 6774 case TARGET_NR_sched_setparam: 6775 { 6776 struct sched_param *target_schp; 6777 struct sched_param schp; 6778 6779 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 6780 goto efault; 6781 schp.sched_priority = tswap32(target_schp->sched_priority); 6782 unlock_user_struct(target_schp, arg2, 0); 6783 ret = get_errno(sched_setparam(arg1, &schp)); 6784 } 6785 break; 6786 case TARGET_NR_sched_getparam: 6787 { 6788 struct sched_param *target_schp; 6789 struct sched_param schp; 6790 ret = get_errno(sched_getparam(arg1, &schp)); 6791 if (!is_error(ret)) { 6792 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 6793 goto efault; 6794 target_schp->sched_priority = tswap32(schp.sched_priority); 6795 unlock_user_struct(target_schp, arg2, 1); 6796 } 6797 } 6798 break; 6799 case TARGET_NR_sched_setscheduler: 6800 { 6801 struct sched_param *target_schp; 6802 struct sched_param schp; 6803 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 6804 goto efault; 6805 schp.sched_priority = tswap32(target_schp->sched_priority); 6806 unlock_user_struct(target_schp, arg3, 0); 6807 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 6808 } 6809 break; 6810 case TARGET_NR_sched_getscheduler: 6811 ret = get_errno(sched_getscheduler(arg1)); 6812 break; 6813 case TARGET_NR_sched_yield: 6814 ret = get_errno(sched_yield()); 6815 break; 6816 case TARGET_NR_sched_get_priority_max: 6817 ret = get_errno(sched_get_priority_max(arg1)); 6818 break; 6819 case TARGET_NR_sched_get_priority_min: 6820 ret = 
get_errno(sched_get_priority_min(arg1)); 6821 break; 6822 case TARGET_NR_sched_rr_get_interval: 6823 { 6824 struct timespec ts; 6825 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 6826 if (!is_error(ret)) { 6827 host_to_target_timespec(arg2, &ts); 6828 } 6829 } 6830 break; 6831 case TARGET_NR_nanosleep: 6832 { 6833 struct timespec req, rem; 6834 target_to_host_timespec(&req, arg1); 6835 ret = get_errno(nanosleep(&req, &rem)); 6836 if (is_error(ret) && arg2) { 6837 host_to_target_timespec(arg2, &rem); 6838 } 6839 } 6840 break; 6841 #ifdef TARGET_NR_query_module 6842 case TARGET_NR_query_module: 6843 goto unimplemented; 6844 #endif 6845 #ifdef TARGET_NR_nfsservctl 6846 case TARGET_NR_nfsservctl: 6847 goto unimplemented; 6848 #endif 6849 case TARGET_NR_prctl: 6850 switch (arg1) 6851 { 6852 case PR_GET_PDEATHSIG: 6853 { 6854 int deathsig; 6855 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 6856 if (!is_error(ret) && arg2 6857 && put_user_ual(deathsig, arg2)) 6858 goto efault; 6859 } 6860 break; 6861 default: 6862 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 6863 break; 6864 } 6865 break; 6866 #ifdef TARGET_NR_arch_prctl 6867 case TARGET_NR_arch_prctl: 6868 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 6869 ret = do_arch_prctl(cpu_env, arg1, arg2); 6870 break; 6871 #else 6872 goto unimplemented; 6873 #endif 6874 #endif 6875 #ifdef TARGET_NR_pread 6876 case TARGET_NR_pread: 6877 if (regpairs_aligned(cpu_env)) 6878 arg4 = arg5; 6879 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 6880 goto efault; 6881 ret = get_errno(pread(arg1, p, arg3, arg4)); 6882 unlock_user(p, arg2, ret); 6883 break; 6884 case TARGET_NR_pwrite: 6885 if (regpairs_aligned(cpu_env)) 6886 arg4 = arg5; 6887 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 6888 goto efault; 6889 ret = get_errno(pwrite(arg1, p, arg3, arg4)); 6890 unlock_user(p, arg2, 0); 6891 break; 6892 #endif 6893 #ifdef TARGET_NR_pread64 6894 case TARGET_NR_pread64: 6895 if (!(p = 
lock_user(VERIFY_WRITE, arg2, arg3, 0))) 6896 goto efault; 6897 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 6898 unlock_user(p, arg2, ret); 6899 break; 6900 case TARGET_NR_pwrite64: 6901 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 6902 goto efault; 6903 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 6904 unlock_user(p, arg2, 0); 6905 break; 6906 #endif 6907 case TARGET_NR_getcwd: 6908 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 6909 goto efault; 6910 ret = get_errno(sys_getcwd1(p, arg2)); 6911 unlock_user(p, arg1, ret); 6912 break; 6913 case TARGET_NR_capget: 6914 goto unimplemented; 6915 case TARGET_NR_capset: 6916 goto unimplemented; 6917 case TARGET_NR_sigaltstack: 6918 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 6919 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 6920 defined(TARGET_M68K) || defined(TARGET_S390X) 6921 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env)); 6922 break; 6923 #else 6924 goto unimplemented; 6925 #endif 6926 case TARGET_NR_sendfile: 6927 goto unimplemented; 6928 #ifdef TARGET_NR_getpmsg 6929 case TARGET_NR_getpmsg: 6930 goto unimplemented; 6931 #endif 6932 #ifdef TARGET_NR_putpmsg 6933 case TARGET_NR_putpmsg: 6934 goto unimplemented; 6935 #endif 6936 #ifdef TARGET_NR_vfork 6937 case TARGET_NR_vfork: 6938 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 6939 0, 0, 0, 0)); 6940 break; 6941 #endif 6942 #ifdef TARGET_NR_ugetrlimit 6943 case TARGET_NR_ugetrlimit: 6944 { 6945 struct rlimit rlim; 6946 int resource = target_to_host_resource(arg1); 6947 ret = get_errno(getrlimit(resource, &rlim)); 6948 if (!is_error(ret)) { 6949 struct target_rlimit *target_rlim; 6950 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6951 goto efault; 6952 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6953 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6954 
unlock_user_struct(target_rlim, arg2, 1); 6955 } 6956 break; 6957 } 6958 #endif 6959 #ifdef TARGET_NR_truncate64 6960 case TARGET_NR_truncate64: 6961 if (!(p = lock_user_string(arg1))) 6962 goto efault; 6963 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 6964 unlock_user(p, arg1, 0); 6965 break; 6966 #endif 6967 #ifdef TARGET_NR_ftruncate64 6968 case TARGET_NR_ftruncate64: 6969 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 6970 break; 6971 #endif 6972 #ifdef TARGET_NR_stat64 6973 case TARGET_NR_stat64: 6974 if (!(p = lock_user_string(arg1))) 6975 goto efault; 6976 ret = get_errno(stat(path(p), &st)); 6977 unlock_user(p, arg1, 0); 6978 if (!is_error(ret)) 6979 ret = host_to_target_stat64(cpu_env, arg2, &st); 6980 break; 6981 #endif 6982 #ifdef TARGET_NR_lstat64 6983 case TARGET_NR_lstat64: 6984 if (!(p = lock_user_string(arg1))) 6985 goto efault; 6986 ret = get_errno(lstat(path(p), &st)); 6987 unlock_user(p, arg1, 0); 6988 if (!is_error(ret)) 6989 ret = host_to_target_stat64(cpu_env, arg2, &st); 6990 break; 6991 #endif 6992 #ifdef TARGET_NR_fstat64 6993 case TARGET_NR_fstat64: 6994 ret = get_errno(fstat(arg1, &st)); 6995 if (!is_error(ret)) 6996 ret = host_to_target_stat64(cpu_env, arg2, &st); 6997 break; 6998 #endif 6999 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 7000 (defined(__NR_fstatat64) || defined(__NR_newfstatat)) 7001 #ifdef TARGET_NR_fstatat64 7002 case TARGET_NR_fstatat64: 7003 #endif 7004 #ifdef TARGET_NR_newfstatat 7005 case TARGET_NR_newfstatat: 7006 #endif 7007 if (!(p = lock_user_string(arg2))) 7008 goto efault; 7009 #ifdef __NR_fstatat64 7010 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); 7011 #else 7012 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); 7013 #endif 7014 if (!is_error(ret)) 7015 ret = host_to_target_stat64(cpu_env, arg3, &st); 7016 break; 7017 #endif 7018 case TARGET_NR_lchown: 7019 if (!(p = lock_user_string(arg1))) 7020 goto efault; 7021 ret = 
get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7022 unlock_user(p, arg1, 0); 7023 break; 7024 #ifdef TARGET_NR_getuid 7025 case TARGET_NR_getuid: 7026 ret = get_errno(high2lowuid(getuid())); 7027 break; 7028 #endif 7029 #ifdef TARGET_NR_getgid 7030 case TARGET_NR_getgid: 7031 ret = get_errno(high2lowgid(getgid())); 7032 break; 7033 #endif 7034 #ifdef TARGET_NR_geteuid 7035 case TARGET_NR_geteuid: 7036 ret = get_errno(high2lowuid(geteuid())); 7037 break; 7038 #endif 7039 #ifdef TARGET_NR_getegid 7040 case TARGET_NR_getegid: 7041 ret = get_errno(high2lowgid(getegid())); 7042 break; 7043 #endif 7044 case TARGET_NR_setreuid: 7045 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7046 break; 7047 case TARGET_NR_setregid: 7048 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7049 break; 7050 case TARGET_NR_getgroups: 7051 { 7052 int gidsetsize = arg1; 7053 target_id *target_grouplist; 7054 gid_t *grouplist; 7055 int i; 7056 7057 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7058 ret = get_errno(getgroups(gidsetsize, grouplist)); 7059 if (gidsetsize == 0) 7060 break; 7061 if (!is_error(ret)) { 7062 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0); 7063 if (!target_grouplist) 7064 goto efault; 7065 for(i = 0;i < ret; i++) 7066 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7067 unlock_user(target_grouplist, arg2, gidsetsize * 2); 7068 } 7069 } 7070 break; 7071 case TARGET_NR_setgroups: 7072 { 7073 int gidsetsize = arg1; 7074 target_id *target_grouplist; 7075 gid_t *grouplist; 7076 int i; 7077 7078 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7079 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1); 7080 if (!target_grouplist) { 7081 ret = -TARGET_EFAULT; 7082 goto fail; 7083 } 7084 for(i = 0;i < gidsetsize; i++) 7085 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7086 unlock_user(target_grouplist, arg2, 0); 7087 ret = get_errno(setgroups(gidsetsize, grouplist)); 7088 } 
7089 break; 7090 case TARGET_NR_fchown: 7091 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7092 break; 7093 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 7094 case TARGET_NR_fchownat: 7095 if (!(p = lock_user_string(arg2))) 7096 goto efault; 7097 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); 7098 unlock_user(p, arg2, 0); 7099 break; 7100 #endif 7101 #ifdef TARGET_NR_setresuid 7102 case TARGET_NR_setresuid: 7103 ret = get_errno(setresuid(low2highuid(arg1), 7104 low2highuid(arg2), 7105 low2highuid(arg3))); 7106 break; 7107 #endif 7108 #ifdef TARGET_NR_getresuid 7109 case TARGET_NR_getresuid: 7110 { 7111 uid_t ruid, euid, suid; 7112 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7113 if (!is_error(ret)) { 7114 if (put_user_u16(high2lowuid(ruid), arg1) 7115 || put_user_u16(high2lowuid(euid), arg2) 7116 || put_user_u16(high2lowuid(suid), arg3)) 7117 goto efault; 7118 } 7119 } 7120 break; 7121 #endif 7122 #ifdef TARGET_NR_getresgid 7123 case TARGET_NR_setresgid: 7124 ret = get_errno(setresgid(low2highgid(arg1), 7125 low2highgid(arg2), 7126 low2highgid(arg3))); 7127 break; 7128 #endif 7129 #ifdef TARGET_NR_getresgid 7130 case TARGET_NR_getresgid: 7131 { 7132 gid_t rgid, egid, sgid; 7133 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7134 if (!is_error(ret)) { 7135 if (put_user_u16(high2lowgid(rgid), arg1) 7136 || put_user_u16(high2lowgid(egid), arg2) 7137 || put_user_u16(high2lowgid(sgid), arg3)) 7138 goto efault; 7139 } 7140 } 7141 break; 7142 #endif 7143 case TARGET_NR_chown: 7144 if (!(p = lock_user_string(arg1))) 7145 goto efault; 7146 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7147 unlock_user(p, arg1, 0); 7148 break; 7149 case TARGET_NR_setuid: 7150 ret = get_errno(setuid(low2highuid(arg1))); 7151 break; 7152 case TARGET_NR_setgid: 7153 ret = get_errno(setgid(low2highgid(arg1))); 7154 break; 7155 case TARGET_NR_setfsuid: 7156 ret = get_errno(setfsuid(arg1)); 7157 
break; 7158 case TARGET_NR_setfsgid: 7159 ret = get_errno(setfsgid(arg1)); 7160 break; 7161 7162 #ifdef TARGET_NR_lchown32 7163 case TARGET_NR_lchown32: 7164 if (!(p = lock_user_string(arg1))) 7165 goto efault; 7166 ret = get_errno(lchown(p, arg2, arg3)); 7167 unlock_user(p, arg1, 0); 7168 break; 7169 #endif 7170 #ifdef TARGET_NR_getuid32 7171 case TARGET_NR_getuid32: 7172 ret = get_errno(getuid()); 7173 break; 7174 #endif 7175 7176 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 7177 /* Alpha specific */ 7178 case TARGET_NR_getxuid: 7179 { 7180 uid_t euid; 7181 euid=geteuid(); 7182 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 7183 } 7184 ret = get_errno(getuid()); 7185 break; 7186 #endif 7187 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 7188 /* Alpha specific */ 7189 case TARGET_NR_getxgid: 7190 { 7191 uid_t egid; 7192 egid=getegid(); 7193 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 7194 } 7195 ret = get_errno(getgid()); 7196 break; 7197 #endif 7198 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 7199 /* Alpha specific */ 7200 case TARGET_NR_osf_getsysinfo: 7201 ret = -TARGET_EOPNOTSUPP; 7202 switch (arg1) { 7203 case TARGET_GSI_IEEE_FP_CONTROL: 7204 { 7205 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 7206 7207 /* Copied from linux ieee_fpcr_to_swcr. */ 7208 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 7209 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 7210 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 7211 | SWCR_TRAP_ENABLE_DZE 7212 | SWCR_TRAP_ENABLE_OVF); 7213 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 7214 | SWCR_TRAP_ENABLE_INE); 7215 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 7216 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 7217 7218 if (put_user_u64 (swcr, arg2)) 7219 goto efault; 7220 ret = 0; 7221 } 7222 break; 7223 7224 /* case GSI_IEEE_STATE_AT_SIGNAL: 7225 -- Not implemented in linux kernel. 7226 case GSI_UACPROC: 7227 -- Retrieves current unaligned access state; not much used. 
7228 case GSI_PROC_TYPE: 7229 -- Retrieves implver information; surely not used. 7230 case GSI_GET_HWRPB: 7231 -- Grabs a copy of the HWRPB; surely not used. 7232 */ 7233 } 7234 break; 7235 #endif 7236 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 7237 /* Alpha specific */ 7238 case TARGET_NR_osf_setsysinfo: 7239 ret = -TARGET_EOPNOTSUPP; 7240 switch (arg1) { 7241 case TARGET_SSI_IEEE_FP_CONTROL: 7242 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 7243 { 7244 uint64_t swcr, fpcr, orig_fpcr; 7245 7246 if (get_user_u64 (swcr, arg2)) 7247 goto efault; 7248 orig_fpcr = cpu_alpha_load_fpcr (cpu_env); 7249 fpcr = orig_fpcr & FPCR_DYN_MASK; 7250 7251 /* Copied from linux ieee_swcr_to_fpcr. */ 7252 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 7253 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 7254 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 7255 | SWCR_TRAP_ENABLE_DZE 7256 | SWCR_TRAP_ENABLE_OVF)) << 48; 7257 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 7258 | SWCR_TRAP_ENABLE_INE)) << 57; 7259 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 7260 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 7261 7262 cpu_alpha_store_fpcr (cpu_env, fpcr); 7263 ret = 0; 7264 7265 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) { 7266 /* Old exceptions are not signaled. */ 7267 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 7268 7269 /* If any exceptions set by this call, and are unmasked, 7270 send a signal. */ 7271 /* ??? FIXME */ 7272 } 7273 } 7274 break; 7275 7276 /* case SSI_NVPAIRS: 7277 -- Used with SSIN_UACPROC to enable unaligned accesses. 7278 case SSI_IEEE_STATE_AT_SIGNAL: 7279 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 7280 -- Not implemented in linux kernel 7281 */ 7282 } 7283 break; 7284 #endif 7285 #ifdef TARGET_NR_osf_sigprocmask 7286 /* Alpha specific. 
*/ 7287 case TARGET_NR_osf_sigprocmask: 7288 { 7289 abi_ulong mask; 7290 int how; 7291 sigset_t set, oldset; 7292 7293 switch(arg1) { 7294 case TARGET_SIG_BLOCK: 7295 how = SIG_BLOCK; 7296 break; 7297 case TARGET_SIG_UNBLOCK: 7298 how = SIG_UNBLOCK; 7299 break; 7300 case TARGET_SIG_SETMASK: 7301 how = SIG_SETMASK; 7302 break; 7303 default: 7304 ret = -TARGET_EINVAL; 7305 goto fail; 7306 } 7307 mask = arg2; 7308 target_to_host_old_sigset(&set, &mask); 7309 sigprocmask(how, &set, &oldset); 7310 host_to_target_old_sigset(&mask, &oldset); 7311 ret = mask; 7312 } 7313 break; 7314 #endif 7315 7316 #ifdef TARGET_NR_getgid32 7317 case TARGET_NR_getgid32: 7318 ret = get_errno(getgid()); 7319 break; 7320 #endif 7321 #ifdef TARGET_NR_geteuid32 7322 case TARGET_NR_geteuid32: 7323 ret = get_errno(geteuid()); 7324 break; 7325 #endif 7326 #ifdef TARGET_NR_getegid32 7327 case TARGET_NR_getegid32: 7328 ret = get_errno(getegid()); 7329 break; 7330 #endif 7331 #ifdef TARGET_NR_setreuid32 7332 case TARGET_NR_setreuid32: 7333 ret = get_errno(setreuid(arg1, arg2)); 7334 break; 7335 #endif 7336 #ifdef TARGET_NR_setregid32 7337 case TARGET_NR_setregid32: 7338 ret = get_errno(setregid(arg1, arg2)); 7339 break; 7340 #endif 7341 #ifdef TARGET_NR_getgroups32 7342 case TARGET_NR_getgroups32: 7343 { 7344 int gidsetsize = arg1; 7345 uint32_t *target_grouplist; 7346 gid_t *grouplist; 7347 int i; 7348 7349 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7350 ret = get_errno(getgroups(gidsetsize, grouplist)); 7351 if (gidsetsize == 0) 7352 break; 7353 if (!is_error(ret)) { 7354 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 7355 if (!target_grouplist) { 7356 ret = -TARGET_EFAULT; 7357 goto fail; 7358 } 7359 for(i = 0;i < ret; i++) 7360 target_grouplist[i] = tswap32(grouplist[i]); 7361 unlock_user(target_grouplist, arg2, gidsetsize * 4); 7362 } 7363 } 7364 break; 7365 #endif 7366 #ifdef TARGET_NR_setgroups32 7367 case TARGET_NR_setgroups32: 7368 { 7369 int gidsetsize = arg1; 
7370 uint32_t *target_grouplist; 7371 gid_t *grouplist; 7372 int i; 7373 7374 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7375 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 7376 if (!target_grouplist) { 7377 ret = -TARGET_EFAULT; 7378 goto fail; 7379 } 7380 for(i = 0;i < gidsetsize; i++) 7381 grouplist[i] = tswap32(target_grouplist[i]); 7382 unlock_user(target_grouplist, arg2, 0); 7383 ret = get_errno(setgroups(gidsetsize, grouplist)); 7384 } 7385 break; 7386 #endif 7387 #ifdef TARGET_NR_fchown32 7388 case TARGET_NR_fchown32: 7389 ret = get_errno(fchown(arg1, arg2, arg3)); 7390 break; 7391 #endif 7392 #ifdef TARGET_NR_setresuid32 7393 case TARGET_NR_setresuid32: 7394 ret = get_errno(setresuid(arg1, arg2, arg3)); 7395 break; 7396 #endif 7397 #ifdef TARGET_NR_getresuid32 7398 case TARGET_NR_getresuid32: 7399 { 7400 uid_t ruid, euid, suid; 7401 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7402 if (!is_error(ret)) { 7403 if (put_user_u32(ruid, arg1) 7404 || put_user_u32(euid, arg2) 7405 || put_user_u32(suid, arg3)) 7406 goto efault; 7407 } 7408 } 7409 break; 7410 #endif 7411 #ifdef TARGET_NR_setresgid32 7412 case TARGET_NR_setresgid32: 7413 ret = get_errno(setresgid(arg1, arg2, arg3)); 7414 break; 7415 #endif 7416 #ifdef TARGET_NR_getresgid32 7417 case TARGET_NR_getresgid32: 7418 { 7419 gid_t rgid, egid, sgid; 7420 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7421 if (!is_error(ret)) { 7422 if (put_user_u32(rgid, arg1) 7423 || put_user_u32(egid, arg2) 7424 || put_user_u32(sgid, arg3)) 7425 goto efault; 7426 } 7427 } 7428 break; 7429 #endif 7430 #ifdef TARGET_NR_chown32 7431 case TARGET_NR_chown32: 7432 if (!(p = lock_user_string(arg1))) 7433 goto efault; 7434 ret = get_errno(chown(p, arg2, arg3)); 7435 unlock_user(p, arg1, 0); 7436 break; 7437 #endif 7438 #ifdef TARGET_NR_setuid32 7439 case TARGET_NR_setuid32: 7440 ret = get_errno(setuid(arg1)); 7441 break; 7442 #endif 7443 #ifdef TARGET_NR_setgid32 7444 case TARGET_NR_setgid32: 7445 
ret = get_errno(setgid(arg1)); 7446 break; 7447 #endif 7448 #ifdef TARGET_NR_setfsuid32 7449 case TARGET_NR_setfsuid32: 7450 ret = get_errno(setfsuid(arg1)); 7451 break; 7452 #endif 7453 #ifdef TARGET_NR_setfsgid32 7454 case TARGET_NR_setfsgid32: 7455 ret = get_errno(setfsgid(arg1)); 7456 break; 7457 #endif 7458 7459 case TARGET_NR_pivot_root: 7460 goto unimplemented; 7461 #ifdef TARGET_NR_mincore 7462 case TARGET_NR_mincore: 7463 { 7464 void *a; 7465 ret = -TARGET_EFAULT; 7466 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 7467 goto efault; 7468 if (!(p = lock_user_string(arg3))) 7469 goto mincore_fail; 7470 ret = get_errno(mincore(a, arg2, p)); 7471 unlock_user(p, arg3, ret); 7472 mincore_fail: 7473 unlock_user(a, arg1, 0); 7474 } 7475 break; 7476 #endif 7477 #ifdef TARGET_NR_arm_fadvise64_64 7478 case TARGET_NR_arm_fadvise64_64: 7479 { 7480 /* 7481 * arm_fadvise64_64 looks like fadvise64_64 but 7482 * with different argument order 7483 */ 7484 abi_long temp; 7485 temp = arg3; 7486 arg3 = arg4; 7487 arg4 = temp; 7488 } 7489 #endif 7490 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 7491 #ifdef TARGET_NR_fadvise64_64 7492 case TARGET_NR_fadvise64_64: 7493 #endif 7494 #ifdef TARGET_NR_fadvise64 7495 case TARGET_NR_fadvise64: 7496 #endif 7497 #ifdef TARGET_S390X 7498 switch (arg4) { 7499 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 7500 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 7501 case 6: arg4 = POSIX_FADV_DONTNEED; break; 7502 case 7: arg4 = POSIX_FADV_NOREUSE; break; 7503 default: break; 7504 } 7505 #endif 7506 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 7507 break; 7508 #endif 7509 #ifdef TARGET_NR_madvise 7510 case TARGET_NR_madvise: 7511 /* A straight passthrough may not be safe because qemu sometimes 7512 turns private flie-backed mappings into anonymous mappings. 7513 This will break MADV_DONTNEED. 
7514 This is a hint, so ignoring and returning success is ok. */ 7515 ret = get_errno(0); 7516 break; 7517 #endif 7518 #if TARGET_ABI_BITS == 32 7519 case TARGET_NR_fcntl64: 7520 { 7521 int cmd; 7522 struct flock64 fl; 7523 struct target_flock64 *target_fl; 7524 #ifdef TARGET_ARM 7525 struct target_eabi_flock64 *target_efl; 7526 #endif 7527 7528 cmd = target_to_host_fcntl_cmd(arg2); 7529 if (cmd == -TARGET_EINVAL) 7530 return cmd; 7531 7532 switch(arg2) { 7533 case TARGET_F_GETLK64: 7534 #ifdef TARGET_ARM 7535 if (((CPUARMState *)cpu_env)->eabi) { 7536 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 7537 goto efault; 7538 fl.l_type = tswap16(target_efl->l_type); 7539 fl.l_whence = tswap16(target_efl->l_whence); 7540 fl.l_start = tswap64(target_efl->l_start); 7541 fl.l_len = tswap64(target_efl->l_len); 7542 fl.l_pid = tswap32(target_efl->l_pid); 7543 unlock_user_struct(target_efl, arg3, 0); 7544 } else 7545 #endif 7546 { 7547 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 7548 goto efault; 7549 fl.l_type = tswap16(target_fl->l_type); 7550 fl.l_whence = tswap16(target_fl->l_whence); 7551 fl.l_start = tswap64(target_fl->l_start); 7552 fl.l_len = tswap64(target_fl->l_len); 7553 fl.l_pid = tswap32(target_fl->l_pid); 7554 unlock_user_struct(target_fl, arg3, 0); 7555 } 7556 ret = get_errno(fcntl(arg1, cmd, &fl)); 7557 if (ret == 0) { 7558 #ifdef TARGET_ARM 7559 if (((CPUARMState *)cpu_env)->eabi) { 7560 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 7561 goto efault; 7562 target_efl->l_type = tswap16(fl.l_type); 7563 target_efl->l_whence = tswap16(fl.l_whence); 7564 target_efl->l_start = tswap64(fl.l_start); 7565 target_efl->l_len = tswap64(fl.l_len); 7566 target_efl->l_pid = tswap32(fl.l_pid); 7567 unlock_user_struct(target_efl, arg3, 1); 7568 } else 7569 #endif 7570 { 7571 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 7572 goto efault; 7573 target_fl->l_type = tswap16(fl.l_type); 7574 target_fl->l_whence = tswap16(fl.l_whence); 
7575 target_fl->l_start = tswap64(fl.l_start); 7576 target_fl->l_len = tswap64(fl.l_len); 7577 target_fl->l_pid = tswap32(fl.l_pid); 7578 unlock_user_struct(target_fl, arg3, 1); 7579 } 7580 } 7581 break; 7582 7583 case TARGET_F_SETLK64: 7584 case TARGET_F_SETLKW64: 7585 #ifdef TARGET_ARM 7586 if (((CPUARMState *)cpu_env)->eabi) { 7587 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 7588 goto efault; 7589 fl.l_type = tswap16(target_efl->l_type); 7590 fl.l_whence = tswap16(target_efl->l_whence); 7591 fl.l_start = tswap64(target_efl->l_start); 7592 fl.l_len = tswap64(target_efl->l_len); 7593 fl.l_pid = tswap32(target_efl->l_pid); 7594 unlock_user_struct(target_efl, arg3, 0); 7595 } else 7596 #endif 7597 { 7598 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 7599 goto efault; 7600 fl.l_type = tswap16(target_fl->l_type); 7601 fl.l_whence = tswap16(target_fl->l_whence); 7602 fl.l_start = tswap64(target_fl->l_start); 7603 fl.l_len = tswap64(target_fl->l_len); 7604 fl.l_pid = tswap32(target_fl->l_pid); 7605 unlock_user_struct(target_fl, arg3, 0); 7606 } 7607 ret = get_errno(fcntl(arg1, cmd, &fl)); 7608 break; 7609 default: 7610 ret = do_fcntl(arg1, arg2, arg3); 7611 break; 7612 } 7613 break; 7614 } 7615 #endif 7616 #ifdef TARGET_NR_cacheflush 7617 case TARGET_NR_cacheflush: 7618 /* self-modifying code is handled automatically, so nothing needed */ 7619 ret = 0; 7620 break; 7621 #endif 7622 #ifdef TARGET_NR_security 7623 case TARGET_NR_security: 7624 goto unimplemented; 7625 #endif 7626 #ifdef TARGET_NR_getpagesize 7627 case TARGET_NR_getpagesize: 7628 ret = TARGET_PAGE_SIZE; 7629 break; 7630 #endif 7631 case TARGET_NR_gettid: 7632 ret = get_errno(gettid()); 7633 break; 7634 #ifdef TARGET_NR_readahead 7635 case TARGET_NR_readahead: 7636 #if TARGET_ABI_BITS == 32 7637 if (regpairs_aligned(cpu_env)) { 7638 arg2 = arg3; 7639 arg3 = arg4; 7640 arg4 = arg5; 7641 } 7642 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 7643 #else 7644 ret = 
get_errno(readahead(arg1, arg2, arg3)); 7645 #endif 7646 break; 7647 #endif 7648 #ifdef CONFIG_ATTR 7649 #ifdef TARGET_NR_setxattr 7650 case TARGET_NR_lsetxattr: 7651 case TARGET_NR_fsetxattr: 7652 case TARGET_NR_lgetxattr: 7653 case TARGET_NR_fgetxattr: 7654 case TARGET_NR_listxattr: 7655 case TARGET_NR_llistxattr: 7656 case TARGET_NR_flistxattr: 7657 case TARGET_NR_lremovexattr: 7658 case TARGET_NR_fremovexattr: 7659 ret = -TARGET_EOPNOTSUPP; 7660 break; 7661 case TARGET_NR_setxattr: 7662 { 7663 void *p, *n, *v; 7664 p = lock_user_string(arg1); 7665 n = lock_user_string(arg2); 7666 v = lock_user(VERIFY_READ, arg3, arg4, 1); 7667 if (p && n && v) { 7668 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 7669 } else { 7670 ret = -TARGET_EFAULT; 7671 } 7672 unlock_user(p, arg1, 0); 7673 unlock_user(n, arg2, 0); 7674 unlock_user(v, arg3, 0); 7675 } 7676 break; 7677 case TARGET_NR_getxattr: 7678 { 7679 void *p, *n, *v; 7680 p = lock_user_string(arg1); 7681 n = lock_user_string(arg2); 7682 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 7683 if (p && n && v) { 7684 ret = get_errno(getxattr(p, n, v, arg4)); 7685 } else { 7686 ret = -TARGET_EFAULT; 7687 } 7688 unlock_user(p, arg1, 0); 7689 unlock_user(n, arg2, 0); 7690 unlock_user(v, arg3, arg4); 7691 } 7692 break; 7693 case TARGET_NR_removexattr: 7694 { 7695 void *p, *n; 7696 p = lock_user_string(arg1); 7697 n = lock_user_string(arg2); 7698 if (p && n) { 7699 ret = get_errno(removexattr(p, n)); 7700 } else { 7701 ret = -TARGET_EFAULT; 7702 } 7703 unlock_user(p, arg1, 0); 7704 unlock_user(n, arg2, 0); 7705 } 7706 break; 7707 #endif 7708 #endif /* CONFIG_ATTR */ 7709 #ifdef TARGET_NR_set_thread_area 7710 case TARGET_NR_set_thread_area: 7711 #if defined(TARGET_MIPS) 7712 ((CPUMIPSState *) cpu_env)->tls_value = arg1; 7713 ret = 0; 7714 break; 7715 #elif defined(TARGET_CRIS) 7716 if (arg1 & 0xff) 7717 ret = -TARGET_EINVAL; 7718 else { 7719 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 7720 ret = 0; 7721 } 7722 break; 7723 #elif 
defined(TARGET_I386) && defined(TARGET_ABI32) 7724 ret = do_set_thread_area(cpu_env, arg1); 7725 break; 7726 #else 7727 goto unimplemented_nowarn; 7728 #endif 7729 #endif 7730 #ifdef TARGET_NR_get_thread_area 7731 case TARGET_NR_get_thread_area: 7732 #if defined(TARGET_I386) && defined(TARGET_ABI32) 7733 ret = do_get_thread_area(cpu_env, arg1); 7734 #else 7735 goto unimplemented_nowarn; 7736 #endif 7737 #endif 7738 #ifdef TARGET_NR_getdomainname 7739 case TARGET_NR_getdomainname: 7740 goto unimplemented_nowarn; 7741 #endif 7742 7743 #ifdef TARGET_NR_clock_gettime 7744 case TARGET_NR_clock_gettime: 7745 { 7746 struct timespec ts; 7747 ret = get_errno(clock_gettime(arg1, &ts)); 7748 if (!is_error(ret)) { 7749 host_to_target_timespec(arg2, &ts); 7750 } 7751 break; 7752 } 7753 #endif 7754 #ifdef TARGET_NR_clock_getres 7755 case TARGET_NR_clock_getres: 7756 { 7757 struct timespec ts; 7758 ret = get_errno(clock_getres(arg1, &ts)); 7759 if (!is_error(ret)) { 7760 host_to_target_timespec(arg2, &ts); 7761 } 7762 break; 7763 } 7764 #endif 7765 #ifdef TARGET_NR_clock_nanosleep 7766 case TARGET_NR_clock_nanosleep: 7767 { 7768 struct timespec ts; 7769 target_to_host_timespec(&ts, arg3); 7770 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); 7771 if (arg4) 7772 host_to_target_timespec(arg4, &ts); 7773 break; 7774 } 7775 #endif 7776 7777 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 7778 case TARGET_NR_set_tid_address: 7779 ret = get_errno(set_tid_address((int *)g2h(arg1))); 7780 break; 7781 #endif 7782 7783 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 7784 case TARGET_NR_tkill: 7785 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 7786 break; 7787 #endif 7788 7789 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 7790 case TARGET_NR_tgkill: 7791 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 7792 target_to_host_signal(arg3))); 7793 break; 7794 #endif 7795 7796 #ifdef TARGET_NR_set_robust_list 7797 case TARGET_NR_set_robust_list: 7798 goto unimplemented_nowarn; 7799 #endif 7800 7801 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat) 7802 case TARGET_NR_utimensat: 7803 { 7804 struct timespec *tsp, ts[2]; 7805 if (!arg3) { 7806 tsp = NULL; 7807 } else { 7808 target_to_host_timespec(ts, arg3); 7809 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 7810 tsp = ts; 7811 } 7812 if (!arg2) 7813 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 7814 else { 7815 if (!(p = lock_user_string(arg2))) { 7816 ret = -TARGET_EFAULT; 7817 goto fail; 7818 } 7819 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 7820 unlock_user(p, arg2, 0); 7821 } 7822 } 7823 break; 7824 #endif 7825 #if defined(CONFIG_USE_NPTL) 7826 case TARGET_NR_futex: 7827 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 7828 break; 7829 #endif 7830 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 7831 case TARGET_NR_inotify_init: 7832 ret = get_errno(sys_inotify_init()); 7833 break; 7834 #endif 7835 #ifdef CONFIG_INOTIFY1 7836 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 7837 case TARGET_NR_inotify_init1: 7838 ret = get_errno(sys_inotify_init1(arg1)); 7839 break; 7840 #endif 7841 #endif 7842 #if 
defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 7843 case TARGET_NR_inotify_add_watch: 7844 p = lock_user_string(arg2); 7845 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 7846 unlock_user(p, arg2, 0); 7847 break; 7848 #endif 7849 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 7850 case TARGET_NR_inotify_rm_watch: 7851 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 7852 break; 7853 #endif 7854 7855 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 7856 case TARGET_NR_mq_open: 7857 { 7858 struct mq_attr posix_mq_attr; 7859 7860 p = lock_user_string(arg1 - 1); 7861 if (arg4 != 0) 7862 copy_from_user_mq_attr (&posix_mq_attr, arg4); 7863 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 7864 unlock_user (p, arg1, 0); 7865 } 7866 break; 7867 7868 case TARGET_NR_mq_unlink: 7869 p = lock_user_string(arg1 - 1); 7870 ret = get_errno(mq_unlink(p)); 7871 unlock_user (p, arg1, 0); 7872 break; 7873 7874 case TARGET_NR_mq_timedsend: 7875 { 7876 struct timespec ts; 7877 7878 p = lock_user (VERIFY_READ, arg2, arg3, 1); 7879 if (arg5 != 0) { 7880 target_to_host_timespec(&ts, arg5); 7881 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 7882 host_to_target_timespec(arg5, &ts); 7883 } 7884 else 7885 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 7886 unlock_user (p, arg2, arg3); 7887 } 7888 break; 7889 7890 case TARGET_NR_mq_timedreceive: 7891 { 7892 struct timespec ts; 7893 unsigned int prio; 7894 7895 p = lock_user (VERIFY_READ, arg2, arg3, 1); 7896 if (arg5 != 0) { 7897 target_to_host_timespec(&ts, arg5); 7898 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); 7899 host_to_target_timespec(arg5, &ts); 7900 } 7901 else 7902 ret = get_errno(mq_receive(arg1, p, arg3, &prio)); 7903 unlock_user (p, arg2, arg3); 7904 if (arg4 != 0) 7905 put_user_u32(prio, arg4); 7906 } 7907 break; 7908 7909 /* Not implemented for now... 
*/ 7910 /* case TARGET_NR_mq_notify: */ 7911 /* break; */ 7912 7913 case TARGET_NR_mq_getsetattr: 7914 { 7915 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 7916 ret = 0; 7917 if (arg3 != 0) { 7918 ret = mq_getattr(arg1, &posix_mq_attr_out); 7919 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 7920 } 7921 if (arg2 != 0) { 7922 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 7923 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 7924 } 7925 7926 } 7927 break; 7928 #endif 7929 7930 #ifdef CONFIG_SPLICE 7931 #ifdef TARGET_NR_tee 7932 case TARGET_NR_tee: 7933 { 7934 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 7935 } 7936 break; 7937 #endif 7938 #ifdef TARGET_NR_splice 7939 case TARGET_NR_splice: 7940 { 7941 loff_t loff_in, loff_out; 7942 loff_t *ploff_in = NULL, *ploff_out = NULL; 7943 if(arg2) { 7944 get_user_u64(loff_in, arg2); 7945 ploff_in = &loff_in; 7946 } 7947 if(arg4) { 7948 get_user_u64(loff_out, arg2); 7949 ploff_out = &loff_out; 7950 } 7951 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 7952 } 7953 break; 7954 #endif 7955 #ifdef TARGET_NR_vmsplice 7956 case TARGET_NR_vmsplice: 7957 { 7958 int count = arg3; 7959 struct iovec *vec; 7960 7961 vec = alloca(count * sizeof(struct iovec)); 7962 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) 7963 goto efault; 7964 ret = get_errno(vmsplice(arg1, vec, count, arg4)); 7965 unlock_iovec(vec, arg2, count, 0); 7966 } 7967 break; 7968 #endif 7969 #endif /* CONFIG_SPLICE */ 7970 #ifdef CONFIG_EVENTFD 7971 #if defined(TARGET_NR_eventfd) 7972 case TARGET_NR_eventfd: 7973 ret = get_errno(eventfd(arg1, 0)); 7974 break; 7975 #endif 7976 #if defined(TARGET_NR_eventfd2) 7977 case TARGET_NR_eventfd2: 7978 ret = get_errno(eventfd(arg1, arg2)); 7979 break; 7980 #endif 7981 #endif /* CONFIG_EVENTFD */ 7982 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 7983 case TARGET_NR_fallocate: 7984 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 7985 break; 7986 #endif 7987 #if 
defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* On 32-bit ABIs the 64-bit offset and nbytes arrive split across
         * two registers each.  NOTE(review): on MIPS the pairs start one
         * slot later (arg3..arg6, flags in arg7) — presumably because of
         * register-pair alignment in the o32 calling convention; confirm
         * against the target ABI before touching. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        /* 64-bit ABI: offsets are passed whole. */
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered
         * (flags moved to arg2 so the 64-bit pairs stay aligned). */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* Size hint is ignored by modern kernels; forward as-is. */
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
        {
            /* Translate the optional guest epoll_event (arg4) into host
             * layout; arg4 may legitimately be 0 for EPOLL_CTL_DEL. */
            struct epoll_event ep;
            struct epoll_event *epp = 0;
            if (arg4) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    goto efault;
                }
                ep.events = tswap32(target_ep->events);
                /* The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
                epp = &ep;
            }
            ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
            break;
        }
#endif

/* epoll_wait and epoll_pwait share one implementation below; this macro
 * records whether the pwait variant is compiled in. */
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
        {
            struct target_epoll_event *target_ep;
            struct epoll_event *ep;
            int epfd = arg1;
            int maxevents = arg3;
            int timeout = arg4;

            /* Lock the guest's result array for writing up front so either
             * branch below can fill it in. */
            target_ep = lock_user(VERIFY_WRITE, arg2,
                                  maxevents * sizeof(struct target_epoll_event), 1);
            if (!target_ep) {
                goto efault;
            }

            /* Host-side scratch array; sized by the guest's maxevents. */
            ep = alloca(maxevents * sizeof(struct epoll_event));

            /* Dispatch on the actual syscall number since both cases fall
             * into this shared block. */
            switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
            case TARGET_NR_epoll_pwait:
            {
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                if (arg5) {
                    target_set = lock_user(VERIFY_READ, arg5,
                                           sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        /* Release the result array before bailing out. */
                        unlock_user(target_ep, arg2, 0);
                        goto efault;
                    }
                    target_to_host_sigset(set, target_set);
                    unlock_user(target_set, arg5, 0);
                } else {
                    set = NULL;
                }

                ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
                break;
            }
#endif
#if defined(TARGET_NR_epoll_wait)
            case TARGET_NR_epoll_wait:
                ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
                break;
#endif
            default:
                ret = -TARGET_ENOSYS;
            }
            if (!is_error(ret)) {
                /* ret is the number of ready events; byte-swap each one
                 * back into the guest array. */
                int i;
                for (i = 0; i < ret; i++) {
                    target_ep[i].events = tswap32(ep[i].events);
                    target_ep[i].data.u64 = tswap64(ep[i].data.u64);
                }
            }
            unlock_user(target_ep, arg2, ret * sizeof(struct
                                        target_epoll_event));
            break;
        }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            /* Copy the guest's new-limit struct into host byte order. */
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back out to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
    default:
    unimplemented:
        /* Unknown syscall: log loudly, then fall through to ENOSYS. */
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    /* Target for known-but-unimplemented syscalls that should not spam
     * the log; only compiled in when some case can reach it. */
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
    /* Common exit path: optionally trace the return value, then return
     * the (possibly negative-errno) result to the caller. */
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    /* Shared bad-guest-pointer exit used by the cases above. */
    ret = -TARGET_EFAULT;
    goto fail;
}