/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu-xattr.h"
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"

#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])


#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}


#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif
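/*
 * As a rough illustration of how the wrappers above are meant to be used:
 * a line such as
 *
 *   _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *             uint, count);
 *
 * expands to a small static function that forwards straight to the host
 * kernel via syscall(__NR_sys_getdents, fd, dirp, count), with
 * __NR_sys_getdents aliased to the real __NR_getdents above.  The sys_
 * prefix keeps the wrappers from colliding with declarations the host
 * libc may already provide for the same calls.
 */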
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);

static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
    { 0, 0, 0, 0 }
};
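/*
 * A quick sketch of how the table above is consumed (the generic helper
 * that walks it lives elsewhere in linux-user): each row is roughly
 * (target_mask, target_bits, host_mask, host_bits), so a translator scans
 * the rows and, whenever (target_flags & target_mask) == target_bits, ORs
 * host_bits into the host flag word.  That way a guest open() flag word
 * such as O_WRONLY|O_CREAT ends up as the host's own O_WRONLY|O_CREAT even
 * when the numeric constants differ between the two architectures.
 */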
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct the Linux kernel uses).
     */

    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}

#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif
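/*
 * For illustration: open(2)/openat(2) are variadic, and the kernel only
 * reads the mode argument when O_CREAT is present in flags, which is why
 * the wrapper above forwards mode conditionally instead of always passing
 * it through.
 */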
389 */ 390 if ((flags & O_CREAT) != 0) { 391 return (openat(dirfd, pathname, flags, mode)); 392 } 393 return (openat(dirfd, pathname, flags)); 394 } 395 #endif 396 #ifdef TARGET_NR_readlinkat 397 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz) 398 { 399 return (readlinkat(dirfd, pathname, buf, bufsiz)); 400 } 401 #endif 402 #ifdef TARGET_NR_renameat 403 static int sys_renameat(int olddirfd, const char *oldpath, 404 int newdirfd, const char *newpath) 405 { 406 return (renameat(olddirfd, oldpath, newdirfd, newpath)); 407 } 408 #endif 409 #ifdef TARGET_NR_symlinkat 410 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath) 411 { 412 return (symlinkat(oldpath, newdirfd, newpath)); 413 } 414 #endif 415 #ifdef TARGET_NR_unlinkat 416 static int sys_unlinkat(int dirfd, const char *pathname, int flags) 417 { 418 return (unlinkat(dirfd, pathname, flags)); 419 } 420 #endif 421 #else /* !CONFIG_ATFILE */ 422 423 /* 424 * Try direct syscalls instead 425 */ 426 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 427 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode) 428 #endif 429 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 430 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode) 431 #endif 432 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 433 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname, 434 uid_t,owner,gid_t,group,int,flags) 435 #endif 436 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 437 defined(__NR_fstatat64) 438 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname, 439 struct stat *,buf,int,flags) 440 #endif 441 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 442 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname, 443 const struct timeval *,times) 444 #endif 445 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \ 446 defined(__NR_newfstatat) 447 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname, 448 struct stat *,buf,int,flags) 449 #endif 450 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 451 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath, 452 int,newdirfd,const char *,newpath,int,flags) 453 #endif 454 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 455 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode) 456 #endif 457 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 458 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname, 459 mode_t,mode,dev_t,dev) 460 #endif 461 #if defined(TARGET_NR_openat) && defined(__NR_openat) 462 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode) 463 #endif 464 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 465 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname, 466 char *,buf,size_t,bufsize) 467 #endif 468 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 469 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath, 470 int,newdirfd,const char *,newpath) 471 #endif 472 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 473 _syscall3(int,sys_symlinkat,const char *,oldpath, 474 int,newdirfd,const char *,newpath) 475 #endif 476 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 477 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags) 478 #endif 479 480 #endif /* CONFIG_ATFILE */ 481 482 #ifdef CONFIG_UTIMENSAT 483 static int sys_utimensat(int dirfd, const char *pathname, 484 const 
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
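/*
 * A rough illustration of why the helper above matters (the syscall
 * dispatch code that consumes it lives further down in this file): on
 * ARM EABI a 64-bit syscall argument must start in an even-numbered
 * register, so a call such as ftruncate64(fd, len) arrives as
 *
 *   r0 = fd,  r1 = unused padding,  {r2,r3} = len
 *
 * and the emulator has to skip the padding slot before reassembling the
 * 64-bit value.  On OABI ARM and most other targets the arguments are
 * packed without the gap.
 */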
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]           = TARGET_EIDRM,
    [ECHRNG]          = TARGET_ECHRNG,
    [EL2NSYNC]        = TARGET_EL2NSYNC,
    [EL3HLT]          = TARGET_EL3HLT,
    [EL3RST]          = TARGET_EL3RST,
    [ELNRNG]          = TARGET_ELNRNG,
    [EUNATCH]         = TARGET_EUNATCH,
    [ENOCSI]          = TARGET_ENOCSI,
    [EL2HLT]          = TARGET_EL2HLT,
    [EDEADLK]         = TARGET_EDEADLK,
    [ENOLCK]          = TARGET_ENOLCK,
    [EBADE]           = TARGET_EBADE,
    [EBADR]           = TARGET_EBADR,
    [EXFULL]          = TARGET_EXFULL,
    [ENOANO]          = TARGET_ENOANO,
    [EBADRQC]         = TARGET_EBADRQC,
    [EBADSLT]         = TARGET_EBADSLT,
    [EBFONT]          = TARGET_EBFONT,
    [ENOSTR]          = TARGET_ENOSTR,
    [ENODATA]         = TARGET_ENODATA,
    [ETIME]           = TARGET_ETIME,
    [ENOSR]           = TARGET_ENOSR,
    [ENONET]          = TARGET_ENONET,
    [ENOPKG]          = TARGET_ENOPKG,
    [EREMOTE]         = TARGET_EREMOTE,
    [ENOLINK]         = TARGET_ENOLINK,
    [EADV]            = TARGET_EADV,
    [ESRMNT]          = TARGET_ESRMNT,
    [ECOMM]           = TARGET_ECOMM,
    [EPROTO]          = TARGET_EPROTO,
    [EDOTDOT]         = TARGET_EDOTDOT,
    [EMULTIHOP]       = TARGET_EMULTIHOP,
    [EBADMSG]         = TARGET_EBADMSG,
    [ENAMETOOLONG]    = TARGET_ENAMETOOLONG,
    [EOVERFLOW]       = TARGET_EOVERFLOW,
    [ENOTUNIQ]        = TARGET_ENOTUNIQ,
    [EBADFD]          = TARGET_EBADFD,
    [EREMCHG]         = TARGET_EREMCHG,
    [ELIBACC]         = TARGET_ELIBACC,
    [ELIBBAD]         = TARGET_ELIBBAD,
    [ELIBSCN]         = TARGET_ELIBSCN,
    [ELIBMAX]         = TARGET_ELIBMAX,
    [ELIBEXEC]        = TARGET_ELIBEXEC,
    [EILSEQ]          = TARGET_EILSEQ,
    [ENOSYS]          = TARGET_ENOSYS,
    [ELOOP]           = TARGET_ELOOP,
    [ERESTART]        = TARGET_ERESTART,
    [ESTRPIPE]        = TARGET_ESTRPIPE,
    [ENOTEMPTY]       = TARGET_ENOTEMPTY,
    [EUSERS]          = TARGET_EUSERS,
    [ENOTSOCK]        = TARGET_ENOTSOCK,
    [EDESTADDRREQ]    = TARGET_EDESTADDRREQ,
    [EMSGSIZE]        = TARGET_EMSGSIZE,
    [EPROTOTYPE]      = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]     = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]      = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]    = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]    = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]      = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]   = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]        = TARGET_ENETDOWN,
    [ENETUNREACH]     = TARGET_ENETUNREACH,
    [ENETRESET]       = TARGET_ENETRESET,
    [ECONNABORTED]    = TARGET_ECONNABORTED,
    [ECONNRESET]      = TARGET_ECONNRESET,
    [ENOBUFS]         = TARGET_ENOBUFS,
    [EISCONN]         = TARGET_EISCONN,
    [ENOTCONN]        = TARGET_ENOTCONN,
    [EUCLEAN]         = TARGET_EUCLEAN,
    [ENOTNAM]         = TARGET_ENOTNAM,
    [ENAVAIL]         = TARGET_ENAVAIL,
    [EISNAM]          = TARGET_EISNAM,
    [EREMOTEIO]       = TARGET_EREMOTEIO,
    [ESHUTDOWN]       = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]    = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]       = TARGET_ETIMEDOUT,
    [ECONNREFUSED]    = TARGET_ECONNREFUSED,
    [EHOSTDOWN]       = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]    = TARGET_EHOSTUNREACH,
    [EALREADY]        = TARGET_EALREADY,
    [EINPROGRESS]     = TARGET_EINPROGRESS,
    [ESTALE]          = TARGET_ESTALE,
    [ECANCELED]       = TARGET_ECANCELED,
    [ENOMEDIUM]       = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]     = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]          = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]     = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]     = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]    = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]      = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
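/*
 * For illustration of the convention the helpers above establish: emulated
 * syscalls report failure by returning a negative *target* errno rather
 * than the -1/errno pair the host libc uses.  A typical call site looks
 * roughly like
 *
 *   ret = get_errno(open(path, flags));   turns -1 with errno=EACCES
 *                                         into -TARGET_EACCES
 *   if (is_error(ret)) ...                true for any value in [-4095, -1]
 *
 * which mirrors the way the Linux kernel itself encodes errors in return
 * values.
 */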
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
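/*
 * A brief sketch of what the two fdset helpers above are doing: the guest
 * fd_set is read and written as an array of abi_ulong words of
 * TARGET_ABI_BITS bits each, in guest byte order, while the host fd_set is
 * only touched through FD_SET/FD_ISSET.  Walking the bits one by one is
 * slower than a plain memcpy but stays correct when guest and host differ
 * in word size or endianness.
 */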
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
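/*
 * A short note on the access pattern used by the copy helpers above and
 * below, sketched from how they are used here: lock_user_struct() maps or
 * copies the guest structure into host memory (VERIFY_READ/VERIFY_WRITE
 * declare the intended direction), __get_user()/__put_user() move single
 * fields while converting endianness, and the final argument of
 * unlock_user_struct() says whether the host copy must be written back to
 * guest memory (1) or can simply be discarded (0).
 */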
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops.  The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1".  We'll fix that here if needed.
     * The Linux kernel has a similar fixup.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
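/*
 * A concrete example of the AF_UNIX length fixup above, for illustration:
 * a guest that calls bind() with addrlen set to
 * offsetof(struct sockaddr_un, sun_path) + strlen(path) has chopped off
 * the terminating NUL of sun_path.  When the byte just past the supplied
 * length is still zero, the length is bumped by one so the host kernel
 * sees a properly terminated path, matching the tolerance the Linux
 * kernel itself shows for such callers.
 */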
/* ??? Should this also swap msgh->name? */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
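/*
 * Rough background for the two cmsg converters (the reverse direction
 * follows below): control messages cannot be copied verbatim because the
 * guest and host may disagree on the cmsghdr layout, its alignment and the
 * byte order of its fields, so each header is rebuilt with CMSG_LEN and
 * CMSG_SPACE on the host side and their TARGET_ counterparts on the guest
 * side.  Only SCM_RIGHTS payloads get their contents translated (each
 * passed file descriptor is a 32-bit integer that needs byte swapping);
 * any other ancillary payload is copied through untouched with a warning.
 */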
/* ??? Should this also swap msgh->name? */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
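/*
 * The TARGET_SO_* to SO_* renaming above (and the mirror-image mapping in
 * do_getsockopt() below) exists because socket option numbers are not
 * uniform across Linux architectures: several SO_* constants have, for
 * instance, different values on SPARC and MIPS guests than on an x86 host,
 * so the guest's numeric value cannot simply be passed straight through to
 * setsockopt().
 */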
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* FIXME
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapal(target_vec[i].iov_base);
        vec[i].iov_len = tswapal(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value.  We must call writev even
               if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapal(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
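/*
 * A note on the buffer size in do_bind() above, for illustration: it
 * reserves addrlen+1 bytes because target_to_host_sockaddr() may grow an
 * AF_UNIX address by one byte when it appends the missing NUL terminator
 * to sun_path, and the extra slot keeps that fixup from writing past the
 * end of the stack buffer.
 */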
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* FIXME */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapal(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
*/ 1947 static abi_long do_getsockname(int fd, abi_ulong target_addr, 1948 abi_ulong target_addrlen_addr) 1949 { 1950 socklen_t addrlen; 1951 void *addr; 1952 abi_long ret; 1953 1954 if (get_user_u32(addrlen, target_addrlen_addr)) 1955 return -TARGET_EFAULT; 1956 1957 if ((int)addrlen < 0) { 1958 return -TARGET_EINVAL; 1959 } 1960 1961 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 1962 return -TARGET_EFAULT; 1963 1964 addr = alloca(addrlen); 1965 1966 ret = get_errno(getsockname(fd, addr, &addrlen)); 1967 if (!is_error(ret)) { 1968 host_to_target_sockaddr(target_addr, addr, addrlen); 1969 if (put_user_u32(addrlen, target_addrlen_addr)) 1970 ret = -TARGET_EFAULT; 1971 } 1972 return ret; 1973 } 1974 1975 /* do_socketpair() Must return target values and target errnos. */ 1976 static abi_long do_socketpair(int domain, int type, int protocol, 1977 abi_ulong target_tab_addr) 1978 { 1979 int tab[2]; 1980 abi_long ret; 1981 1982 ret = get_errno(socketpair(domain, type, protocol, tab)); 1983 if (!is_error(ret)) { 1984 if (put_user_s32(tab[0], target_tab_addr) 1985 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 1986 ret = -TARGET_EFAULT; 1987 } 1988 return ret; 1989 } 1990 1991 /* do_sendto() Must return target values and target errnos. */ 1992 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 1993 abi_ulong target_addr, socklen_t addrlen) 1994 { 1995 void *addr; 1996 void *host_msg; 1997 abi_long ret; 1998 1999 if ((int)addrlen < 0) { 2000 return -TARGET_EINVAL; 2001 } 2002 2003 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2004 if (!host_msg) 2005 return -TARGET_EFAULT; 2006 if (target_addr) { 2007 addr = alloca(addrlen); 2008 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2009 if (ret) { 2010 unlock_user(host_msg, msg, 0); 2011 return ret; 2012 } 2013 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2014 } else { 2015 ret = get_errno(send(fd, host_msg, len, flags)); 2016 } 2017 unlock_user(host_msg, msg, 0); 2018 return ret; 2019 } 2020 2021 /* do_recvfrom() Must return target values and target errnos. */ 2022 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2023 abi_ulong target_addr, 2024 abi_ulong target_addrlen) 2025 { 2026 socklen_t addrlen; 2027 void *addr; 2028 void *host_msg; 2029 abi_long ret; 2030 2031 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2032 if (!host_msg) 2033 return -TARGET_EFAULT; 2034 if (target_addr) { 2035 if (get_user_u32(addrlen, target_addrlen)) { 2036 ret = -TARGET_EFAULT; 2037 goto fail; 2038 } 2039 if ((int)addrlen < 0) { 2040 ret = -TARGET_EINVAL; 2041 goto fail; 2042 } 2043 addr = alloca(addrlen); 2044 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2045 } else { 2046 addr = NULL; /* To keep compiler quiet. */ 2047 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2048 } 2049 if (!is_error(ret)) { 2050 if (target_addr) { 2051 host_to_target_sockaddr(target_addr, addr, addrlen); 2052 if (put_user_u32(addrlen, target_addrlen)) { 2053 ret = -TARGET_EFAULT; 2054 goto fail; 2055 } 2056 } 2057 unlock_user(host_msg, msg, len); 2058 } else { 2059 fail: 2060 unlock_user(host_msg, msg, 0); 2061 } 2062 return ret; 2063 } 2064 2065 #ifdef TARGET_NR_socketcall 2066 /* do_socketcall() Must return target values and target errnos. 
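   On targets such as i386 the guest kernel ABI multiplexes every socket
   operation through a single socketcall(2) syscall: 'num' selects the
   operation (SOCKOP_socket, SOCKOP_bind, ...) and 'vptr' points at an
   array of abi_ulong arguments in guest memory.  Each case below fetches
   its arguments with get_user_ual() at offsets that are multiples of
   n = sizeof(abi_ulong) and forwards them to the matching helper or
   directly to the host call.  As a rough illustration (not taken from
   this file), a guest libc call such as socket(domain, type, protocol)
   arrives here as num == SOCKOP_socket with vptr pointing at the packed
   array { domain, type, protocol }.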
*/ 2067 static abi_long do_socketcall(int num, abi_ulong vptr) 2068 { 2069 abi_long ret; 2070 const int n = sizeof(abi_ulong); 2071 2072 switch(num) { 2073 case SOCKOP_socket: 2074 { 2075 abi_ulong domain, type, protocol; 2076 2077 if (get_user_ual(domain, vptr) 2078 || get_user_ual(type, vptr + n) 2079 || get_user_ual(protocol, vptr + 2 * n)) 2080 return -TARGET_EFAULT; 2081 2082 ret = do_socket(domain, type, protocol); 2083 } 2084 break; 2085 case SOCKOP_bind: 2086 { 2087 abi_ulong sockfd; 2088 abi_ulong target_addr; 2089 socklen_t addrlen; 2090 2091 if (get_user_ual(sockfd, vptr) 2092 || get_user_ual(target_addr, vptr + n) 2093 || get_user_ual(addrlen, vptr + 2 * n)) 2094 return -TARGET_EFAULT; 2095 2096 ret = do_bind(sockfd, target_addr, addrlen); 2097 } 2098 break; 2099 case SOCKOP_connect: 2100 { 2101 abi_ulong sockfd; 2102 abi_ulong target_addr; 2103 socklen_t addrlen; 2104 2105 if (get_user_ual(sockfd, vptr) 2106 || get_user_ual(target_addr, vptr + n) 2107 || get_user_ual(addrlen, vptr + 2 * n)) 2108 return -TARGET_EFAULT; 2109 2110 ret = do_connect(sockfd, target_addr, addrlen); 2111 } 2112 break; 2113 case SOCKOP_listen: 2114 { 2115 abi_ulong sockfd, backlog; 2116 2117 if (get_user_ual(sockfd, vptr) 2118 || get_user_ual(backlog, vptr + n)) 2119 return -TARGET_EFAULT; 2120 2121 ret = get_errno(listen(sockfd, backlog)); 2122 } 2123 break; 2124 case SOCKOP_accept: 2125 { 2126 abi_ulong sockfd; 2127 abi_ulong target_addr, target_addrlen; 2128 2129 if (get_user_ual(sockfd, vptr) 2130 || get_user_ual(target_addr, vptr + n) 2131 || get_user_ual(target_addrlen, vptr + 2 * n)) 2132 return -TARGET_EFAULT; 2133 2134 ret = do_accept(sockfd, target_addr, target_addrlen); 2135 } 2136 break; 2137 case SOCKOP_getsockname: 2138 { 2139 abi_ulong sockfd; 2140 abi_ulong target_addr, target_addrlen; 2141 2142 if (get_user_ual(sockfd, vptr) 2143 || get_user_ual(target_addr, vptr + n) 2144 || get_user_ual(target_addrlen, vptr + 2 * n)) 2145 return -TARGET_EFAULT; 2146 2147 ret = do_getsockname(sockfd, target_addr, target_addrlen); 2148 } 2149 break; 2150 case SOCKOP_getpeername: 2151 { 2152 abi_ulong sockfd; 2153 abi_ulong target_addr, target_addrlen; 2154 2155 if (get_user_ual(sockfd, vptr) 2156 || get_user_ual(target_addr, vptr + n) 2157 || get_user_ual(target_addrlen, vptr + 2 * n)) 2158 return -TARGET_EFAULT; 2159 2160 ret = do_getpeername(sockfd, target_addr, target_addrlen); 2161 } 2162 break; 2163 case SOCKOP_socketpair: 2164 { 2165 abi_ulong domain, type, protocol; 2166 abi_ulong tab; 2167 2168 if (get_user_ual(domain, vptr) 2169 || get_user_ual(type, vptr + n) 2170 || get_user_ual(protocol, vptr + 2 * n) 2171 || get_user_ual(tab, vptr + 3 * n)) 2172 return -TARGET_EFAULT; 2173 2174 ret = do_socketpair(domain, type, protocol, tab); 2175 } 2176 break; 2177 case SOCKOP_send: 2178 { 2179 abi_ulong sockfd; 2180 abi_ulong msg; 2181 size_t len; 2182 abi_ulong flags; 2183 2184 if (get_user_ual(sockfd, vptr) 2185 || get_user_ual(msg, vptr + n) 2186 || get_user_ual(len, vptr + 2 * n) 2187 || get_user_ual(flags, vptr + 3 * n)) 2188 return -TARGET_EFAULT; 2189 2190 ret = do_sendto(sockfd, msg, len, flags, 0, 0); 2191 } 2192 break; 2193 case SOCKOP_recv: 2194 { 2195 abi_ulong sockfd; 2196 abi_ulong msg; 2197 size_t len; 2198 abi_ulong flags; 2199 2200 if (get_user_ual(sockfd, vptr) 2201 || get_user_ual(msg, vptr + n) 2202 || get_user_ual(len, vptr + 2 * n) 2203 || get_user_ual(flags, vptr + 3 * n)) 2204 return -TARGET_EFAULT; 2205 2206 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0); 2207 } 2208 break; 
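    /* sendto/recvfrom carry six packed arguments: fd, buffer, length,
       flags, sockaddr pointer and sockaddr length. */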
2209 case SOCKOP_sendto: 2210 { 2211 abi_ulong sockfd; 2212 abi_ulong msg; 2213 size_t len; 2214 abi_ulong flags; 2215 abi_ulong addr; 2216 socklen_t addrlen; 2217 2218 if (get_user_ual(sockfd, vptr) 2219 || get_user_ual(msg, vptr + n) 2220 || get_user_ual(len, vptr + 2 * n) 2221 || get_user_ual(flags, vptr + 3 * n) 2222 || get_user_ual(addr, vptr + 4 * n) 2223 || get_user_ual(addrlen, vptr + 5 * n)) 2224 return -TARGET_EFAULT; 2225 2226 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen); 2227 } 2228 break; 2229 case SOCKOP_recvfrom: 2230 { 2231 abi_ulong sockfd; 2232 abi_ulong msg; 2233 size_t len; 2234 abi_ulong flags; 2235 abi_ulong addr; 2236 socklen_t addrlen; 2237 2238 if (get_user_ual(sockfd, vptr) 2239 || get_user_ual(msg, vptr + n) 2240 || get_user_ual(len, vptr + 2 * n) 2241 || get_user_ual(flags, vptr + 3 * n) 2242 || get_user_ual(addr, vptr + 4 * n) 2243 || get_user_ual(addrlen, vptr + 5 * n)) 2244 return -TARGET_EFAULT; 2245 2246 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen); 2247 } 2248 break; 2249 case SOCKOP_shutdown: 2250 { 2251 abi_ulong sockfd, how; 2252 2253 if (get_user_ual(sockfd, vptr) 2254 || get_user_ual(how, vptr + n)) 2255 return -TARGET_EFAULT; 2256 2257 ret = get_errno(shutdown(sockfd, how)); 2258 } 2259 break; 2260 case SOCKOP_sendmsg: 2261 case SOCKOP_recvmsg: 2262 { 2263 abi_ulong fd; 2264 abi_ulong target_msg; 2265 abi_ulong flags; 2266 2267 if (get_user_ual(fd, vptr) 2268 || get_user_ual(target_msg, vptr + n) 2269 || get_user_ual(flags, vptr + 2 * n)) 2270 return -TARGET_EFAULT; 2271 2272 ret = do_sendrecvmsg(fd, target_msg, flags, 2273 (num == SOCKOP_sendmsg)); 2274 } 2275 break; 2276 case SOCKOP_setsockopt: 2277 { 2278 abi_ulong sockfd; 2279 abi_ulong level; 2280 abi_ulong optname; 2281 abi_ulong optval; 2282 socklen_t optlen; 2283 2284 if (get_user_ual(sockfd, vptr) 2285 || get_user_ual(level, vptr + n) 2286 || get_user_ual(optname, vptr + 2 * n) 2287 || get_user_ual(optval, vptr + 3 * n) 2288 || get_user_ual(optlen, vptr + 4 * n)) 2289 return -TARGET_EFAULT; 2290 2291 ret = do_setsockopt(sockfd, level, optname, optval, optlen); 2292 } 2293 break; 2294 case SOCKOP_getsockopt: 2295 { 2296 abi_ulong sockfd; 2297 abi_ulong level; 2298 abi_ulong optname; 2299 abi_ulong optval; 2300 socklen_t optlen; 2301 2302 if (get_user_ual(sockfd, vptr) 2303 || get_user_ual(level, vptr + n) 2304 || get_user_ual(optname, vptr + 2 * n) 2305 || get_user_ual(optval, vptr + 3 * n) 2306 || get_user_ual(optlen, vptr + 4 * n)) 2307 return -TARGET_EFAULT; 2308 2309 ret = do_getsockopt(sockfd, level, optname, optval, optlen); 2310 } 2311 break; 2312 default: 2313 gemu_log("Unsupported socketcall: %d\n", num); 2314 ret = -TARGET_ENOSYS; 2315 break; 2316 } 2317 return ret; 2318 } 2319 #endif 2320 2321 #define N_SHM_REGIONS 32 2322 2323 static struct shm_region { 2324 abi_ulong start; 2325 abi_ulong size; 2326 } shm_regions[N_SHM_REGIONS]; 2327 2328 struct target_ipc_perm 2329 { 2330 abi_long __key; 2331 abi_ulong uid; 2332 abi_ulong gid; 2333 abi_ulong cuid; 2334 abi_ulong cgid; 2335 unsigned short int mode; 2336 unsigned short int __pad1; 2337 unsigned short int __seq; 2338 unsigned short int __pad2; 2339 abi_ulong __unused1; 2340 abi_ulong __unused2; 2341 }; 2342 2343 struct target_semid_ds 2344 { 2345 struct target_ipc_perm sem_perm; 2346 abi_ulong sem_otime; 2347 abi_ulong __unused1; 2348 abi_ulong sem_ctime; 2349 abi_ulong __unused2; 2350 abi_ulong sem_nsems; 2351 abi_ulong __unused3; 2352 abi_ulong __unused4; 2353 }; 2354 2355 static inline abi_long 
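/* SysV IPC conversion helpers.  Note that target_to_host_ipc_perm() and
   host_to_target_ipc_perm() lock the surrounding target_semid_ds and use
   its sem_perm member; this also works for the msqid_ds and shmid_ds
   callers below because the permission structure is the first field in
   each of the target_*_ds layouts. */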
target_to_host_ipc_perm(struct ipc_perm *host_ip, 2356 abi_ulong target_addr) 2357 { 2358 struct target_ipc_perm *target_ip; 2359 struct target_semid_ds *target_sd; 2360 2361 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2362 return -TARGET_EFAULT; 2363 target_ip = &(target_sd->sem_perm); 2364 host_ip->__key = tswapal(target_ip->__key); 2365 host_ip->uid = tswapal(target_ip->uid); 2366 host_ip->gid = tswapal(target_ip->gid); 2367 host_ip->cuid = tswapal(target_ip->cuid); 2368 host_ip->cgid = tswapal(target_ip->cgid); 2369 host_ip->mode = tswap16(target_ip->mode); 2370 unlock_user_struct(target_sd, target_addr, 0); 2371 return 0; 2372 } 2373 2374 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2375 struct ipc_perm *host_ip) 2376 { 2377 struct target_ipc_perm *target_ip; 2378 struct target_semid_ds *target_sd; 2379 2380 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2381 return -TARGET_EFAULT; 2382 target_ip = &(target_sd->sem_perm); 2383 target_ip->__key = tswapal(host_ip->__key); 2384 target_ip->uid = tswapal(host_ip->uid); 2385 target_ip->gid = tswapal(host_ip->gid); 2386 target_ip->cuid = tswapal(host_ip->cuid); 2387 target_ip->cgid = tswapal(host_ip->cgid); 2388 target_ip->mode = tswap16(host_ip->mode); 2389 unlock_user_struct(target_sd, target_addr, 1); 2390 return 0; 2391 } 2392 2393 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2394 abi_ulong target_addr) 2395 { 2396 struct target_semid_ds *target_sd; 2397 2398 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2399 return -TARGET_EFAULT; 2400 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2401 return -TARGET_EFAULT; 2402 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2403 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2404 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2405 unlock_user_struct(target_sd, target_addr, 0); 2406 return 0; 2407 } 2408 2409 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2410 struct semid_ds *host_sd) 2411 { 2412 struct target_semid_ds *target_sd; 2413 2414 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2415 return -TARGET_EFAULT; 2416 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2417 return -TARGET_EFAULT; 2418 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2419 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2420 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2421 unlock_user_struct(target_sd, target_addr, 1); 2422 return 0; 2423 } 2424 2425 struct target_seminfo { 2426 int semmap; 2427 int semmni; 2428 int semmns; 2429 int semmnu; 2430 int semmsl; 2431 int semopm; 2432 int semume; 2433 int semusz; 2434 int semvmx; 2435 int semaem; 2436 }; 2437 2438 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2439 struct seminfo *host_seminfo) 2440 { 2441 struct target_seminfo *target_seminfo; 2442 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2443 return -TARGET_EFAULT; 2444 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2445 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2446 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2447 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2448 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2449 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2450 __put_user(host_seminfo->semume, &target_seminfo->semume); 2451 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2452 
__put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2453 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2454 unlock_user_struct(target_seminfo, target_addr, 1); 2455 return 0; 2456 } 2457 2458 union semun { 2459 int val; 2460 struct semid_ds *buf; 2461 unsigned short *array; 2462 struct seminfo *__buf; 2463 }; 2464 2465 union target_semun { 2466 int val; 2467 abi_ulong buf; 2468 abi_ulong array; 2469 abi_ulong __buf; 2470 }; 2471 2472 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2473 abi_ulong target_addr) 2474 { 2475 int nsems; 2476 unsigned short *array; 2477 union semun semun; 2478 struct semid_ds semid_ds; 2479 int i, ret; 2480 2481 semun.buf = &semid_ds; 2482 2483 ret = semctl(semid, 0, IPC_STAT, semun); 2484 if (ret == -1) 2485 return get_errno(ret); 2486 2487 nsems = semid_ds.sem_nsems; 2488 2489 *host_array = malloc(nsems*sizeof(unsigned short)); 2490 array = lock_user(VERIFY_READ, target_addr, 2491 nsems*sizeof(unsigned short), 1); 2492 if (!array) 2493 return -TARGET_EFAULT; 2494 2495 for(i=0; i<nsems; i++) { 2496 __get_user((*host_array)[i], &array[i]); 2497 } 2498 unlock_user(array, target_addr, 0); 2499 2500 return 0; 2501 } 2502 2503 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2504 unsigned short **host_array) 2505 { 2506 int nsems; 2507 unsigned short *array; 2508 union semun semun; 2509 struct semid_ds semid_ds; 2510 int i, ret; 2511 2512 semun.buf = &semid_ds; 2513 2514 ret = semctl(semid, 0, IPC_STAT, semun); 2515 if (ret == -1) 2516 return get_errno(ret); 2517 2518 nsems = semid_ds.sem_nsems; 2519 2520 array = lock_user(VERIFY_WRITE, target_addr, 2521 nsems*sizeof(unsigned short), 0); 2522 if (!array) 2523 return -TARGET_EFAULT; 2524 2525 for(i=0; i<nsems; i++) { 2526 __put_user((*host_array)[i], &array[i]); 2527 } 2528 free(*host_array); 2529 unlock_user(array, target_addr, 1); 2530 2531 return 0; 2532 } 2533 2534 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2535 union target_semun target_su) 2536 { 2537 union semun arg; 2538 struct semid_ds dsarg; 2539 unsigned short *array = NULL; 2540 struct seminfo seminfo; 2541 abi_long ret = -TARGET_EINVAL; 2542 abi_long err; 2543 cmd &= 0xff; 2544 2545 switch( cmd ) { 2546 case GETVAL: 2547 case SETVAL: 2548 arg.val = tswap32(target_su.val); 2549 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2550 target_su.val = tswap32(arg.val); 2551 break; 2552 case GETALL: 2553 case SETALL: 2554 err = target_to_host_semarray(semid, &array, target_su.array); 2555 if (err) 2556 return err; 2557 arg.array = array; 2558 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2559 err = host_to_target_semarray(semid, target_su.array, &array); 2560 if (err) 2561 return err; 2562 break; 2563 case IPC_STAT: 2564 case IPC_SET: 2565 case SEM_STAT: 2566 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2567 if (err) 2568 return err; 2569 arg.buf = &dsarg; 2570 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2571 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2572 if (err) 2573 return err; 2574 break; 2575 case IPC_INFO: 2576 case SEM_INFO: 2577 arg.__buf = &seminfo; 2578 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2579 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2580 if (err) 2581 return err; 2582 break; 2583 case IPC_RMID: 2584 case GETPID: 2585 case GETNCNT: 2586 case GETZCNT: 2587 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2588 break; 2589 } 2590 2591 return ret; 2592 } 2593 2594 struct 
target_sembuf { 2595 unsigned short sem_num; 2596 short sem_op; 2597 short sem_flg; 2598 }; 2599 2600 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2601 abi_ulong target_addr, 2602 unsigned nsops) 2603 { 2604 struct target_sembuf *target_sembuf; 2605 int i; 2606 2607 target_sembuf = lock_user(VERIFY_READ, target_addr, 2608 nsops*sizeof(struct target_sembuf), 1); 2609 if (!target_sembuf) 2610 return -TARGET_EFAULT; 2611 2612 for(i=0; i<nsops; i++) { 2613 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2614 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2615 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2616 } 2617 2618 unlock_user(target_sembuf, target_addr, 0); 2619 2620 return 0; 2621 } 2622 2623 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2624 { 2625 struct sembuf sops[nsops]; 2626 2627 if (target_to_host_sembuf(sops, ptr, nsops)) 2628 return -TARGET_EFAULT; 2629 2630 return semop(semid, sops, nsops); 2631 } 2632 2633 struct target_msqid_ds 2634 { 2635 struct target_ipc_perm msg_perm; 2636 abi_ulong msg_stime; 2637 #if TARGET_ABI_BITS == 32 2638 abi_ulong __unused1; 2639 #endif 2640 abi_ulong msg_rtime; 2641 #if TARGET_ABI_BITS == 32 2642 abi_ulong __unused2; 2643 #endif 2644 abi_ulong msg_ctime; 2645 #if TARGET_ABI_BITS == 32 2646 abi_ulong __unused3; 2647 #endif 2648 abi_ulong __msg_cbytes; 2649 abi_ulong msg_qnum; 2650 abi_ulong msg_qbytes; 2651 abi_ulong msg_lspid; 2652 abi_ulong msg_lrpid; 2653 abi_ulong __unused4; 2654 abi_ulong __unused5; 2655 }; 2656 2657 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2658 abi_ulong target_addr) 2659 { 2660 struct target_msqid_ds *target_md; 2661 2662 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2663 return -TARGET_EFAULT; 2664 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2665 return -TARGET_EFAULT; 2666 host_md->msg_stime = tswapal(target_md->msg_stime); 2667 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2668 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2669 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2670 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2671 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2672 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2673 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2674 unlock_user_struct(target_md, target_addr, 0); 2675 return 0; 2676 } 2677 2678 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2679 struct msqid_ds *host_md) 2680 { 2681 struct target_msqid_ds *target_md; 2682 2683 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2684 return -TARGET_EFAULT; 2685 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2686 return -TARGET_EFAULT; 2687 target_md->msg_stime = tswapal(host_md->msg_stime); 2688 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2689 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2690 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2691 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2692 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2693 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2694 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2695 unlock_user_struct(target_md, target_addr, 1); 2696 return 0; 2697 } 2698 2699 struct target_msginfo { 2700 int msgpool; 2701 int msgmap; 2702 int msgmax; 2703 int msgmnb; 2704 int msgmni; 2705 int msgssz; 2706 int msgtql; 2707 unsigned short int msgseg; 
2708 }; 2709 2710 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2711 struct msginfo *host_msginfo) 2712 { 2713 struct target_msginfo *target_msginfo; 2714 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2715 return -TARGET_EFAULT; 2716 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2717 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2718 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2719 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2720 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2721 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2722 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2723 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2724 unlock_user_struct(target_msginfo, target_addr, 1); 2725 return 0; 2726 } 2727 2728 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2729 { 2730 struct msqid_ds dsarg; 2731 struct msginfo msginfo; 2732 abi_long ret = -TARGET_EINVAL; 2733 2734 cmd &= 0xff; 2735 2736 switch (cmd) { 2737 case IPC_STAT: 2738 case IPC_SET: 2739 case MSG_STAT: 2740 if (target_to_host_msqid_ds(&dsarg,ptr)) 2741 return -TARGET_EFAULT; 2742 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2743 if (host_to_target_msqid_ds(ptr,&dsarg)) 2744 return -TARGET_EFAULT; 2745 break; 2746 case IPC_RMID: 2747 ret = get_errno(msgctl(msgid, cmd, NULL)); 2748 break; 2749 case IPC_INFO: 2750 case MSG_INFO: 2751 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2752 if (host_to_target_msginfo(ptr, &msginfo)) 2753 return -TARGET_EFAULT; 2754 break; 2755 } 2756 2757 return ret; 2758 } 2759 2760 struct target_msgbuf { 2761 abi_long mtype; 2762 char mtext[1]; 2763 }; 2764 2765 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2766 unsigned int msgsz, int msgflg) 2767 { 2768 struct target_msgbuf *target_mb; 2769 struct msgbuf *host_mb; 2770 abi_long ret = 0; 2771 2772 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2773 return -TARGET_EFAULT; 2774 host_mb = malloc(msgsz+sizeof(long)); 2775 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2776 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2777 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2778 free(host_mb); 2779 unlock_user_struct(target_mb, msgp, 0); 2780 2781 return ret; 2782 } 2783 2784 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2785 unsigned int msgsz, abi_long msgtyp, 2786 int msgflg) 2787 { 2788 struct target_msgbuf *target_mb; 2789 char *target_mtext; 2790 struct msgbuf *host_mb; 2791 abi_long ret = 0; 2792 2793 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2794 return -TARGET_EFAULT; 2795 2796 host_mb = malloc(msgsz+sizeof(long)); 2797 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg)); 2798 2799 if (ret > 0) { 2800 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2801 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2802 if (!target_mtext) { 2803 ret = -TARGET_EFAULT; 2804 goto end; 2805 } 2806 memcpy(target_mb->mtext, host_mb->mtext, ret); 2807 unlock_user(target_mtext, target_mtext_addr, ret); 2808 } 2809 2810 target_mb->mtype = tswapal(host_mb->mtype); 2811 free(host_mb); 2812 2813 end: 2814 if (target_mb) 2815 unlock_user_struct(target_mb, msgp, 1); 2816 return ret; 2817 } 2818 2819 struct target_shmid_ds 2820 { 2821 struct target_ipc_perm shm_perm; 2822 abi_ulong shm_segsz; 2823 abi_ulong shm_atime; 2824 #if TARGET_ABI_BITS == 32 2825 abi_ulong __unused1; 
2826 #endif 2827 abi_ulong shm_dtime; 2828 #if TARGET_ABI_BITS == 32 2829 abi_ulong __unused2; 2830 #endif 2831 abi_ulong shm_ctime; 2832 #if TARGET_ABI_BITS == 32 2833 abi_ulong __unused3; 2834 #endif 2835 int shm_cpid; 2836 int shm_lpid; 2837 abi_ulong shm_nattch; 2838 unsigned long int __unused4; 2839 unsigned long int __unused5; 2840 }; 2841 2842 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2843 abi_ulong target_addr) 2844 { 2845 struct target_shmid_ds *target_sd; 2846 2847 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2848 return -TARGET_EFAULT; 2849 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2850 return -TARGET_EFAULT; 2851 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2852 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2853 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2854 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2855 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2856 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2857 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2858 unlock_user_struct(target_sd, target_addr, 0); 2859 return 0; 2860 } 2861 2862 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2863 struct shmid_ds *host_sd) 2864 { 2865 struct target_shmid_ds *target_sd; 2866 2867 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2868 return -TARGET_EFAULT; 2869 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2870 return -TARGET_EFAULT; 2871 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2872 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2873 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2874 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2875 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2876 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2877 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2878 unlock_user_struct(target_sd, target_addr, 1); 2879 return 0; 2880 } 2881 2882 struct target_shminfo { 2883 abi_ulong shmmax; 2884 abi_ulong shmmin; 2885 abi_ulong shmmni; 2886 abi_ulong shmseg; 2887 abi_ulong shmall; 2888 }; 2889 2890 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2891 struct shminfo *host_shminfo) 2892 { 2893 struct target_shminfo *target_shminfo; 2894 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 2895 return -TARGET_EFAULT; 2896 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 2897 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 2898 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 2899 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 2900 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 2901 unlock_user_struct(target_shminfo, target_addr, 1); 2902 return 0; 2903 } 2904 2905 struct target_shm_info { 2906 int used_ids; 2907 abi_ulong shm_tot; 2908 abi_ulong shm_rss; 2909 abi_ulong shm_swp; 2910 abi_ulong swap_attempts; 2911 abi_ulong swap_successes; 2912 }; 2913 2914 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 2915 struct shm_info *host_shm_info) 2916 { 2917 struct target_shm_info *target_shm_info; 2918 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 2919 return -TARGET_EFAULT; 2920 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 2921 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 2922 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 2923 
__put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 2924 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 2925 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 2926 unlock_user_struct(target_shm_info, target_addr, 1); 2927 return 0; 2928 } 2929 2930 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 2931 { 2932 struct shmid_ds dsarg; 2933 struct shminfo shminfo; 2934 struct shm_info shm_info; 2935 abi_long ret = -TARGET_EINVAL; 2936 2937 cmd &= 0xff; 2938 2939 switch(cmd) { 2940 case IPC_STAT: 2941 case IPC_SET: 2942 case SHM_STAT: 2943 if (target_to_host_shmid_ds(&dsarg, buf)) 2944 return -TARGET_EFAULT; 2945 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 2946 if (host_to_target_shmid_ds(buf, &dsarg)) 2947 return -TARGET_EFAULT; 2948 break; 2949 case IPC_INFO: 2950 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 2951 if (host_to_target_shminfo(buf, &shminfo)) 2952 return -TARGET_EFAULT; 2953 break; 2954 case SHM_INFO: 2955 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 2956 if (host_to_target_shm_info(buf, &shm_info)) 2957 return -TARGET_EFAULT; 2958 break; 2959 case IPC_RMID: 2960 case SHM_LOCK: 2961 case SHM_UNLOCK: 2962 ret = get_errno(shmctl(shmid, cmd, NULL)); 2963 break; 2964 } 2965 2966 return ret; 2967 } 2968 2969 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 2970 { 2971 abi_long raddr; 2972 void *host_raddr; 2973 struct shmid_ds shm_info; 2974 int i,ret; 2975 2976 /* find out the length of the shared memory segment */ 2977 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 2978 if (is_error(ret)) { 2979 /* can't get length, bail out */ 2980 return ret; 2981 } 2982 2983 mmap_lock(); 2984 2985 if (shmaddr) 2986 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 2987 else { 2988 abi_ulong mmap_start; 2989 2990 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 2991 2992 if (mmap_start == -1) { 2993 errno = ENOMEM; 2994 host_raddr = (void *)-1; 2995 } else 2996 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 2997 } 2998 2999 if (host_raddr == (void *)-1) { 3000 mmap_unlock(); 3001 return get_errno((long)host_raddr); 3002 } 3003 raddr=h2g((unsigned long)host_raddr); 3004 3005 page_set_flags(raddr, raddr + shm_info.shm_segsz, 3006 PAGE_VALID | PAGE_READ | 3007 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3008 3009 for (i = 0; i < N_SHM_REGIONS; i++) { 3010 if (shm_regions[i].start == 0) { 3011 shm_regions[i].start = raddr; 3012 shm_regions[i].size = shm_info.shm_segsz; 3013 break; 3014 } 3015 } 3016 3017 mmap_unlock(); 3018 return raddr; 3019 3020 } 3021 3022 static inline abi_long do_shmdt(abi_ulong shmaddr) 3023 { 3024 int i; 3025 3026 for (i = 0; i < N_SHM_REGIONS; ++i) { 3027 if (shm_regions[i].start == shmaddr) { 3028 shm_regions[i].start = 0; 3029 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3030 break; 3031 } 3032 } 3033 3034 return get_errno(shmdt(g2h(shmaddr))); 3035 } 3036 3037 #ifdef TARGET_NR_ipc 3038 /* ??? This only works with linear mappings. */ 3039 /* do_ipc() must return target values and target errnos. 
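   ipc(2) is another multiplexer used by some targets: the operation is in
   the low 16 bits of 'call' and an ABI version number is in the upper
   bits.  The cases below unpack the remaining arguments and dispatch to
   the semaphore, message-queue and shared-memory helpers defined above
   (for version 0 msgrcv, the msgp and msgtyp arguments are fetched
   indirectly through an ipc_kludge block in guest memory).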
*/ 3040 static abi_long do_ipc(unsigned int call, int first, 3041 int second, int third, 3042 abi_long ptr, abi_long fifth) 3043 { 3044 int version; 3045 abi_long ret = 0; 3046 3047 version = call >> 16; 3048 call &= 0xffff; 3049 3050 switch (call) { 3051 case IPCOP_semop: 3052 ret = do_semop(first, ptr, second); 3053 break; 3054 3055 case IPCOP_semget: 3056 ret = get_errno(semget(first, second, third)); 3057 break; 3058 3059 case IPCOP_semctl: 3060 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3061 break; 3062 3063 case IPCOP_msgget: 3064 ret = get_errno(msgget(first, second)); 3065 break; 3066 3067 case IPCOP_msgsnd: 3068 ret = do_msgsnd(first, ptr, second, third); 3069 break; 3070 3071 case IPCOP_msgctl: 3072 ret = do_msgctl(first, second, ptr); 3073 break; 3074 3075 case IPCOP_msgrcv: 3076 switch (version) { 3077 case 0: 3078 { 3079 struct target_ipc_kludge { 3080 abi_long msgp; 3081 abi_long msgtyp; 3082 } *tmp; 3083 3084 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3085 ret = -TARGET_EFAULT; 3086 break; 3087 } 3088 3089 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third); 3090 3091 unlock_user_struct(tmp, ptr, 0); 3092 break; 3093 } 3094 default: 3095 ret = do_msgrcv(first, ptr, second, fifth, third); 3096 } 3097 break; 3098 3099 case IPCOP_shmat: 3100 switch (version) { 3101 default: 3102 { 3103 abi_ulong raddr; 3104 raddr = do_shmat(first, ptr, second); 3105 if (is_error(raddr)) 3106 return get_errno(raddr); 3107 if (put_user_ual(raddr, third)) 3108 return -TARGET_EFAULT; 3109 break; 3110 } 3111 case 1: 3112 ret = -TARGET_EINVAL; 3113 break; 3114 } 3115 break; 3116 case IPCOP_shmdt: 3117 ret = do_shmdt(ptr); 3118 break; 3119 3120 case IPCOP_shmget: 3121 /* IPC_* flag values are the same on all linux platforms */ 3122 ret = get_errno(shmget(first, second, third)); 3123 break; 3124 3125 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3126 case IPCOP_shmctl: 3127 ret = do_shmctl(first, second, third); 3128 break; 3129 default: 3130 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3131 ret = -TARGET_ENOSYS; 3132 break; 3133 } 3134 return ret; 3135 } 3136 #endif 3137 3138 /* kernel structure types definitions */ 3139 3140 #define STRUCT(name, ...) STRUCT_ ## name, 3141 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3142 enum { 3143 #include "syscall_types.h" 3144 }; 3145 #undef STRUCT 3146 #undef STRUCT_SPECIAL 3147 3148 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3149 #define STRUCT_SPECIAL(name) 3150 #include "syscall_types.h" 3151 #undef STRUCT 3152 #undef STRUCT_SPECIAL 3153 3154 typedef struct IOCTLEntry IOCTLEntry; 3155 3156 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3157 int fd, abi_long cmd, abi_long arg); 3158 3159 struct IOCTLEntry { 3160 unsigned int target_cmd; 3161 unsigned int host_cmd; 3162 const char *name; 3163 int access; 3164 do_ioctl_fn *do_ioctl; 3165 const argtype arg_type[5]; 3166 }; 3167 3168 #define IOC_R 0x0001 3169 #define IOC_W 0x0002 3170 #define IOC_RW (IOC_R | IOC_W) 3171 3172 #define MAX_STRUCT_SIZE 4096 3173 3174 #ifdef CONFIG_FIEMAP 3175 /* So fiemap access checks don't overflow on 32 bit systems. 3176 * This is very slightly smaller than the limit imposed by 3177 * the underlying kernel. 
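 * In other words, fm_extent_count is capped so that
 *     sizeof(struct fiemap) + fm_extent_count * sizeof(struct fiemap_extent)
 * cannot exceed UINT_MAX, which keeps the outbufsz computation in
 * do_ioctl_fs_ioc_fiemap() below from wrapping around.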
3178 */ 3179 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3180 / sizeof(struct fiemap_extent)) 3181 3182 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3183 int fd, abi_long cmd, abi_long arg) 3184 { 3185 /* The parameter for this ioctl is a struct fiemap followed 3186 * by an array of struct fiemap_extent whose size is set 3187 * in fiemap->fm_extent_count. The array is filled in by the 3188 * ioctl. 3189 */ 3190 int target_size_in, target_size_out; 3191 struct fiemap *fm; 3192 const argtype *arg_type = ie->arg_type; 3193 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3194 void *argptr, *p; 3195 abi_long ret; 3196 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3197 uint32_t outbufsz; 3198 int free_fm = 0; 3199 3200 assert(arg_type[0] == TYPE_PTR); 3201 assert(ie->access == IOC_RW); 3202 arg_type++; 3203 target_size_in = thunk_type_size(arg_type, 0); 3204 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3205 if (!argptr) { 3206 return -TARGET_EFAULT; 3207 } 3208 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3209 unlock_user(argptr, arg, 0); 3210 fm = (struct fiemap *)buf_temp; 3211 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3212 return -TARGET_EINVAL; 3213 } 3214 3215 outbufsz = sizeof (*fm) + 3216 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3217 3218 if (outbufsz > MAX_STRUCT_SIZE) { 3219 /* We can't fit all the extents into the fixed size buffer. 3220 * Allocate one that is large enough and use it instead. 3221 */ 3222 fm = malloc(outbufsz); 3223 if (!fm) { 3224 return -TARGET_ENOMEM; 3225 } 3226 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3227 free_fm = 1; 3228 } 3229 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3230 if (!is_error(ret)) { 3231 target_size_out = target_size_in; 3232 /* An extent_count of 0 means we were only counting the extents 3233 * so there are no structs to copy 3234 */ 3235 if (fm->fm_extent_count != 0) { 3236 target_size_out += fm->fm_mapped_extents * extent_size; 3237 } 3238 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3239 if (!argptr) { 3240 ret = -TARGET_EFAULT; 3241 } else { 3242 /* Convert the struct fiemap */ 3243 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3244 if (fm->fm_extent_count != 0) { 3245 p = argptr + target_size_in; 3246 /* ...and then all the struct fiemap_extents */ 3247 for (i = 0; i < fm->fm_mapped_extents; i++) { 3248 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3249 THUNK_TARGET); 3250 p += extent_size; 3251 } 3252 } 3253 unlock_user(argptr, arg, target_size_out); 3254 } 3255 } 3256 if (free_fm) { 3257 free(fm); 3258 } 3259 return ret; 3260 } 3261 #endif 3262 3263 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3264 int fd, abi_long cmd, abi_long arg) 3265 { 3266 const argtype *arg_type = ie->arg_type; 3267 int target_size; 3268 void *argptr; 3269 int ret; 3270 struct ifconf *host_ifconf; 3271 uint32_t outbufsz; 3272 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3273 int target_ifreq_size; 3274 int nb_ifreq; 3275 int free_buf = 0; 3276 int i; 3277 int target_ifc_len; 3278 abi_long target_ifc_buf; 3279 int host_ifc_len; 3280 char *host_ifc_buf; 3281 3282 assert(arg_type[0] == TYPE_PTR); 3283 assert(ie->access == IOC_RW); 3284 3285 arg_type++; 3286 target_size = thunk_type_size(arg_type, 0); 3287 3288 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3289 if (!argptr) 3290 return -TARGET_EFAULT; 3291 thunk_convert(buf_temp, argptr, 
arg_type, THUNK_HOST); 3292 unlock_user(argptr, arg, 0); 3293 3294 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3295 target_ifc_len = host_ifconf->ifc_len; 3296 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3297 3298 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3299 nb_ifreq = target_ifc_len / target_ifreq_size; 3300 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3301 3302 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3303 if (outbufsz > MAX_STRUCT_SIZE) { 3304 /* We can't fit all the extents into the fixed size buffer. 3305 * Allocate one that is large enough and use it instead. 3306 */ 3307 host_ifconf = malloc(outbufsz); 3308 if (!host_ifconf) { 3309 return -TARGET_ENOMEM; 3310 } 3311 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3312 free_buf = 1; 3313 } 3314 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3315 3316 host_ifconf->ifc_len = host_ifc_len; 3317 host_ifconf->ifc_buf = host_ifc_buf; 3318 3319 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3320 if (!is_error(ret)) { 3321 /* convert host ifc_len to target ifc_len */ 3322 3323 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3324 target_ifc_len = nb_ifreq * target_ifreq_size; 3325 host_ifconf->ifc_len = target_ifc_len; 3326 3327 /* restore target ifc_buf */ 3328 3329 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3330 3331 /* copy struct ifconf to target user */ 3332 3333 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3334 if (!argptr) 3335 return -TARGET_EFAULT; 3336 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3337 unlock_user(argptr, arg, target_size); 3338 3339 /* copy ifreq[] to target user */ 3340 3341 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3342 for (i = 0; i < nb_ifreq ; i++) { 3343 thunk_convert(argptr + i * target_ifreq_size, 3344 host_ifc_buf + i * sizeof(struct ifreq), 3345 ifreq_arg_type, THUNK_TARGET); 3346 } 3347 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3348 } 3349 3350 if (free_buf) { 3351 free(host_ifconf); 3352 } 3353 3354 return ret; 3355 } 3356 3357 static IOCTLEntry ioctl_entries[] = { 3358 #define IOCTL(cmd, access, ...) \ 3359 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3360 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3361 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3362 #include "ioctls.h" 3363 { 0, 0, }, 3364 }; 3365 3366 /* ??? Implement proper locking for ioctls. */ 3367 /* do_ioctl() Must return target values and target errnos. 
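   The ioctl_entries[] table generated from ioctls.h maps each target
   ioctl number to its host counterpart.  Entries that provide a do_ioctl
   callback (such as the fiemap and ifconf handlers above) do their own
   argument marshalling; for the rest the argument is converted
   generically from its arg_type description, copying in and/or out
   according to the IOC_R, IOC_W or IOC_RW access flag.  An entry in
   ioctls.h might look roughly like IOCTL(BLKGETSIZE, IOC_R,
   MK_PTR(TYPE_ULONG)); the exact contents of that header are not shown
   here.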
*/ 3368 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3369 { 3370 const IOCTLEntry *ie; 3371 const argtype *arg_type; 3372 abi_long ret; 3373 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3374 int target_size; 3375 void *argptr; 3376 3377 ie = ioctl_entries; 3378 for(;;) { 3379 if (ie->target_cmd == 0) { 3380 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3381 return -TARGET_ENOSYS; 3382 } 3383 if (ie->target_cmd == cmd) 3384 break; 3385 ie++; 3386 } 3387 arg_type = ie->arg_type; 3388 #if defined(DEBUG) 3389 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3390 #endif 3391 if (ie->do_ioctl) { 3392 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3393 } 3394 3395 switch(arg_type[0]) { 3396 case TYPE_NULL: 3397 /* no argument */ 3398 ret = get_errno(ioctl(fd, ie->host_cmd)); 3399 break; 3400 case TYPE_PTRVOID: 3401 case TYPE_INT: 3402 /* int argment */ 3403 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3404 break; 3405 case TYPE_PTR: 3406 arg_type++; 3407 target_size = thunk_type_size(arg_type, 0); 3408 switch(ie->access) { 3409 case IOC_R: 3410 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3411 if (!is_error(ret)) { 3412 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3413 if (!argptr) 3414 return -TARGET_EFAULT; 3415 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3416 unlock_user(argptr, arg, target_size); 3417 } 3418 break; 3419 case IOC_W: 3420 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3421 if (!argptr) 3422 return -TARGET_EFAULT; 3423 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3424 unlock_user(argptr, arg, 0); 3425 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3426 break; 3427 default: 3428 case IOC_RW: 3429 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3430 if (!argptr) 3431 return -TARGET_EFAULT; 3432 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3433 unlock_user(argptr, arg, 0); 3434 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3435 if (!is_error(ret)) { 3436 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3437 if (!argptr) 3438 return -TARGET_EFAULT; 3439 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3440 unlock_user(argptr, arg, target_size); 3441 } 3442 break; 3443 } 3444 break; 3445 default: 3446 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3447 (long)cmd, arg_type[0]); 3448 ret = -TARGET_ENOSYS; 3449 break; 3450 } 3451 return ret; 3452 } 3453 3454 static const bitmask_transtbl iflag_tbl[] = { 3455 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3456 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3457 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3458 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3459 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3460 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3461 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3462 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3463 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3464 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3465 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3466 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3467 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3468 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3469 { 0, 0, 0, 0 } 3470 }; 3471 3472 static const bitmask_transtbl oflag_tbl[] = { 3473 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3474 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3475 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3476 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3477 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR 
}, 3478 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3479 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3480 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3481 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3482 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3483 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3484 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3485 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3486 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3487 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3488 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3489 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3490 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3491 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3492 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3493 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3494 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3495 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3496 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3497 { 0, 0, 0, 0 } 3498 }; 3499 3500 static const bitmask_transtbl cflag_tbl[] = { 3501 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3502 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3503 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3504 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3505 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3506 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3507 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3508 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3509 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3510 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3511 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3512 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3513 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3514 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3515 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3516 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3517 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3518 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3519 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3520 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3521 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3522 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3523 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3524 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3525 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3526 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3527 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3528 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3529 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3530 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3531 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3532 { 0, 0, 0, 0 } 3533 }; 3534 3535 static const bitmask_transtbl lflag_tbl[] = { 3536 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3537 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3538 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3539 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3540 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3541 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3542 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3543 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3544 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3545 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3546 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3547 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3548 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3549 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3550 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3551 { 0, 0, 0, 0 } 3552 }; 3553 3554 static void target_to_host_termios 
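/* Termios conversion: the c_iflag, c_oflag, c_cflag and c_lflag words are
   mapped through the bitmask tables above, where each row pairs a target
   mask/value with the corresponding host mask/value, and the c_cc control
   characters are copied entry by entry because the index of each (VINTR,
   VMIN, VTIME, ...) need not be the same on target and host. */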
(void *dst, const void *src) 3555 { 3556 struct host_termios *host = dst; 3557 const struct target_termios *target = src; 3558 3559 host->c_iflag = 3560 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3561 host->c_oflag = 3562 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3563 host->c_cflag = 3564 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3565 host->c_lflag = 3566 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3567 host->c_line = target->c_line; 3568 3569 memset(host->c_cc, 0, sizeof(host->c_cc)); 3570 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3571 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3572 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3573 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3574 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3575 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3576 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3577 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3578 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3579 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3580 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3581 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3582 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3583 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3584 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3585 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3586 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3587 } 3588 3589 static void host_to_target_termios (void *dst, const void *src) 3590 { 3591 struct target_termios *target = dst; 3592 const struct host_termios *host = src; 3593 3594 target->c_iflag = 3595 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3596 target->c_oflag = 3597 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3598 target->c_cflag = 3599 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3600 target->c_lflag = 3601 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3602 target->c_line = host->c_line; 3603 3604 memset(target->c_cc, 0, sizeof(target->c_cc)); 3605 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3606 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3607 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3608 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3609 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3610 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3611 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3612 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3613 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3614 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3615 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3616 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3617 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3618 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3619 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3620 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3621 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3622 } 3623 3624 static const StructEntry struct_termios_def = { 3625 .convert = { host_to_target_termios, target_to_host_termios }, 3626 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3627 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3628 }; 3629 3630 static bitmask_transtbl mmap_flags_tbl[] = { 3631 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3632 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3633 { TARGET_MAP_FIXED, 
TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 3634 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 3635 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 3636 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 3637 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 3638 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 3639 { 0, 0, 0, 0 } 3640 }; 3641 3642 #if defined(TARGET_I386) 3643 3644 /* NOTE: there is really one LDT for all the threads */ 3645 static uint8_t *ldt_table; 3646 3647 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 3648 { 3649 int size; 3650 void *p; 3651 3652 if (!ldt_table) 3653 return 0; 3654 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 3655 if (size > bytecount) 3656 size = bytecount; 3657 p = lock_user(VERIFY_WRITE, ptr, size, 0); 3658 if (!p) 3659 return -TARGET_EFAULT; 3660 /* ??? Should this by byteswapped? */ 3661 memcpy(p, ldt_table, size); 3662 unlock_user(p, ptr, size); 3663 return size; 3664 } 3665 3666 /* XXX: add locking support */ 3667 static abi_long write_ldt(CPUX86State *env, 3668 abi_ulong ptr, unsigned long bytecount, int oldmode) 3669 { 3670 struct target_modify_ldt_ldt_s ldt_info; 3671 struct target_modify_ldt_ldt_s *target_ldt_info; 3672 int seg_32bit, contents, read_exec_only, limit_in_pages; 3673 int seg_not_present, useable, lm; 3674 uint32_t *lp, entry_1, entry_2; 3675 3676 if (bytecount != sizeof(ldt_info)) 3677 return -TARGET_EINVAL; 3678 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 3679 return -TARGET_EFAULT; 3680 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3681 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 3682 ldt_info.limit = tswap32(target_ldt_info->limit); 3683 ldt_info.flags = tswap32(target_ldt_info->flags); 3684 unlock_user_struct(target_ldt_info, ptr, 0); 3685 3686 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 3687 return -TARGET_EINVAL; 3688 seg_32bit = ldt_info.flags & 1; 3689 contents = (ldt_info.flags >> 1) & 3; 3690 read_exec_only = (ldt_info.flags >> 3) & 1; 3691 limit_in_pages = (ldt_info.flags >> 4) & 1; 3692 seg_not_present = (ldt_info.flags >> 5) & 1; 3693 useable = (ldt_info.flags >> 6) & 1; 3694 #ifdef TARGET_ABI32 3695 lm = 0; 3696 #else 3697 lm = (ldt_info.flags >> 7) & 1; 3698 #endif 3699 if (contents == 3) { 3700 if (oldmode) 3701 return -TARGET_EINVAL; 3702 if (seg_not_present == 0) 3703 return -TARGET_EINVAL; 3704 } 3705 /* allocate the LDT */ 3706 if (!ldt_table) { 3707 env->ldt.base = target_mmap(0, 3708 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 3709 PROT_READ|PROT_WRITE, 3710 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 3711 if (env->ldt.base == -1) 3712 return -TARGET_ENOMEM; 3713 memset(g2h(env->ldt.base), 0, 3714 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 3715 env->ldt.limit = 0xffff; 3716 ldt_table = g2h(env->ldt.base); 3717 } 3718 3719 /* NOTE: same code as Linux kernel */ 3720 /* Allow LDTs to be cleared by the user. 
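       A request with base 0, limit 0 and the flag combination the kernel
       treats as an empty descriptor installs an all-zero entry; anything
       else is packed into entry_1/entry_2 using the standard x86 segment
       descriptor layout (base split across both words, 20-bit limit, and
       the type, present, 32-bit, granularity and AVL bits in entry_2).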
*/ 3721 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3722 if (oldmode || 3723 (contents == 0 && 3724 read_exec_only == 1 && 3725 seg_32bit == 0 && 3726 limit_in_pages == 0 && 3727 seg_not_present == 1 && 3728 useable == 0 )) { 3729 entry_1 = 0; 3730 entry_2 = 0; 3731 goto install; 3732 } 3733 } 3734 3735 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3736 (ldt_info.limit & 0x0ffff); 3737 entry_2 = (ldt_info.base_addr & 0xff000000) | 3738 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3739 (ldt_info.limit & 0xf0000) | 3740 ((read_exec_only ^ 1) << 9) | 3741 (contents << 10) | 3742 ((seg_not_present ^ 1) << 15) | 3743 (seg_32bit << 22) | 3744 (limit_in_pages << 23) | 3745 (lm << 21) | 3746 0x7000; 3747 if (!oldmode) 3748 entry_2 |= (useable << 20); 3749 3750 /* Install the new entry ... */ 3751 install: 3752 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 3753 lp[0] = tswap32(entry_1); 3754 lp[1] = tswap32(entry_2); 3755 return 0; 3756 } 3757 3758 /* specific and weird i386 syscalls */ 3759 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 3760 unsigned long bytecount) 3761 { 3762 abi_long ret; 3763 3764 switch (func) { 3765 case 0: 3766 ret = read_ldt(ptr, bytecount); 3767 break; 3768 case 1: 3769 ret = write_ldt(env, ptr, bytecount, 1); 3770 break; 3771 case 0x11: 3772 ret = write_ldt(env, ptr, bytecount, 0); 3773 break; 3774 default: 3775 ret = -TARGET_ENOSYS; 3776 break; 3777 } 3778 return ret; 3779 } 3780 3781 #if defined(TARGET_I386) && defined(TARGET_ABI32) 3782 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 3783 { 3784 uint64_t *gdt_table = g2h(env->gdt.base); 3785 struct target_modify_ldt_ldt_s ldt_info; 3786 struct target_modify_ldt_ldt_s *target_ldt_info; 3787 int seg_32bit, contents, read_exec_only, limit_in_pages; 3788 int seg_not_present, useable, lm; 3789 uint32_t *lp, entry_1, entry_2; 3790 int i; 3791 3792 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 3793 if (!target_ldt_info) 3794 return -TARGET_EFAULT; 3795 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3796 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 3797 ldt_info.limit = tswap32(target_ldt_info->limit); 3798 ldt_info.flags = tswap32(target_ldt_info->flags); 3799 if (ldt_info.entry_number == -1) { 3800 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 3801 if (gdt_table[i] == 0) { 3802 ldt_info.entry_number = i; 3803 target_ldt_info->entry_number = tswap32(i); 3804 break; 3805 } 3806 } 3807 } 3808 unlock_user_struct(target_ldt_info, ptr, 1); 3809 3810 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 3811 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 3812 return -TARGET_EINVAL; 3813 seg_32bit = ldt_info.flags & 1; 3814 contents = (ldt_info.flags >> 1) & 3; 3815 read_exec_only = (ldt_info.flags >> 3) & 1; 3816 limit_in_pages = (ldt_info.flags >> 4) & 1; 3817 seg_not_present = (ldt_info.flags >> 5) & 1; 3818 useable = (ldt_info.flags >> 6) & 1; 3819 #ifdef TARGET_ABI32 3820 lm = 0; 3821 #else 3822 lm = (ldt_info.flags >> 7) & 1; 3823 #endif 3824 3825 if (contents == 3) { 3826 if (seg_not_present == 0) 3827 return -TARGET_EINVAL; 3828 } 3829 3830 /* NOTE: same code as Linux kernel */ 3831 /* Allow LDTs to be cleared by the user. 
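       Unlike write_ldt() above there is no 'oldmode' here: the descriptor
       is written into one of the GDT TLS slots (an entry_number of -1
       asks for the first free slot, which is reported back to the guest),
       and the 'useable' AVL bit is always included when entry_2 is
       assembled.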
*/ 3832 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3833 if ((contents == 0 && 3834 read_exec_only == 1 && 3835 seg_32bit == 0 && 3836 limit_in_pages == 0 && 3837 seg_not_present == 1 && 3838 useable == 0 )) { 3839 entry_1 = 0; 3840 entry_2 = 0; 3841 goto install; 3842 } 3843 } 3844 3845 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3846 (ldt_info.limit & 0x0ffff); 3847 entry_2 = (ldt_info.base_addr & 0xff000000) | 3848 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3849 (ldt_info.limit & 0xf0000) | 3850 ((read_exec_only ^ 1) << 9) | 3851 (contents << 10) | 3852 ((seg_not_present ^ 1) << 15) | 3853 (seg_32bit << 22) | 3854 (limit_in_pages << 23) | 3855 (useable << 20) | 3856 (lm << 21) | 3857 0x7000; 3858 3859 /* Install the new entry ... */ 3860 install: 3861 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 3862 lp[0] = tswap32(entry_1); 3863 lp[1] = tswap32(entry_2); 3864 return 0; 3865 } 3866 3867 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 3868 { 3869 struct target_modify_ldt_ldt_s *target_ldt_info; 3870 uint64_t *gdt_table = g2h(env->gdt.base); 3871 uint32_t base_addr, limit, flags; 3872 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 3873 int seg_not_present, useable, lm; 3874 uint32_t *lp, entry_1, entry_2; 3875 3876 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 3877 if (!target_ldt_info) 3878 return -TARGET_EFAULT; 3879 idx = tswap32(target_ldt_info->entry_number); 3880 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 3881 idx > TARGET_GDT_ENTRY_TLS_MAX) { 3882 unlock_user_struct(target_ldt_info, ptr, 1); 3883 return -TARGET_EINVAL; 3884 } 3885 lp = (uint32_t *)(gdt_table + idx); 3886 entry_1 = tswap32(lp[0]); 3887 entry_2 = tswap32(lp[1]); 3888 3889 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 3890 contents = (entry_2 >> 10) & 3; 3891 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 3892 seg_32bit = (entry_2 >> 22) & 1; 3893 limit_in_pages = (entry_2 >> 23) & 1; 3894 useable = (entry_2 >> 20) & 1; 3895 #ifdef TARGET_ABI32 3896 lm = 0; 3897 #else 3898 lm = (entry_2 >> 21) & 1; 3899 #endif 3900 flags = (seg_32bit << 0) | (contents << 1) | 3901 (read_exec_only << 3) | (limit_in_pages << 4) | 3902 (seg_not_present << 5) | (useable << 6) | (lm << 7); 3903 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 3904 base_addr = (entry_1 >> 16) | 3905 (entry_2 & 0xff000000) | 3906 ((entry_2 & 0xff) << 16); 3907 target_ldt_info->base_addr = tswapal(base_addr); 3908 target_ldt_info->limit = tswap32(limit); 3909 target_ldt_info->flags = tswap32(flags); 3910 unlock_user_struct(target_ldt_info, ptr, 1); 3911 return 0; 3912 } 3913 #endif /* TARGET_I386 && TARGET_ABI32 */ 3914 3915 #ifndef TARGET_ABI32 3916 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 3917 { 3918 abi_long ret = 0; 3919 abi_ulong val; 3920 int idx; 3921 3922 switch(code) { 3923 case TARGET_ARCH_SET_GS: 3924 case TARGET_ARCH_SET_FS: 3925 if (code == TARGET_ARCH_SET_GS) 3926 idx = R_GS; 3927 else 3928 idx = R_FS; 3929 cpu_x86_load_seg(env, idx, 0); 3930 env->segs[idx].base = addr; 3931 break; 3932 case TARGET_ARCH_GET_GS: 3933 case TARGET_ARCH_GET_FS: 3934 if (code == TARGET_ARCH_GET_GS) 3935 idx = R_GS; 3936 else 3937 idx = R_FS; 3938 val = env->segs[idx].base; 3939 if (put_user(val, addr, abi_ulong)) 3940 ret = -TARGET_EFAULT; 3941 break; 3942 default: 3943 ret = -TARGET_EINVAL; 3944 break; 3945 } 3946 return ret; 3947 } 3948 #endif 3949 3950 #endif /* defined(TARGET_I386) */ 3951 3952 #define NEW_STACK_SIZE 0x40000 3953 3954 #if defined(CONFIG_USE_NPTL) 
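/* Thread creation under CONFIG_USE_NPTL (see do_fork() below) uses a
 * two-stage handshake: the parent takes clone_lock, fills in a
 * new_thread_info, blocks all signals and starts clone_func() on a new
 * host pthread.  The child publishes its TID through the requested
 * *_tidptr locations, restores the signal mask, signals info->cond so
 * the parent can pick up info->tid, and then blocks on clone_lock until
 * the parent has finished the remaining setup before entering
 * cpu_loop(). */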
3955 3956 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 3957 typedef struct { 3958 CPUArchState *env; 3959 pthread_mutex_t mutex; 3960 pthread_cond_t cond; 3961 pthread_t thread; 3962 uint32_t tid; 3963 abi_ulong child_tidptr; 3964 abi_ulong parent_tidptr; 3965 sigset_t sigmask; 3966 } new_thread_info; 3967 3968 static void *clone_func(void *arg) 3969 { 3970 new_thread_info *info = arg; 3971 CPUArchState *env; 3972 TaskState *ts; 3973 3974 env = info->env; 3975 thread_env = env; 3976 ts = (TaskState *)thread_env->opaque; 3977 info->tid = gettid(); 3978 env->host_tid = info->tid; 3979 task_settid(ts); 3980 if (info->child_tidptr) 3981 put_user_u32(info->tid, info->child_tidptr); 3982 if (info->parent_tidptr) 3983 put_user_u32(info->tid, info->parent_tidptr); 3984 /* Enable signals. */ 3985 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 3986 /* Signal to the parent that we're ready. */ 3987 pthread_mutex_lock(&info->mutex); 3988 pthread_cond_broadcast(&info->cond); 3989 pthread_mutex_unlock(&info->mutex); 3990 /* Wait until the parent has finshed initializing the tls state. */ 3991 pthread_mutex_lock(&clone_lock); 3992 pthread_mutex_unlock(&clone_lock); 3993 cpu_loop(env); 3994 /* never exits */ 3995 return NULL; 3996 } 3997 #else 3998 3999 static int clone_func(void *arg) 4000 { 4001 CPUArchState *env = arg; 4002 cpu_loop(env); 4003 /* never exits */ 4004 return 0; 4005 } 4006 #endif 4007 4008 /* do_fork() Must return host values and target errnos (unlike most 4009 do_*() functions). */ 4010 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4011 abi_ulong parent_tidptr, target_ulong newtls, 4012 abi_ulong child_tidptr) 4013 { 4014 int ret; 4015 TaskState *ts; 4016 CPUArchState *new_env; 4017 #if defined(CONFIG_USE_NPTL) 4018 unsigned int nptl_flags; 4019 sigset_t sigmask; 4020 #else 4021 uint8_t *new_stack; 4022 #endif 4023 4024 /* Emulate vfork() with fork() */ 4025 if (flags & CLONE_VFORK) 4026 flags &= ~(CLONE_VFORK | CLONE_VM); 4027 4028 if (flags & CLONE_VM) { 4029 TaskState *parent_ts = (TaskState *)env->opaque; 4030 #if defined(CONFIG_USE_NPTL) 4031 new_thread_info info; 4032 pthread_attr_t attr; 4033 #endif 4034 ts = g_malloc0(sizeof(TaskState)); 4035 init_task_state(ts); 4036 /* we create a new CPU instance. */ 4037 new_env = cpu_copy(env); 4038 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC) 4039 cpu_state_reset(new_env); 4040 #endif 4041 /* Init regs that differ from the parent. */ 4042 cpu_clone_regs(new_env, newsp); 4043 new_env->opaque = ts; 4044 ts->bprm = parent_ts->bprm; 4045 ts->info = parent_ts->info; 4046 #if defined(CONFIG_USE_NPTL) 4047 nptl_flags = flags; 4048 flags &= ~CLONE_NPTL_FLAGS2; 4049 4050 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4051 ts->child_tidptr = child_tidptr; 4052 } 4053 4054 if (nptl_flags & CLONE_SETTLS) 4055 cpu_set_tls (new_env, newtls); 4056 4057 /* Grab a mutex so that thread setup appears atomic. 
*/ 4058 pthread_mutex_lock(&clone_lock); 4059 4060 memset(&info, 0, sizeof(info)); 4061 pthread_mutex_init(&info.mutex, NULL); 4062 pthread_mutex_lock(&info.mutex); 4063 pthread_cond_init(&info.cond, NULL); 4064 info.env = new_env; 4065 if (nptl_flags & CLONE_CHILD_SETTID) 4066 info.child_tidptr = child_tidptr; 4067 if (nptl_flags & CLONE_PARENT_SETTID) 4068 info.parent_tidptr = parent_tidptr; 4069 4070 ret = pthread_attr_init(&attr); 4071 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4072 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4073 /* It is not safe to deliver signals until the child has finished 4074 initializing, so temporarily block all signals. */ 4075 sigfillset(&sigmask); 4076 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4077 4078 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4079 /* TODO: Free new CPU state if thread creation failed. */ 4080 4081 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4082 pthread_attr_destroy(&attr); 4083 if (ret == 0) { 4084 /* Wait for the child to initialize. */ 4085 pthread_cond_wait(&info.cond, &info.mutex); 4086 ret = info.tid; 4087 if (flags & CLONE_PARENT_SETTID) 4088 put_user_u32(ret, parent_tidptr); 4089 } else { 4090 ret = -1; 4091 } 4092 pthread_mutex_unlock(&info.mutex); 4093 pthread_cond_destroy(&info.cond); 4094 pthread_mutex_destroy(&info.mutex); 4095 pthread_mutex_unlock(&clone_lock); 4096 #else 4097 if (flags & CLONE_NPTL_FLAGS2) 4098 return -EINVAL; 4099 /* This is probably going to die very quickly, but do it anyway. */ 4100 new_stack = g_malloc0 (NEW_STACK_SIZE); 4101 #ifdef __ia64__ 4102 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env); 4103 #else 4104 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env); 4105 #endif 4106 #endif 4107 } else { 4108 /* if no CLONE_VM, we consider it is a fork */ 4109 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4110 return -EINVAL; 4111 fork_start(); 4112 ret = fork(); 4113 if (ret == 0) { 4114 /* Child Process. */ 4115 cpu_clone_regs(env, newsp); 4116 fork_end(1); 4117 #if defined(CONFIG_USE_NPTL) 4118 /* There is a race condition here. The parent process could 4119 theoretically read the TID in the child process before the child 4120 tid is set. This would require using either ptrace 4121 (not implemented) or having *_tidptr to point at a shared memory 4122 mapping. We can't repeat the spinlock hack used above because 4123 the child process gets its own copy of the lock. */ 4124 if (flags & CLONE_CHILD_SETTID) 4125 put_user_u32(gettid(), child_tidptr); 4126 if (flags & CLONE_PARENT_SETTID) 4127 put_user_u32(gettid(), parent_tidptr); 4128 ts = (TaskState *)env->opaque; 4129 if (flags & CLONE_SETTLS) 4130 cpu_set_tls (env, newtls); 4131 if (flags & CLONE_CHILD_CLEARTID) 4132 ts->child_tidptr = child_tidptr; 4133 #endif 4134 } else { 4135 fork_end(0); 4136 } 4137 } 4138 return ret; 4139 } 4140 4141 /* warning : doesn't handle linux specific flags... 
*/ 4142 static int target_to_host_fcntl_cmd(int cmd) 4143 { 4144 switch(cmd) { 4145 case TARGET_F_DUPFD: 4146 case TARGET_F_GETFD: 4147 case TARGET_F_SETFD: 4148 case TARGET_F_GETFL: 4149 case TARGET_F_SETFL: 4150 return cmd; 4151 case TARGET_F_GETLK: 4152 return F_GETLK; 4153 case TARGET_F_SETLK: 4154 return F_SETLK; 4155 case TARGET_F_SETLKW: 4156 return F_SETLKW; 4157 case TARGET_F_GETOWN: 4158 return F_GETOWN; 4159 case TARGET_F_SETOWN: 4160 return F_SETOWN; 4161 case TARGET_F_GETSIG: 4162 return F_GETSIG; 4163 case TARGET_F_SETSIG: 4164 return F_SETSIG; 4165 #if TARGET_ABI_BITS == 32 4166 case TARGET_F_GETLK64: 4167 return F_GETLK64; 4168 case TARGET_F_SETLK64: 4169 return F_SETLK64; 4170 case TARGET_F_SETLKW64: 4171 return F_SETLKW64; 4172 #endif 4173 case TARGET_F_SETLEASE: 4174 return F_SETLEASE; 4175 case TARGET_F_GETLEASE: 4176 return F_GETLEASE; 4177 #ifdef F_DUPFD_CLOEXEC 4178 case TARGET_F_DUPFD_CLOEXEC: 4179 return F_DUPFD_CLOEXEC; 4180 #endif 4181 case TARGET_F_NOTIFY: 4182 return F_NOTIFY; 4183 default: 4184 return -TARGET_EINVAL; 4185 } 4186 return -TARGET_EINVAL; 4187 } 4188 4189 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4190 { 4191 struct flock fl; 4192 struct target_flock *target_fl; 4193 struct flock64 fl64; 4194 struct target_flock64 *target_fl64; 4195 abi_long ret; 4196 int host_cmd = target_to_host_fcntl_cmd(cmd); 4197 4198 if (host_cmd == -TARGET_EINVAL) 4199 return host_cmd; 4200 4201 switch(cmd) { 4202 case TARGET_F_GETLK: 4203 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4204 return -TARGET_EFAULT; 4205 fl.l_type = tswap16(target_fl->l_type); 4206 fl.l_whence = tswap16(target_fl->l_whence); 4207 fl.l_start = tswapal(target_fl->l_start); 4208 fl.l_len = tswapal(target_fl->l_len); 4209 fl.l_pid = tswap32(target_fl->l_pid); 4210 unlock_user_struct(target_fl, arg, 0); 4211 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4212 if (ret == 0) { 4213 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4214 return -TARGET_EFAULT; 4215 target_fl->l_type = tswap16(fl.l_type); 4216 target_fl->l_whence = tswap16(fl.l_whence); 4217 target_fl->l_start = tswapal(fl.l_start); 4218 target_fl->l_len = tswapal(fl.l_len); 4219 target_fl->l_pid = tswap32(fl.l_pid); 4220 unlock_user_struct(target_fl, arg, 1); 4221 } 4222 break; 4223 4224 case TARGET_F_SETLK: 4225 case TARGET_F_SETLKW: 4226 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4227 return -TARGET_EFAULT; 4228 fl.l_type = tswap16(target_fl->l_type); 4229 fl.l_whence = tswap16(target_fl->l_whence); 4230 fl.l_start = tswapal(target_fl->l_start); 4231 fl.l_len = tswapal(target_fl->l_len); 4232 fl.l_pid = tswap32(target_fl->l_pid); 4233 unlock_user_struct(target_fl, arg, 0); 4234 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4235 break; 4236 4237 case TARGET_F_GETLK64: 4238 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4239 return -TARGET_EFAULT; 4240 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4241 fl64.l_whence = tswap16(target_fl64->l_whence); 4242 fl64.l_start = tswap64(target_fl64->l_start); 4243 fl64.l_len = tswap64(target_fl64->l_len); 4244 fl64.l_pid = tswap32(target_fl64->l_pid); 4245 unlock_user_struct(target_fl64, arg, 0); 4246 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4247 if (ret == 0) { 4248 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4249 return -TARGET_EFAULT; 4250 target_fl64->l_type = tswap16(fl64.l_type) >> 1; 4251 target_fl64->l_whence = tswap16(fl64.l_whence); 4252 target_fl64->l_start = tswap64(fl64.l_start); 4253 target_fl64->l_len = 
tswap64(fl64.l_len); 4254 target_fl64->l_pid = tswap32(fl64.l_pid); 4255 unlock_user_struct(target_fl64, arg, 1); 4256 } 4257 break; 4258 case TARGET_F_SETLK64: 4259 case TARGET_F_SETLKW64: 4260 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4261 return -TARGET_EFAULT; 4262 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4263 fl64.l_whence = tswap16(target_fl64->l_whence); 4264 fl64.l_start = tswap64(target_fl64->l_start); 4265 fl64.l_len = tswap64(target_fl64->l_len); 4266 fl64.l_pid = tswap32(target_fl64->l_pid); 4267 unlock_user_struct(target_fl64, arg, 0); 4268 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4269 break; 4270 4271 case TARGET_F_GETFL: 4272 ret = get_errno(fcntl(fd, host_cmd, arg)); 4273 if (ret >= 0) { 4274 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4275 } 4276 break; 4277 4278 case TARGET_F_SETFL: 4279 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4280 break; 4281 4282 case TARGET_F_SETOWN: 4283 case TARGET_F_GETOWN: 4284 case TARGET_F_SETSIG: 4285 case TARGET_F_GETSIG: 4286 case TARGET_F_SETLEASE: 4287 case TARGET_F_GETLEASE: 4288 ret = get_errno(fcntl(fd, host_cmd, arg)); 4289 break; 4290 4291 default: 4292 ret = get_errno(fcntl(fd, cmd, arg)); 4293 break; 4294 } 4295 return ret; 4296 } 4297 4298 #ifdef USE_UID16 4299 4300 static inline int high2lowuid(int uid) 4301 { 4302 if (uid > 65535) 4303 return 65534; 4304 else 4305 return uid; 4306 } 4307 4308 static inline int high2lowgid(int gid) 4309 { 4310 if (gid > 65535) 4311 return 65534; 4312 else 4313 return gid; 4314 } 4315 4316 static inline int low2highuid(int uid) 4317 { 4318 if ((int16_t)uid == -1) 4319 return -1; 4320 else 4321 return uid; 4322 } 4323 4324 static inline int low2highgid(int gid) 4325 { 4326 if ((int16_t)gid == -1) 4327 return -1; 4328 else 4329 return gid; 4330 } 4331 static inline int tswapid(int id) 4332 { 4333 return tswap16(id); 4334 } 4335 #else /* !USE_UID16 */ 4336 static inline int high2lowuid(int uid) 4337 { 4338 return uid; 4339 } 4340 static inline int high2lowgid(int gid) 4341 { 4342 return gid; 4343 } 4344 static inline int low2highuid(int uid) 4345 { 4346 return uid; 4347 } 4348 static inline int low2highgid(int gid) 4349 { 4350 return gid; 4351 } 4352 static inline int tswapid(int id) 4353 { 4354 return tswap32(id); 4355 } 4356 #endif /* USE_UID16 */ 4357 4358 void syscall_init(void) 4359 { 4360 IOCTLEntry *ie; 4361 const argtype *arg_type; 4362 int size; 4363 int i; 4364 4365 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4366 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4367 #include "syscall_types.h" 4368 #undef STRUCT 4369 #undef STRUCT_SPECIAL 4370 4371 /* we patch the ioctl size if necessary. We rely on the fact that 4372 no ioctl has all the bits at '1' in the size field */ 4373 ie = ioctl_entries; 4374 while (ie->target_cmd != 0) { 4375 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4376 TARGET_IOC_SIZEMASK) { 4377 arg_type = ie->arg_type; 4378 if (arg_type[0] != TYPE_PTR) { 4379 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4380 ie->target_cmd); 4381 exit(1); 4382 } 4383 arg_type++; 4384 size = thunk_type_size(arg_type, 0); 4385 ie->target_cmd = (ie->target_cmd & 4386 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4387 (size << TARGET_IOC_SIZESHIFT); 4388 } 4389 4390 /* Build target_to_host_errno_table[] table from 4391 * host_to_target_errno_table[]. 
*/ 4392 for (i=0; i < ERRNO_TABLE_SIZE; i++) 4393 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4394 4395 /* automatic consistency check if same arch */ 4396 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4397 (defined(__x86_64__) && defined(TARGET_X86_64)) 4398 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4399 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4400 ie->name, ie->target_cmd, ie->host_cmd); 4401 } 4402 #endif 4403 ie++; 4404 } 4405 } 4406 4407 #if TARGET_ABI_BITS == 32 4408 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4409 { 4410 #ifdef TARGET_WORDS_BIGENDIAN 4411 return ((uint64_t)word0 << 32) | word1; 4412 #else 4413 return ((uint64_t)word1 << 32) | word0; 4414 #endif 4415 } 4416 #else /* TARGET_ABI_BITS == 32 */ 4417 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4418 { 4419 return word0; 4420 } 4421 #endif /* TARGET_ABI_BITS != 32 */ 4422 4423 #ifdef TARGET_NR_truncate64 4424 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4425 abi_long arg2, 4426 abi_long arg3, 4427 abi_long arg4) 4428 { 4429 if (regpairs_aligned(cpu_env)) { 4430 arg2 = arg3; 4431 arg3 = arg4; 4432 } 4433 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4434 } 4435 #endif 4436 4437 #ifdef TARGET_NR_ftruncate64 4438 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4439 abi_long arg2, 4440 abi_long arg3, 4441 abi_long arg4) 4442 { 4443 if (regpairs_aligned(cpu_env)) { 4444 arg2 = arg3; 4445 arg3 = arg4; 4446 } 4447 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4448 } 4449 #endif 4450 4451 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4452 abi_ulong target_addr) 4453 { 4454 struct target_timespec *target_ts; 4455 4456 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4457 return -TARGET_EFAULT; 4458 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4459 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4460 unlock_user_struct(target_ts, target_addr, 0); 4461 return 0; 4462 } 4463 4464 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4465 struct timespec *host_ts) 4466 { 4467 struct target_timespec *target_ts; 4468 4469 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4470 return -TARGET_EFAULT; 4471 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4472 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4473 unlock_user_struct(target_ts, target_addr, 1); 4474 return 0; 4475 } 4476 4477 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4478 static inline abi_long host_to_target_stat64(void *cpu_env, 4479 abi_ulong target_addr, 4480 struct stat *host_st) 4481 { 4482 #ifdef TARGET_ARM 4483 if (((CPUARMState *)cpu_env)->eabi) { 4484 struct target_eabi_stat64 *target_st; 4485 4486 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4487 return -TARGET_EFAULT; 4488 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4489 __put_user(host_st->st_dev, &target_st->st_dev); 4490 __put_user(host_st->st_ino, &target_st->st_ino); 4491 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4492 __put_user(host_st->st_ino, &target_st->__st_ino); 4493 #endif 4494 __put_user(host_st->st_mode, &target_st->st_mode); 4495 __put_user(host_st->st_nlink, &target_st->st_nlink); 4496 __put_user(host_st->st_uid, &target_st->st_uid); 4497 __put_user(host_st->st_gid, &target_st->st_gid); 4498 __put_user(host_st->st_rdev, &target_st->st_rdev); 4499 
__put_user(host_st->st_size, &target_st->st_size); 4500 __put_user(host_st->st_blksize, &target_st->st_blksize); 4501 __put_user(host_st->st_blocks, &target_st->st_blocks); 4502 __put_user(host_st->st_atime, &target_st->target_st_atime); 4503 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4504 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4505 unlock_user_struct(target_st, target_addr, 1); 4506 } else 4507 #endif 4508 { 4509 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA) 4510 struct target_stat *target_st; 4511 #else 4512 struct target_stat64 *target_st; 4513 #endif 4514 4515 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4516 return -TARGET_EFAULT; 4517 memset(target_st, 0, sizeof(*target_st)); 4518 __put_user(host_st->st_dev, &target_st->st_dev); 4519 __put_user(host_st->st_ino, &target_st->st_ino); 4520 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4521 __put_user(host_st->st_ino, &target_st->__st_ino); 4522 #endif 4523 __put_user(host_st->st_mode, &target_st->st_mode); 4524 __put_user(host_st->st_nlink, &target_st->st_nlink); 4525 __put_user(host_st->st_uid, &target_st->st_uid); 4526 __put_user(host_st->st_gid, &target_st->st_gid); 4527 __put_user(host_st->st_rdev, &target_st->st_rdev); 4528 /* XXX: better use of kernel struct */ 4529 __put_user(host_st->st_size, &target_st->st_size); 4530 __put_user(host_st->st_blksize, &target_st->st_blksize); 4531 __put_user(host_st->st_blocks, &target_st->st_blocks); 4532 __put_user(host_st->st_atime, &target_st->target_st_atime); 4533 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4534 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4535 unlock_user_struct(target_st, target_addr, 1); 4536 } 4537 4538 return 0; 4539 } 4540 #endif 4541 4542 #if defined(CONFIG_USE_NPTL) 4543 /* ??? Using host futex calls even when target atomic operations 4544 are not really atomic probably breaks things. However implementing 4545 futexes locally would make futexes shared between multiple processes 4546 tricky. However they're probably useless because guest atomic 4547 operations won't work either. */ 4548 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4549 target_ulong uaddr2, int val3) 4550 { 4551 struct timespec ts, *pts; 4552 int base_op; 4553 4554 /* ??? We assume FUTEX_* constants are the same on both host 4555 and target. */ 4556 #ifdef FUTEX_CMD_MASK 4557 base_op = op & FUTEX_CMD_MASK; 4558 #else 4559 base_op = op; 4560 #endif 4561 switch (base_op) { 4562 case FUTEX_WAIT: 4563 if (timeout) { 4564 pts = &ts; 4565 target_to_host_timespec(pts, timeout); 4566 } else { 4567 pts = NULL; 4568 } 4569 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4570 pts, NULL, 0)); 4571 case FUTEX_WAKE: 4572 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4573 case FUTEX_FD: 4574 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4575 case FUTEX_REQUEUE: 4576 case FUTEX_CMP_REQUEUE: 4577 case FUTEX_WAKE_OP: 4578 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4579 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4580 But the prototype takes a `struct timespec *'; insert casts 4581 to satisfy the compiler. We do not need to tswap TIMEOUT 4582 since it's not compared to guest memory. */ 4583 pts = (struct timespec *)(uintptr_t) timeout; 4584 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4585 g2h(uaddr2), 4586 (base_op == FUTEX_CMP_REQUEUE 4587 ? 
tswap32(val3) 4588 : val3))); 4589 default: 4590 return -TARGET_ENOSYS; 4591 } 4592 } 4593 #endif 4594 4595 /* Map host to target signal numbers for the wait family of syscalls. 4596 Assume all other status bits are the same. */ 4597 static int host_to_target_waitstatus(int status) 4598 { 4599 if (WIFSIGNALED(status)) { 4600 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4601 } 4602 if (WIFSTOPPED(status)) { 4603 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4604 | (status & 0xff); 4605 } 4606 return status; 4607 } 4608 4609 int get_osversion(void) 4610 { 4611 static int osversion; 4612 struct new_utsname buf; 4613 const char *s; 4614 int i, n, tmp; 4615 if (osversion) 4616 return osversion; 4617 if (qemu_uname_release && *qemu_uname_release) { 4618 s = qemu_uname_release; 4619 } else { 4620 if (sys_uname(&buf)) 4621 return 0; 4622 s = buf.release; 4623 } 4624 tmp = 0; 4625 for (i = 0; i < 3; i++) { 4626 n = 0; 4627 while (*s >= '0' && *s <= '9') { 4628 n *= 10; 4629 n += *s - '0'; 4630 s++; 4631 } 4632 tmp = (tmp << 8) + n; 4633 if (*s == '.') 4634 s++; 4635 } 4636 osversion = tmp; 4637 return osversion; 4638 } 4639 4640 4641 static int open_self_maps(void *cpu_env, int fd) 4642 { 4643 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4644 4645 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 4646 (unsigned long long)ts->info->stack_limit, 4647 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1)) 4648 & TARGET_PAGE_MASK, 4649 (unsigned long long)ts->stack_base); 4650 4651 return 0; 4652 } 4653 4654 static int open_self_stat(void *cpu_env, int fd) 4655 { 4656 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4657 abi_ulong start_stack = ts->info->start_stack; 4658 int i; 4659 4660 for (i = 0; i < 44; i++) { 4661 char buf[128]; 4662 int len; 4663 uint64_t val = 0; 4664 4665 if (i == 0) { 4666 /* pid */ 4667 val = getpid(); 4668 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4669 } else if (i == 1) { 4670 /* app name */ 4671 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 4672 } else if (i == 27) { 4673 /* stack bottom */ 4674 val = start_stack; 4675 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4676 } else { 4677 /* for the rest, there is MasterCard */ 4678 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 4679 } 4680 4681 len = strlen(buf); 4682 if (write(fd, buf, len) != len) { 4683 return -1; 4684 } 4685 } 4686 4687 return 0; 4688 } 4689 4690 static int open_self_auxv(void *cpu_env, int fd) 4691 { 4692 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4693 abi_ulong auxv = ts->info->saved_auxv; 4694 abi_ulong len = ts->info->auxv_len; 4695 char *ptr; 4696 4697 /* 4698 * Auxiliary vector is stored in target process stack. 
4699 * read in whole auxv vector and copy it to file 4700 */ 4701 ptr = lock_user(VERIFY_READ, auxv, len, 0); 4702 if (ptr != NULL) { 4703 while (len > 0) { 4704 ssize_t r; 4705 r = write(fd, ptr, len); 4706 if (r <= 0) { 4707 break; 4708 } 4709 len -= r; 4710 ptr += r; 4711 } 4712 lseek(fd, 0, SEEK_SET); 4713 unlock_user(ptr, auxv, len); 4714 } 4715 4716 return 0; 4717 } 4718 4719 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 4720 { 4721 struct fake_open { 4722 const char *filename; 4723 int (*fill)(void *cpu_env, int fd); 4724 }; 4725 const struct fake_open *fake_open; 4726 static const struct fake_open fakes[] = { 4727 { "/proc/self/maps", open_self_maps }, 4728 { "/proc/self/stat", open_self_stat }, 4729 { "/proc/self/auxv", open_self_auxv }, 4730 { NULL, NULL } 4731 }; 4732 4733 for (fake_open = fakes; fake_open->filename; fake_open++) { 4734 if (!strncmp(pathname, fake_open->filename, 4735 strlen(fake_open->filename))) { 4736 break; 4737 } 4738 } 4739 4740 if (fake_open->filename) { 4741 const char *tmpdir; 4742 char filename[PATH_MAX]; 4743 int fd, r; 4744 4745 /* create temporary file to map stat to */ 4746 tmpdir = getenv("TMPDIR"); 4747 if (!tmpdir) 4748 tmpdir = "/tmp"; 4749 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 4750 fd = mkstemp(filename); 4751 if (fd < 0) { 4752 return fd; 4753 } 4754 unlink(filename); 4755 4756 if ((r = fake_open->fill(cpu_env, fd))) { 4757 close(fd); 4758 return r; 4759 } 4760 lseek(fd, 0, SEEK_SET); 4761 4762 return fd; 4763 } 4764 4765 return get_errno(open(path(pathname), flags, mode)); 4766 } 4767 4768 /* do_syscall() should always have a single exit point at the end so 4769 that actions, such as logging of syscall results, can be performed. 4770 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 4771 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 4772 abi_long arg2, abi_long arg3, abi_long arg4, 4773 abi_long arg5, abi_long arg6, abi_long arg7, 4774 abi_long arg8) 4775 { 4776 abi_long ret; 4777 struct stat st; 4778 struct statfs stfs; 4779 void *p; 4780 4781 #ifdef DEBUG 4782 gemu_log("syscall %d", num); 4783 #endif 4784 if(do_strace) 4785 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 4786 4787 switch(num) { 4788 case TARGET_NR_exit: 4789 #ifdef CONFIG_USE_NPTL 4790 /* In old applications this may be used to implement _exit(2). 4791 However in threaded applictions it is used for thread termination, 4792 and _exit_group is used for application termination. 4793 Do thread termination if we have more then one thread. */ 4794 /* FIXME: This probably breaks if a signal arrives. We should probably 4795 be disabling signals. */ 4796 if (first_cpu->next_cpu) { 4797 TaskState *ts; 4798 CPUArchState **lastp; 4799 CPUArchState *p; 4800 4801 cpu_list_lock(); 4802 lastp = &first_cpu; 4803 p = first_cpu; 4804 while (p && p != (CPUArchState *)cpu_env) { 4805 lastp = &p->next_cpu; 4806 p = p->next_cpu; 4807 } 4808 /* If we didn't find the CPU for this thread then something is 4809 horribly wrong. */ 4810 if (!p) 4811 abort(); 4812 /* Remove the CPU from the list. 
*/ 4813 *lastp = p->next_cpu; 4814 cpu_list_unlock(); 4815 ts = ((CPUArchState *)cpu_env)->opaque; 4816 if (ts->child_tidptr) { 4817 put_user_u32(0, ts->child_tidptr); 4818 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 4819 NULL, NULL, 0); 4820 } 4821 thread_env = NULL; 4822 g_free(cpu_env); 4823 g_free(ts); 4824 pthread_exit(NULL); 4825 } 4826 #endif 4827 #ifdef TARGET_GPROF 4828 _mcleanup(); 4829 #endif 4830 gdb_exit(cpu_env, arg1); 4831 _exit(arg1); 4832 ret = 0; /* avoid warning */ 4833 break; 4834 case TARGET_NR_read: 4835 if (arg3 == 0) 4836 ret = 0; 4837 else { 4838 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 4839 goto efault; 4840 ret = get_errno(read(arg1, p, arg3)); 4841 unlock_user(p, arg2, ret); 4842 } 4843 break; 4844 case TARGET_NR_write: 4845 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 4846 goto efault; 4847 ret = get_errno(write(arg1, p, arg3)); 4848 unlock_user(p, arg2, 0); 4849 break; 4850 case TARGET_NR_open: 4851 if (!(p = lock_user_string(arg1))) 4852 goto efault; 4853 ret = get_errno(do_open(cpu_env, p, 4854 target_to_host_bitmask(arg2, fcntl_flags_tbl), 4855 arg3)); 4856 unlock_user(p, arg1, 0); 4857 break; 4858 #if defined(TARGET_NR_openat) && defined(__NR_openat) 4859 case TARGET_NR_openat: 4860 if (!(p = lock_user_string(arg2))) 4861 goto efault; 4862 ret = get_errno(sys_openat(arg1, 4863 path(p), 4864 target_to_host_bitmask(arg3, fcntl_flags_tbl), 4865 arg4)); 4866 unlock_user(p, arg2, 0); 4867 break; 4868 #endif 4869 case TARGET_NR_close: 4870 ret = get_errno(close(arg1)); 4871 break; 4872 case TARGET_NR_brk: 4873 ret = do_brk(arg1); 4874 break; 4875 case TARGET_NR_fork: 4876 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 4877 break; 4878 #ifdef TARGET_NR_waitpid 4879 case TARGET_NR_waitpid: 4880 { 4881 int status; 4882 ret = get_errno(waitpid(arg1, &status, arg3)); 4883 if (!is_error(ret) && arg2 && ret 4884 && put_user_s32(host_to_target_waitstatus(status), arg2)) 4885 goto efault; 4886 } 4887 break; 4888 #endif 4889 #ifdef TARGET_NR_waitid 4890 case TARGET_NR_waitid: 4891 { 4892 siginfo_t info; 4893 info.si_pid = 0; 4894 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 4895 if (!is_error(ret) && arg3 && info.si_pid != 0) { 4896 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 4897 goto efault; 4898 host_to_target_siginfo(p, &info); 4899 unlock_user(p, arg3, sizeof(target_siginfo_t)); 4900 } 4901 } 4902 break; 4903 #endif 4904 #ifdef TARGET_NR_creat /* not on alpha */ 4905 case TARGET_NR_creat: 4906 if (!(p = lock_user_string(arg1))) 4907 goto efault; 4908 ret = get_errno(creat(p, arg2)); 4909 unlock_user(p, arg1, 0); 4910 break; 4911 #endif 4912 case TARGET_NR_link: 4913 { 4914 void * p2; 4915 p = lock_user_string(arg1); 4916 p2 = lock_user_string(arg2); 4917 if (!p || !p2) 4918 ret = -TARGET_EFAULT; 4919 else 4920 ret = get_errno(link(p, p2)); 4921 unlock_user(p2, arg2, 0); 4922 unlock_user(p, arg1, 0); 4923 } 4924 break; 4925 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 4926 case TARGET_NR_linkat: 4927 { 4928 void * p2 = NULL; 4929 if (!arg2 || !arg4) 4930 goto efault; 4931 p = lock_user_string(arg2); 4932 p2 = lock_user_string(arg4); 4933 if (!p || !p2) 4934 ret = -TARGET_EFAULT; 4935 else 4936 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); 4937 unlock_user(p, arg2, 0); 4938 unlock_user(p2, arg4, 0); 4939 } 4940 break; 4941 #endif 4942 case TARGET_NR_unlink: 4943 if (!(p = lock_user_string(arg1))) 4944 goto efault; 4945 ret = get_errno(unlink(p)); 4946 unlock_user(p, arg1, 0); 4947 break; 
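    /* The *at() cases below (unlinkat, mknodat, renameat, mkdirat, ...)
     * mirror their classic counterparts: lock the guest path string(s),
     * pass the directory file descriptor through unchanged to the
     * corresponding sys_*at() wrapper, and unlock the string(s) again. */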
4948 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 4949 case TARGET_NR_unlinkat: 4950 if (!(p = lock_user_string(arg2))) 4951 goto efault; 4952 ret = get_errno(sys_unlinkat(arg1, p, arg3)); 4953 unlock_user(p, arg2, 0); 4954 break; 4955 #endif 4956 case TARGET_NR_execve: 4957 { 4958 char **argp, **envp; 4959 int argc, envc; 4960 abi_ulong gp; 4961 abi_ulong guest_argp; 4962 abi_ulong guest_envp; 4963 abi_ulong addr; 4964 char **q; 4965 int total_size = 0; 4966 4967 argc = 0; 4968 guest_argp = arg2; 4969 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 4970 if (get_user_ual(addr, gp)) 4971 goto efault; 4972 if (!addr) 4973 break; 4974 argc++; 4975 } 4976 envc = 0; 4977 guest_envp = arg3; 4978 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 4979 if (get_user_ual(addr, gp)) 4980 goto efault; 4981 if (!addr) 4982 break; 4983 envc++; 4984 } 4985 4986 argp = alloca((argc + 1) * sizeof(void *)); 4987 envp = alloca((envc + 1) * sizeof(void *)); 4988 4989 for (gp = guest_argp, q = argp; gp; 4990 gp += sizeof(abi_ulong), q++) { 4991 if (get_user_ual(addr, gp)) 4992 goto execve_efault; 4993 if (!addr) 4994 break; 4995 if (!(*q = lock_user_string(addr))) 4996 goto execve_efault; 4997 total_size += strlen(*q) + 1; 4998 } 4999 *q = NULL; 5000 5001 for (gp = guest_envp, q = envp; gp; 5002 gp += sizeof(abi_ulong), q++) { 5003 if (get_user_ual(addr, gp)) 5004 goto execve_efault; 5005 if (!addr) 5006 break; 5007 if (!(*q = lock_user_string(addr))) 5008 goto execve_efault; 5009 total_size += strlen(*q) + 1; 5010 } 5011 *q = NULL; 5012 5013 /* This case will not be caught by the host's execve() if its 5014 page size is bigger than the target's. */ 5015 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5016 ret = -TARGET_E2BIG; 5017 goto execve_end; 5018 } 5019 if (!(p = lock_user_string(arg1))) 5020 goto execve_efault; 5021 ret = get_errno(execve(p, argp, envp)); 5022 unlock_user(p, arg1, 0); 5023 5024 goto execve_end; 5025 5026 execve_efault: 5027 ret = -TARGET_EFAULT; 5028 5029 execve_end: 5030 for (gp = guest_argp, q = argp; *q; 5031 gp += sizeof(abi_ulong), q++) { 5032 if (get_user_ual(addr, gp) 5033 || !addr) 5034 break; 5035 unlock_user(*q, addr, 0); 5036 } 5037 for (gp = guest_envp, q = envp; *q; 5038 gp += sizeof(abi_ulong), q++) { 5039 if (get_user_ual(addr, gp) 5040 || !addr) 5041 break; 5042 unlock_user(*q, addr, 0); 5043 } 5044 } 5045 break; 5046 case TARGET_NR_chdir: 5047 if (!(p = lock_user_string(arg1))) 5048 goto efault; 5049 ret = get_errno(chdir(p)); 5050 unlock_user(p, arg1, 0); 5051 break; 5052 #ifdef TARGET_NR_time 5053 case TARGET_NR_time: 5054 { 5055 time_t host_time; 5056 ret = get_errno(time(&host_time)); 5057 if (!is_error(ret) 5058 && arg1 5059 && put_user_sal(host_time, arg1)) 5060 goto efault; 5061 } 5062 break; 5063 #endif 5064 case TARGET_NR_mknod: 5065 if (!(p = lock_user_string(arg1))) 5066 goto efault; 5067 ret = get_errno(mknod(p, arg2, arg3)); 5068 unlock_user(p, arg1, 0); 5069 break; 5070 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 5071 case TARGET_NR_mknodat: 5072 if (!(p = lock_user_string(arg2))) 5073 goto efault; 5074 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); 5075 unlock_user(p, arg2, 0); 5076 break; 5077 #endif 5078 case TARGET_NR_chmod: 5079 if (!(p = lock_user_string(arg1))) 5080 goto efault; 5081 ret = get_errno(chmod(p, arg2)); 5082 unlock_user(p, arg1, 0); 5083 break; 5084 #ifdef TARGET_NR_break 5085 case TARGET_NR_break: 5086 goto unimplemented; 5087 #endif 5088 #ifdef TARGET_NR_oldstat 5089 case TARGET_NR_oldstat: 
5090 goto unimplemented; 5091 #endif 5092 case TARGET_NR_lseek: 5093 ret = get_errno(lseek(arg1, arg2, arg3)); 5094 break; 5095 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5096 /* Alpha specific */ 5097 case TARGET_NR_getxpid: 5098 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5099 ret = get_errno(getpid()); 5100 break; 5101 #endif 5102 #ifdef TARGET_NR_getpid 5103 case TARGET_NR_getpid: 5104 ret = get_errno(getpid()); 5105 break; 5106 #endif 5107 case TARGET_NR_mount: 5108 { 5109 /* need to look at the data field */ 5110 void *p2, *p3; 5111 p = lock_user_string(arg1); 5112 p2 = lock_user_string(arg2); 5113 p3 = lock_user_string(arg3); 5114 if (!p || !p2 || !p3) 5115 ret = -TARGET_EFAULT; 5116 else { 5117 /* FIXME - arg5 should be locked, but it isn't clear how to 5118 * do that since it's not guaranteed to be a NULL-terminated 5119 * string. 5120 */ 5121 if ( ! arg5 ) 5122 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5123 else 5124 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5125 } 5126 unlock_user(p, arg1, 0); 5127 unlock_user(p2, arg2, 0); 5128 unlock_user(p3, arg3, 0); 5129 break; 5130 } 5131 #ifdef TARGET_NR_umount 5132 case TARGET_NR_umount: 5133 if (!(p = lock_user_string(arg1))) 5134 goto efault; 5135 ret = get_errno(umount(p)); 5136 unlock_user(p, arg1, 0); 5137 break; 5138 #endif 5139 #ifdef TARGET_NR_stime /* not on alpha */ 5140 case TARGET_NR_stime: 5141 { 5142 time_t host_time; 5143 if (get_user_sal(host_time, arg1)) 5144 goto efault; 5145 ret = get_errno(stime(&host_time)); 5146 } 5147 break; 5148 #endif 5149 case TARGET_NR_ptrace: 5150 goto unimplemented; 5151 #ifdef TARGET_NR_alarm /* not on alpha */ 5152 case TARGET_NR_alarm: 5153 ret = alarm(arg1); 5154 break; 5155 #endif 5156 #ifdef TARGET_NR_oldfstat 5157 case TARGET_NR_oldfstat: 5158 goto unimplemented; 5159 #endif 5160 #ifdef TARGET_NR_pause /* not on alpha */ 5161 case TARGET_NR_pause: 5162 ret = get_errno(pause()); 5163 break; 5164 #endif 5165 #ifdef TARGET_NR_utime 5166 case TARGET_NR_utime: 5167 { 5168 struct utimbuf tbuf, *host_tbuf; 5169 struct target_utimbuf *target_tbuf; 5170 if (arg2) { 5171 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5172 goto efault; 5173 tbuf.actime = tswapal(target_tbuf->actime); 5174 tbuf.modtime = tswapal(target_tbuf->modtime); 5175 unlock_user_struct(target_tbuf, arg2, 0); 5176 host_tbuf = &tbuf; 5177 } else { 5178 host_tbuf = NULL; 5179 } 5180 if (!(p = lock_user_string(arg1))) 5181 goto efault; 5182 ret = get_errno(utime(p, host_tbuf)); 5183 unlock_user(p, arg1, 0); 5184 } 5185 break; 5186 #endif 5187 case TARGET_NR_utimes: 5188 { 5189 struct timeval *tvp, tv[2]; 5190 if (arg2) { 5191 if (copy_from_user_timeval(&tv[0], arg2) 5192 || copy_from_user_timeval(&tv[1], 5193 arg2 + sizeof(struct target_timeval))) 5194 goto efault; 5195 tvp = tv; 5196 } else { 5197 tvp = NULL; 5198 } 5199 if (!(p = lock_user_string(arg1))) 5200 goto efault; 5201 ret = get_errno(utimes(p, tvp)); 5202 unlock_user(p, arg1, 0); 5203 } 5204 break; 5205 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 5206 case TARGET_NR_futimesat: 5207 { 5208 struct timeval *tvp, tv[2]; 5209 if (arg3) { 5210 if (copy_from_user_timeval(&tv[0], arg3) 5211 || copy_from_user_timeval(&tv[1], 5212 arg3 + sizeof(struct target_timeval))) 5213 goto efault; 5214 tvp = tv; 5215 } else { 5216 tvp = NULL; 5217 } 5218 if (!(p = lock_user_string(arg2))) 5219 goto efault; 5220 ret = get_errno(sys_futimesat(arg1, path(p), tvp)); 5221 unlock_user(p, arg2, 0); 5222 } 
5223 break; 5224 #endif 5225 #ifdef TARGET_NR_stty 5226 case TARGET_NR_stty: 5227 goto unimplemented; 5228 #endif 5229 #ifdef TARGET_NR_gtty 5230 case TARGET_NR_gtty: 5231 goto unimplemented; 5232 #endif 5233 case TARGET_NR_access: 5234 if (!(p = lock_user_string(arg1))) 5235 goto efault; 5236 ret = get_errno(access(path(p), arg2)); 5237 unlock_user(p, arg1, 0); 5238 break; 5239 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5240 case TARGET_NR_faccessat: 5241 if (!(p = lock_user_string(arg2))) 5242 goto efault; 5243 ret = get_errno(sys_faccessat(arg1, p, arg3)); 5244 unlock_user(p, arg2, 0); 5245 break; 5246 #endif 5247 #ifdef TARGET_NR_nice /* not on alpha */ 5248 case TARGET_NR_nice: 5249 ret = get_errno(nice(arg1)); 5250 break; 5251 #endif 5252 #ifdef TARGET_NR_ftime 5253 case TARGET_NR_ftime: 5254 goto unimplemented; 5255 #endif 5256 case TARGET_NR_sync: 5257 sync(); 5258 ret = 0; 5259 break; 5260 case TARGET_NR_kill: 5261 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5262 break; 5263 case TARGET_NR_rename: 5264 { 5265 void *p2; 5266 p = lock_user_string(arg1); 5267 p2 = lock_user_string(arg2); 5268 if (!p || !p2) 5269 ret = -TARGET_EFAULT; 5270 else 5271 ret = get_errno(rename(p, p2)); 5272 unlock_user(p2, arg2, 0); 5273 unlock_user(p, arg1, 0); 5274 } 5275 break; 5276 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 5277 case TARGET_NR_renameat: 5278 { 5279 void *p2; 5280 p = lock_user_string(arg2); 5281 p2 = lock_user_string(arg4); 5282 if (!p || !p2) 5283 ret = -TARGET_EFAULT; 5284 else 5285 ret = get_errno(sys_renameat(arg1, p, arg3, p2)); 5286 unlock_user(p2, arg4, 0); 5287 unlock_user(p, arg2, 0); 5288 } 5289 break; 5290 #endif 5291 case TARGET_NR_mkdir: 5292 if (!(p = lock_user_string(arg1))) 5293 goto efault; 5294 ret = get_errno(mkdir(p, arg2)); 5295 unlock_user(p, arg1, 0); 5296 break; 5297 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 5298 case TARGET_NR_mkdirat: 5299 if (!(p = lock_user_string(arg2))) 5300 goto efault; 5301 ret = get_errno(sys_mkdirat(arg1, p, arg3)); 5302 unlock_user(p, arg2, 0); 5303 break; 5304 #endif 5305 case TARGET_NR_rmdir: 5306 if (!(p = lock_user_string(arg1))) 5307 goto efault; 5308 ret = get_errno(rmdir(p)); 5309 unlock_user(p, arg1, 0); 5310 break; 5311 case TARGET_NR_dup: 5312 ret = get_errno(dup(arg1)); 5313 break; 5314 case TARGET_NR_pipe: 5315 ret = do_pipe(cpu_env, arg1, 0, 0); 5316 break; 5317 #ifdef TARGET_NR_pipe2 5318 case TARGET_NR_pipe2: 5319 ret = do_pipe(cpu_env, arg1, arg2, 1); 5320 break; 5321 #endif 5322 case TARGET_NR_times: 5323 { 5324 struct target_tms *tmsp; 5325 struct tms tms; 5326 ret = get_errno(times(&tms)); 5327 if (arg1) { 5328 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5329 if (!tmsp) 5330 goto efault; 5331 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5332 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5333 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5334 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5335 } 5336 if (!is_error(ret)) 5337 ret = host_to_target_clock_t(ret); 5338 } 5339 break; 5340 #ifdef TARGET_NR_prof 5341 case TARGET_NR_prof: 5342 goto unimplemented; 5343 #endif 5344 #ifdef TARGET_NR_signal 5345 case TARGET_NR_signal: 5346 goto unimplemented; 5347 #endif 5348 case TARGET_NR_acct: 5349 if (arg1 == 0) { 5350 ret = get_errno(acct(NULL)); 5351 } else { 5352 if (!(p = lock_user_string(arg1))) 5353 goto efault; 5354 ret = get_errno(acct(path(p))); 5355 
unlock_user(p, arg1, 0); 5356 } 5357 break; 5358 #ifdef TARGET_NR_umount2 /* not on alpha */ 5359 case TARGET_NR_umount2: 5360 if (!(p = lock_user_string(arg1))) 5361 goto efault; 5362 ret = get_errno(umount2(p, arg2)); 5363 unlock_user(p, arg1, 0); 5364 break; 5365 #endif 5366 #ifdef TARGET_NR_lock 5367 case TARGET_NR_lock: 5368 goto unimplemented; 5369 #endif 5370 case TARGET_NR_ioctl: 5371 ret = do_ioctl(arg1, arg2, arg3); 5372 break; 5373 case TARGET_NR_fcntl: 5374 ret = do_fcntl(arg1, arg2, arg3); 5375 break; 5376 #ifdef TARGET_NR_mpx 5377 case TARGET_NR_mpx: 5378 goto unimplemented; 5379 #endif 5380 case TARGET_NR_setpgid: 5381 ret = get_errno(setpgid(arg1, arg2)); 5382 break; 5383 #ifdef TARGET_NR_ulimit 5384 case TARGET_NR_ulimit: 5385 goto unimplemented; 5386 #endif 5387 #ifdef TARGET_NR_oldolduname 5388 case TARGET_NR_oldolduname: 5389 goto unimplemented; 5390 #endif 5391 case TARGET_NR_umask: 5392 ret = get_errno(umask(arg1)); 5393 break; 5394 case TARGET_NR_chroot: 5395 if (!(p = lock_user_string(arg1))) 5396 goto efault; 5397 ret = get_errno(chroot(p)); 5398 unlock_user(p, arg1, 0); 5399 break; 5400 case TARGET_NR_ustat: 5401 goto unimplemented; 5402 case TARGET_NR_dup2: 5403 ret = get_errno(dup2(arg1, arg2)); 5404 break; 5405 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5406 case TARGET_NR_dup3: 5407 ret = get_errno(dup3(arg1, arg2, arg3)); 5408 break; 5409 #endif 5410 #ifdef TARGET_NR_getppid /* not on alpha */ 5411 case TARGET_NR_getppid: 5412 ret = get_errno(getppid()); 5413 break; 5414 #endif 5415 case TARGET_NR_getpgrp: 5416 ret = get_errno(getpgrp()); 5417 break; 5418 case TARGET_NR_setsid: 5419 ret = get_errno(setsid()); 5420 break; 5421 #ifdef TARGET_NR_sigaction 5422 case TARGET_NR_sigaction: 5423 { 5424 #if defined(TARGET_ALPHA) 5425 struct target_sigaction act, oact, *pact = 0; 5426 struct target_old_sigaction *old_act; 5427 if (arg2) { 5428 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5429 goto efault; 5430 act._sa_handler = old_act->_sa_handler; 5431 target_siginitset(&act.sa_mask, old_act->sa_mask); 5432 act.sa_flags = old_act->sa_flags; 5433 act.sa_restorer = 0; 5434 unlock_user_struct(old_act, arg2, 0); 5435 pact = &act; 5436 } 5437 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5438 if (!is_error(ret) && arg3) { 5439 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5440 goto efault; 5441 old_act->_sa_handler = oact._sa_handler; 5442 old_act->sa_mask = oact.sa_mask.sig[0]; 5443 old_act->sa_flags = oact.sa_flags; 5444 unlock_user_struct(old_act, arg3, 1); 5445 } 5446 #elif defined(TARGET_MIPS) 5447 struct target_sigaction act, oact, *pact, *old_act; 5448 5449 if (arg2) { 5450 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5451 goto efault; 5452 act._sa_handler = old_act->_sa_handler; 5453 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5454 act.sa_flags = old_act->sa_flags; 5455 unlock_user_struct(old_act, arg2, 0); 5456 pact = &act; 5457 } else { 5458 pact = NULL; 5459 } 5460 5461 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5462 5463 if (!is_error(ret) && arg3) { 5464 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5465 goto efault; 5466 old_act->_sa_handler = oact._sa_handler; 5467 old_act->sa_flags = oact.sa_flags; 5468 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5469 old_act->sa_mask.sig[1] = 0; 5470 old_act->sa_mask.sig[2] = 0; 5471 old_act->sa_mask.sig[3] = 0; 5472 unlock_user_struct(old_act, arg3, 1); 5473 } 5474 #else 5475 struct target_old_sigaction *old_act; 5476 struct target_sigaction act, 
oact, *pact; 5477 if (arg2) { 5478 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5479 goto efault; 5480 act._sa_handler = old_act->_sa_handler; 5481 target_siginitset(&act.sa_mask, old_act->sa_mask); 5482 act.sa_flags = old_act->sa_flags; 5483 act.sa_restorer = old_act->sa_restorer; 5484 unlock_user_struct(old_act, arg2, 0); 5485 pact = &act; 5486 } else { 5487 pact = NULL; 5488 } 5489 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5490 if (!is_error(ret) && arg3) { 5491 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5492 goto efault; 5493 old_act->_sa_handler = oact._sa_handler; 5494 old_act->sa_mask = oact.sa_mask.sig[0]; 5495 old_act->sa_flags = oact.sa_flags; 5496 old_act->sa_restorer = oact.sa_restorer; 5497 unlock_user_struct(old_act, arg3, 1); 5498 } 5499 #endif 5500 } 5501 break; 5502 #endif 5503 case TARGET_NR_rt_sigaction: 5504 { 5505 #if defined(TARGET_ALPHA) 5506 struct target_sigaction act, oact, *pact = 0; 5507 struct target_rt_sigaction *rt_act; 5508 /* ??? arg4 == sizeof(sigset_t). */ 5509 if (arg2) { 5510 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5511 goto efault; 5512 act._sa_handler = rt_act->_sa_handler; 5513 act.sa_mask = rt_act->sa_mask; 5514 act.sa_flags = rt_act->sa_flags; 5515 act.sa_restorer = arg5; 5516 unlock_user_struct(rt_act, arg2, 0); 5517 pact = &act; 5518 } 5519 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5520 if (!is_error(ret) && arg3) { 5521 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5522 goto efault; 5523 rt_act->_sa_handler = oact._sa_handler; 5524 rt_act->sa_mask = oact.sa_mask; 5525 rt_act->sa_flags = oact.sa_flags; 5526 unlock_user_struct(rt_act, arg3, 1); 5527 } 5528 #else 5529 struct target_sigaction *act; 5530 struct target_sigaction *oact; 5531 5532 if (arg2) { 5533 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5534 goto efault; 5535 } else 5536 act = NULL; 5537 if (arg3) { 5538 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5539 ret = -TARGET_EFAULT; 5540 goto rt_sigaction_fail; 5541 } 5542 } else 5543 oact = NULL; 5544 ret = get_errno(do_sigaction(arg1, act, oact)); 5545 rt_sigaction_fail: 5546 if (act) 5547 unlock_user_struct(act, arg2, 0); 5548 if (oact) 5549 unlock_user_struct(oact, arg3, 1); 5550 #endif 5551 } 5552 break; 5553 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5554 case TARGET_NR_sgetmask: 5555 { 5556 sigset_t cur_set; 5557 abi_ulong target_set; 5558 sigprocmask(0, NULL, &cur_set); 5559 host_to_target_old_sigset(&target_set, &cur_set); 5560 ret = target_set; 5561 } 5562 break; 5563 #endif 5564 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5565 case TARGET_NR_ssetmask: 5566 { 5567 sigset_t set, oset, cur_set; 5568 abi_ulong target_set = arg1; 5569 sigprocmask(0, NULL, &cur_set); 5570 target_to_host_old_sigset(&set, &target_set); 5571 sigorset(&set, &set, &cur_set); 5572 sigprocmask(SIG_SETMASK, &set, &oset); 5573 host_to_target_old_sigset(&target_set, &oset); 5574 ret = target_set; 5575 } 5576 break; 5577 #endif 5578 #ifdef TARGET_NR_sigprocmask 5579 case TARGET_NR_sigprocmask: 5580 { 5581 #if defined(TARGET_ALPHA) 5582 sigset_t set, oldset; 5583 abi_ulong mask; 5584 int how; 5585 5586 switch (arg1) { 5587 case TARGET_SIG_BLOCK: 5588 how = SIG_BLOCK; 5589 break; 5590 case TARGET_SIG_UNBLOCK: 5591 how = SIG_UNBLOCK; 5592 break; 5593 case TARGET_SIG_SETMASK: 5594 how = SIG_SETMASK; 5595 break; 5596 default: 5597 ret = -TARGET_EINVAL; 5598 goto fail; 5599 } 5600 mask = arg2; 5601 target_to_host_old_sigset(&set, &mask); 5602 5603 ret = get_errno(sigprocmask(how, &set, 
                                               &oldset));

            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        break;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t set;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&set, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_old_sigset(&set, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(sigsuspend(&set));
        }
        break;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t set;
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sigsuspend(&set));
        }
        break;
    case TARGET_NR_rt_sigtimedwait:
        {
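            /* rt_sigtimedwait: convert the guest sigset (and the optional
             * timeout) to host format, wait via sigtimedwait(), then copy
             * any delivered siginfo back to guest memory in target
             * layout. */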
sigset_t set; 5743 struct timespec uts, *puts; 5744 siginfo_t uinfo; 5745 5746 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5747 goto efault; 5748 target_to_host_sigset(&set, p); 5749 unlock_user(p, arg1, 0); 5750 if (arg3) { 5751 puts = &uts; 5752 target_to_host_timespec(puts, arg3); 5753 } else { 5754 puts = NULL; 5755 } 5756 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 5757 if (!is_error(ret) && arg2) { 5758 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 5759 goto efault; 5760 host_to_target_siginfo(p, &uinfo); 5761 unlock_user(p, arg2, sizeof(target_siginfo_t)); 5762 } 5763 } 5764 break; 5765 case TARGET_NR_rt_sigqueueinfo: 5766 { 5767 siginfo_t uinfo; 5768 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 5769 goto efault; 5770 target_to_host_siginfo(&uinfo, p); 5771 unlock_user(p, arg1, 0); 5772 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 5773 } 5774 break; 5775 #ifdef TARGET_NR_sigreturn 5776 case TARGET_NR_sigreturn: 5777 /* NOTE: ret is eax, so not transcoding must be done */ 5778 ret = do_sigreturn(cpu_env); 5779 break; 5780 #endif 5781 case TARGET_NR_rt_sigreturn: 5782 /* NOTE: ret is eax, so not transcoding must be done */ 5783 ret = do_rt_sigreturn(cpu_env); 5784 break; 5785 case TARGET_NR_sethostname: 5786 if (!(p = lock_user_string(arg1))) 5787 goto efault; 5788 ret = get_errno(sethostname(p, arg2)); 5789 unlock_user(p, arg1, 0); 5790 break; 5791 case TARGET_NR_setrlimit: 5792 { 5793 int resource = target_to_host_resource(arg1); 5794 struct target_rlimit *target_rlim; 5795 struct rlimit rlim; 5796 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 5797 goto efault; 5798 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 5799 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 5800 unlock_user_struct(target_rlim, arg2, 0); 5801 ret = get_errno(setrlimit(resource, &rlim)); 5802 } 5803 break; 5804 case TARGET_NR_getrlimit: 5805 { 5806 int resource = target_to_host_resource(arg1); 5807 struct target_rlimit *target_rlim; 5808 struct rlimit rlim; 5809 5810 ret = get_errno(getrlimit(resource, &rlim)); 5811 if (!is_error(ret)) { 5812 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 5813 goto efault; 5814 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 5815 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 5816 unlock_user_struct(target_rlim, arg2, 1); 5817 } 5818 } 5819 break; 5820 case TARGET_NR_getrusage: 5821 { 5822 struct rusage rusage; 5823 ret = get_errno(getrusage(arg1, &rusage)); 5824 if (!is_error(ret)) { 5825 host_to_target_rusage(arg2, &rusage); 5826 } 5827 } 5828 break; 5829 case TARGET_NR_gettimeofday: 5830 { 5831 struct timeval tv; 5832 ret = get_errno(gettimeofday(&tv, NULL)); 5833 if (!is_error(ret)) { 5834 if (copy_to_user_timeval(arg1, &tv)) 5835 goto efault; 5836 } 5837 } 5838 break; 5839 case TARGET_NR_settimeofday: 5840 { 5841 struct timeval tv; 5842 if (copy_from_user_timeval(&tv, arg1)) 5843 goto efault; 5844 ret = get_errno(settimeofday(&tv, NULL)); 5845 } 5846 break; 5847 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390) 5848 case TARGET_NR_select: 5849 { 5850 struct target_sel_arg_struct *sel; 5851 abi_ulong inp, outp, exp, tvp; 5852 long nsel; 5853 5854 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 5855 goto efault; 5856 nsel = tswapal(sel->n); 5857 inp = tswapal(sel->inp); 5858 outp = tswapal(sel->outp); 5859 exp = tswapal(sel->exp); 5860 tvp = tswapal(sel->tvp); 5861 
unlock_user_struct(sel, arg1, 0); 5862 ret = do_select(nsel, inp, outp, exp, tvp); 5863 } 5864 break; 5865 #endif 5866 #ifdef TARGET_NR_pselect6 5867 case TARGET_NR_pselect6: 5868 { 5869 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 5870 fd_set rfds, wfds, efds; 5871 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 5872 struct timespec ts, *ts_ptr; 5873 5874 /* 5875 * The 6th arg is actually two args smashed together, 5876 * so we cannot use the C library. 5877 */ 5878 sigset_t set; 5879 struct { 5880 sigset_t *set; 5881 size_t size; 5882 } sig, *sig_ptr; 5883 5884 abi_ulong arg_sigset, arg_sigsize, *arg7; 5885 target_sigset_t *target_sigset; 5886 5887 n = arg1; 5888 rfd_addr = arg2; 5889 wfd_addr = arg3; 5890 efd_addr = arg4; 5891 ts_addr = arg5; 5892 5893 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 5894 if (ret) { 5895 goto fail; 5896 } 5897 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 5898 if (ret) { 5899 goto fail; 5900 } 5901 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 5902 if (ret) { 5903 goto fail; 5904 } 5905 5906 /* 5907 * This takes a timespec, and not a timeval, so we cannot 5908 * use the do_select() helper ... 5909 */ 5910 if (ts_addr) { 5911 if (target_to_host_timespec(&ts, ts_addr)) { 5912 goto efault; 5913 } 5914 ts_ptr = &ts; 5915 } else { 5916 ts_ptr = NULL; 5917 } 5918 5919 /* Extract the two packed args for the sigset */ 5920 if (arg6) { 5921 sig_ptr = &sig; 5922 sig.size = _NSIG / 8; 5923 5924 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 5925 if (!arg7) { 5926 goto efault; 5927 } 5928 arg_sigset = tswapal(arg7[0]); 5929 arg_sigsize = tswapal(arg7[1]); 5930 unlock_user(arg7, arg6, 0); 5931 5932 if (arg_sigset) { 5933 sig.set = &set; 5934 if (arg_sigsize != sizeof(*target_sigset)) { 5935 /* Like the kernel, we enforce correct size sigsets */ 5936 ret = -TARGET_EINVAL; 5937 goto fail; 5938 } 5939 target_sigset = lock_user(VERIFY_READ, arg_sigset, 5940 sizeof(*target_sigset), 1); 5941 if (!target_sigset) { 5942 goto efault; 5943 } 5944 target_to_host_sigset(&set, target_sigset); 5945 unlock_user(target_sigset, arg_sigset, 0); 5946 } else { 5947 sig.set = NULL; 5948 } 5949 } else { 5950 sig_ptr = NULL; 5951 } 5952 5953 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 5954 ts_ptr, sig_ptr)); 5955 5956 if (!is_error(ret)) { 5957 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 5958 goto efault; 5959 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 5960 goto efault; 5961 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 5962 goto efault; 5963 5964 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 5965 goto efault; 5966 } 5967 } 5968 break; 5969 #endif 5970 case TARGET_NR_symlink: 5971 { 5972 void *p2; 5973 p = lock_user_string(arg1); 5974 p2 = lock_user_string(arg2); 5975 if (!p || !p2) 5976 ret = -TARGET_EFAULT; 5977 else 5978 ret = get_errno(symlink(p, p2)); 5979 unlock_user(p2, arg2, 0); 5980 unlock_user(p, arg1, 0); 5981 } 5982 break; 5983 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 5984 case TARGET_NR_symlinkat: 5985 { 5986 void *p2; 5987 p = lock_user_string(arg1); 5988 p2 = lock_user_string(arg3); 5989 if (!p || !p2) 5990 ret = -TARGET_EFAULT; 5991 else 5992 ret = get_errno(sys_symlinkat(p, arg2, p2)); 5993 unlock_user(p2, arg3, 0); 5994 unlock_user(p, arg1, 0); 5995 } 5996 break; 5997 #endif 5998 #ifdef TARGET_NR_oldlstat 5999 case TARGET_NR_oldlstat: 6000 goto unimplemented; 6001 #endif 6002 case TARGET_NR_readlink: 6003 { 6004 void *p2, *temp; 6005 p 
= lock_user_string(arg1); 6006 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6007 if (!p || !p2) 6008 ret = -TARGET_EFAULT; 6009 else { 6010 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) { 6011 char real[PATH_MAX]; 6012 temp = realpath(exec_path,real); 6013 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ; 6014 snprintf((char *)p2, arg3, "%s", real); 6015 } 6016 else 6017 ret = get_errno(readlink(path(p), p2, arg3)); 6018 } 6019 unlock_user(p2, arg2, ret); 6020 unlock_user(p, arg1, 0); 6021 } 6022 break; 6023 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 6024 case TARGET_NR_readlinkat: 6025 { 6026 void *p2; 6027 p = lock_user_string(arg2); 6028 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6029 if (!p || !p2) 6030 ret = -TARGET_EFAULT; 6031 else 6032 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); 6033 unlock_user(p2, arg3, ret); 6034 unlock_user(p, arg2, 0); 6035 } 6036 break; 6037 #endif 6038 #ifdef TARGET_NR_uselib 6039 case TARGET_NR_uselib: 6040 goto unimplemented; 6041 #endif 6042 #ifdef TARGET_NR_swapon 6043 case TARGET_NR_swapon: 6044 if (!(p = lock_user_string(arg1))) 6045 goto efault; 6046 ret = get_errno(swapon(p, arg2)); 6047 unlock_user(p, arg1, 0); 6048 break; 6049 #endif 6050 case TARGET_NR_reboot: 6051 if (!(p = lock_user_string(arg4))) 6052 goto efault; 6053 ret = reboot(arg1, arg2, arg3, p); 6054 unlock_user(p, arg4, 0); 6055 break; 6056 #ifdef TARGET_NR_readdir 6057 case TARGET_NR_readdir: 6058 goto unimplemented; 6059 #endif 6060 #ifdef TARGET_NR_mmap 6061 case TARGET_NR_mmap: 6062 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \ 6063 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6064 || defined(TARGET_S390X) 6065 { 6066 abi_ulong *v; 6067 abi_ulong v1, v2, v3, v4, v5, v6; 6068 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6069 goto efault; 6070 v1 = tswapal(v[0]); 6071 v2 = tswapal(v[1]); 6072 v3 = tswapal(v[2]); 6073 v4 = tswapal(v[3]); 6074 v5 = tswapal(v[4]); 6075 v6 = tswapal(v[5]); 6076 unlock_user(v, arg1, 0); 6077 ret = get_errno(target_mmap(v1, v2, v3, 6078 target_to_host_bitmask(v4, mmap_flags_tbl), 6079 v5, v6)); 6080 } 6081 #else 6082 ret = get_errno(target_mmap(arg1, arg2, arg3, 6083 target_to_host_bitmask(arg4, mmap_flags_tbl), 6084 arg5, 6085 arg6)); 6086 #endif 6087 break; 6088 #endif 6089 #ifdef TARGET_NR_mmap2 6090 case TARGET_NR_mmap2: 6091 #ifndef MMAP_SHIFT 6092 #define MMAP_SHIFT 12 6093 #endif 6094 ret = get_errno(target_mmap(arg1, arg2, arg3, 6095 target_to_host_bitmask(arg4, mmap_flags_tbl), 6096 arg5, 6097 arg6 << MMAP_SHIFT)); 6098 break; 6099 #endif 6100 case TARGET_NR_munmap: 6101 ret = get_errno(target_munmap(arg1, arg2)); 6102 break; 6103 case TARGET_NR_mprotect: 6104 { 6105 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 6106 /* Special hack to detect libc making the stack executable. */ 6107 if ((arg3 & PROT_GROWSDOWN) 6108 && arg1 >= ts->info->stack_limit 6109 && arg1 <= ts->info->start_stack) { 6110 arg3 &= ~PROT_GROWSDOWN; 6111 arg2 = arg2 + arg1 - ts->info->stack_limit; 6112 arg1 = ts->info->stack_limit; 6113 } 6114 } 6115 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6116 break; 6117 #ifdef TARGET_NR_mremap 6118 case TARGET_NR_mremap: 6119 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6120 break; 6121 #endif 6122 /* ??? msync/mlock/munlock are broken for softmmu. 
*/ 6123 #ifdef TARGET_NR_msync 6124 case TARGET_NR_msync: 6125 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6126 break; 6127 #endif 6128 #ifdef TARGET_NR_mlock 6129 case TARGET_NR_mlock: 6130 ret = get_errno(mlock(g2h(arg1), arg2)); 6131 break; 6132 #endif 6133 #ifdef TARGET_NR_munlock 6134 case TARGET_NR_munlock: 6135 ret = get_errno(munlock(g2h(arg1), arg2)); 6136 break; 6137 #endif 6138 #ifdef TARGET_NR_mlockall 6139 case TARGET_NR_mlockall: 6140 ret = get_errno(mlockall(arg1)); 6141 break; 6142 #endif 6143 #ifdef TARGET_NR_munlockall 6144 case TARGET_NR_munlockall: 6145 ret = get_errno(munlockall()); 6146 break; 6147 #endif 6148 case TARGET_NR_truncate: 6149 if (!(p = lock_user_string(arg1))) 6150 goto efault; 6151 ret = get_errno(truncate(p, arg2)); 6152 unlock_user(p, arg1, 0); 6153 break; 6154 case TARGET_NR_ftruncate: 6155 ret = get_errno(ftruncate(arg1, arg2)); 6156 break; 6157 case TARGET_NR_fchmod: 6158 ret = get_errno(fchmod(arg1, arg2)); 6159 break; 6160 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 6161 case TARGET_NR_fchmodat: 6162 if (!(p = lock_user_string(arg2))) 6163 goto efault; 6164 ret = get_errno(sys_fchmodat(arg1, p, arg3)); 6165 unlock_user(p, arg2, 0); 6166 break; 6167 #endif 6168 case TARGET_NR_getpriority: 6169 /* libc does special remapping of the return value of 6170 * sys_getpriority() so it's just easiest to call 6171 * sys_getpriority() directly rather than through libc. */ 6172 ret = get_errno(sys_getpriority(arg1, arg2)); 6173 break; 6174 case TARGET_NR_setpriority: 6175 ret = get_errno(setpriority(arg1, arg2, arg3)); 6176 break; 6177 #ifdef TARGET_NR_profil 6178 case TARGET_NR_profil: 6179 goto unimplemented; 6180 #endif 6181 case TARGET_NR_statfs: 6182 if (!(p = lock_user_string(arg1))) 6183 goto efault; 6184 ret = get_errno(statfs(path(p), &stfs)); 6185 unlock_user(p, arg1, 0); 6186 convert_statfs: 6187 if (!is_error(ret)) { 6188 struct target_statfs *target_stfs; 6189 6190 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6191 goto efault; 6192 __put_user(stfs.f_type, &target_stfs->f_type); 6193 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6194 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6195 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6196 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6197 __put_user(stfs.f_files, &target_stfs->f_files); 6198 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6199 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6200 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6201 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6202 unlock_user_struct(target_stfs, arg2, 1); 6203 } 6204 break; 6205 case TARGET_NR_fstatfs: 6206 ret = get_errno(fstatfs(arg1, &stfs)); 6207 goto convert_statfs; 6208 #ifdef TARGET_NR_statfs64 6209 case TARGET_NR_statfs64: 6210 if (!(p = lock_user_string(arg1))) 6211 goto efault; 6212 ret = get_errno(statfs(path(p), &stfs)); 6213 unlock_user(p, arg1, 0); 6214 convert_statfs64: 6215 if (!is_error(ret)) { 6216 struct target_statfs64 *target_stfs; 6217 6218 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6219 goto efault; 6220 __put_user(stfs.f_type, &target_stfs->f_type); 6221 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6222 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6223 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6224 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6225 __put_user(stfs.f_files, &target_stfs->f_files); 6226 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6227 
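            /* Each __put_user() in this conversion stores the host statfs
               field into the corresponding field of the target statfs
               structure in guest byte order, so no explicit tswap*() calls
               are needed for these fields. */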
__put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6228 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6229 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6230 unlock_user_struct(target_stfs, arg3, 1); 6231 } 6232 break; 6233 case TARGET_NR_fstatfs64: 6234 ret = get_errno(fstatfs(arg1, &stfs)); 6235 goto convert_statfs64; 6236 #endif 6237 #ifdef TARGET_NR_ioperm 6238 case TARGET_NR_ioperm: 6239 goto unimplemented; 6240 #endif 6241 #ifdef TARGET_NR_socketcall 6242 case TARGET_NR_socketcall: 6243 ret = do_socketcall(arg1, arg2); 6244 break; 6245 #endif 6246 #ifdef TARGET_NR_accept 6247 case TARGET_NR_accept: 6248 ret = do_accept(arg1, arg2, arg3); 6249 break; 6250 #endif 6251 #ifdef TARGET_NR_bind 6252 case TARGET_NR_bind: 6253 ret = do_bind(arg1, arg2, arg3); 6254 break; 6255 #endif 6256 #ifdef TARGET_NR_connect 6257 case TARGET_NR_connect: 6258 ret = do_connect(arg1, arg2, arg3); 6259 break; 6260 #endif 6261 #ifdef TARGET_NR_getpeername 6262 case TARGET_NR_getpeername: 6263 ret = do_getpeername(arg1, arg2, arg3); 6264 break; 6265 #endif 6266 #ifdef TARGET_NR_getsockname 6267 case TARGET_NR_getsockname: 6268 ret = do_getsockname(arg1, arg2, arg3); 6269 break; 6270 #endif 6271 #ifdef TARGET_NR_getsockopt 6272 case TARGET_NR_getsockopt: 6273 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6274 break; 6275 #endif 6276 #ifdef TARGET_NR_listen 6277 case TARGET_NR_listen: 6278 ret = get_errno(listen(arg1, arg2)); 6279 break; 6280 #endif 6281 #ifdef TARGET_NR_recv 6282 case TARGET_NR_recv: 6283 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6284 break; 6285 #endif 6286 #ifdef TARGET_NR_recvfrom 6287 case TARGET_NR_recvfrom: 6288 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6289 break; 6290 #endif 6291 #ifdef TARGET_NR_recvmsg 6292 case TARGET_NR_recvmsg: 6293 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6294 break; 6295 #endif 6296 #ifdef TARGET_NR_send 6297 case TARGET_NR_send: 6298 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6299 break; 6300 #endif 6301 #ifdef TARGET_NR_sendmsg 6302 case TARGET_NR_sendmsg: 6303 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6304 break; 6305 #endif 6306 #ifdef TARGET_NR_sendto 6307 case TARGET_NR_sendto: 6308 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6309 break; 6310 #endif 6311 #ifdef TARGET_NR_shutdown 6312 case TARGET_NR_shutdown: 6313 ret = get_errno(shutdown(arg1, arg2)); 6314 break; 6315 #endif 6316 #ifdef TARGET_NR_socket 6317 case TARGET_NR_socket: 6318 ret = do_socket(arg1, arg2, arg3); 6319 break; 6320 #endif 6321 #ifdef TARGET_NR_socketpair 6322 case TARGET_NR_socketpair: 6323 ret = do_socketpair(arg1, arg2, arg3, arg4); 6324 break; 6325 #endif 6326 #ifdef TARGET_NR_setsockopt 6327 case TARGET_NR_setsockopt: 6328 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6329 break; 6330 #endif 6331 6332 case TARGET_NR_syslog: 6333 if (!(p = lock_user_string(arg2))) 6334 goto efault; 6335 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6336 unlock_user(p, arg2, 0); 6337 break; 6338 6339 case TARGET_NR_setitimer: 6340 { 6341 struct itimerval value, ovalue, *pvalue; 6342 6343 if (arg2) { 6344 pvalue = &value; 6345 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6346 || copy_from_user_timeval(&pvalue->it_value, 6347 arg2 + sizeof(struct target_timeval))) 6348 goto efault; 6349 } else { 6350 pvalue = NULL; 6351 } 6352 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6353 if (!is_error(ret) && arg3) { 6354 if (copy_to_user_timeval(arg3, 6355 &ovalue.it_interval) 6356 || 
copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6357 &ovalue.it_value)) 6358 goto efault; 6359 } 6360 } 6361 break; 6362 case TARGET_NR_getitimer: 6363 { 6364 struct itimerval value; 6365 6366 ret = get_errno(getitimer(arg1, &value)); 6367 if (!is_error(ret) && arg2) { 6368 if (copy_to_user_timeval(arg2, 6369 &value.it_interval) 6370 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6371 &value.it_value)) 6372 goto efault; 6373 } 6374 } 6375 break; 6376 case TARGET_NR_stat: 6377 if (!(p = lock_user_string(arg1))) 6378 goto efault; 6379 ret = get_errno(stat(path(p), &st)); 6380 unlock_user(p, arg1, 0); 6381 goto do_stat; 6382 case TARGET_NR_lstat: 6383 if (!(p = lock_user_string(arg1))) 6384 goto efault; 6385 ret = get_errno(lstat(path(p), &st)); 6386 unlock_user(p, arg1, 0); 6387 goto do_stat; 6388 case TARGET_NR_fstat: 6389 { 6390 ret = get_errno(fstat(arg1, &st)); 6391 do_stat: 6392 if (!is_error(ret)) { 6393 struct target_stat *target_st; 6394 6395 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6396 goto efault; 6397 memset(target_st, 0, sizeof(*target_st)); 6398 __put_user(st.st_dev, &target_st->st_dev); 6399 __put_user(st.st_ino, &target_st->st_ino); 6400 __put_user(st.st_mode, &target_st->st_mode); 6401 __put_user(st.st_uid, &target_st->st_uid); 6402 __put_user(st.st_gid, &target_st->st_gid); 6403 __put_user(st.st_nlink, &target_st->st_nlink); 6404 __put_user(st.st_rdev, &target_st->st_rdev); 6405 __put_user(st.st_size, &target_st->st_size); 6406 __put_user(st.st_blksize, &target_st->st_blksize); 6407 __put_user(st.st_blocks, &target_st->st_blocks); 6408 __put_user(st.st_atime, &target_st->target_st_atime); 6409 __put_user(st.st_mtime, &target_st->target_st_mtime); 6410 __put_user(st.st_ctime, &target_st->target_st_ctime); 6411 unlock_user_struct(target_st, arg2, 1); 6412 } 6413 } 6414 break; 6415 #ifdef TARGET_NR_olduname 6416 case TARGET_NR_olduname: 6417 goto unimplemented; 6418 #endif 6419 #ifdef TARGET_NR_iopl 6420 case TARGET_NR_iopl: 6421 goto unimplemented; 6422 #endif 6423 case TARGET_NR_vhangup: 6424 ret = get_errno(vhangup()); 6425 break; 6426 #ifdef TARGET_NR_idle 6427 case TARGET_NR_idle: 6428 goto unimplemented; 6429 #endif 6430 #ifdef TARGET_NR_syscall 6431 case TARGET_NR_syscall: 6432 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6433 arg6, arg7, arg8, 0); 6434 break; 6435 #endif 6436 case TARGET_NR_wait4: 6437 { 6438 int status; 6439 abi_long status_ptr = arg2; 6440 struct rusage rusage, *rusage_ptr; 6441 abi_ulong target_rusage = arg4; 6442 if (target_rusage) 6443 rusage_ptr = &rusage; 6444 else 6445 rusage_ptr = NULL; 6446 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6447 if (!is_error(ret)) { 6448 if (status_ptr && ret) { 6449 status = host_to_target_waitstatus(status); 6450 if (put_user_s32(status, status_ptr)) 6451 goto efault; 6452 } 6453 if (target_rusage) 6454 host_to_target_rusage(target_rusage, &rusage); 6455 } 6456 } 6457 break; 6458 #ifdef TARGET_NR_swapoff 6459 case TARGET_NR_swapoff: 6460 if (!(p = lock_user_string(arg1))) 6461 goto efault; 6462 ret = get_errno(swapoff(p)); 6463 unlock_user(p, arg1, 0); 6464 break; 6465 #endif 6466 case TARGET_NR_sysinfo: 6467 { 6468 struct target_sysinfo *target_value; 6469 struct sysinfo value; 6470 ret = get_errno(sysinfo(&value)); 6471 if (!is_error(ret) && arg1) 6472 { 6473 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6474 goto efault; 6475 __put_user(value.uptime, &target_value->uptime); 6476 __put_user(value.loads[0], 
&target_value->loads[0]); 6477 __put_user(value.loads[1], &target_value->loads[1]); 6478 __put_user(value.loads[2], &target_value->loads[2]); 6479 __put_user(value.totalram, &target_value->totalram); 6480 __put_user(value.freeram, &target_value->freeram); 6481 __put_user(value.sharedram, &target_value->sharedram); 6482 __put_user(value.bufferram, &target_value->bufferram); 6483 __put_user(value.totalswap, &target_value->totalswap); 6484 __put_user(value.freeswap, &target_value->freeswap); 6485 __put_user(value.procs, &target_value->procs); 6486 __put_user(value.totalhigh, &target_value->totalhigh); 6487 __put_user(value.freehigh, &target_value->freehigh); 6488 __put_user(value.mem_unit, &target_value->mem_unit); 6489 unlock_user_struct(target_value, arg1, 1); 6490 } 6491 } 6492 break; 6493 #ifdef TARGET_NR_ipc 6494 case TARGET_NR_ipc: 6495 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6496 break; 6497 #endif 6498 #ifdef TARGET_NR_semget 6499 case TARGET_NR_semget: 6500 ret = get_errno(semget(arg1, arg2, arg3)); 6501 break; 6502 #endif 6503 #ifdef TARGET_NR_semop 6504 case TARGET_NR_semop: 6505 ret = get_errno(do_semop(arg1, arg2, arg3)); 6506 break; 6507 #endif 6508 #ifdef TARGET_NR_semctl 6509 case TARGET_NR_semctl: 6510 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 6511 break; 6512 #endif 6513 #ifdef TARGET_NR_msgctl 6514 case TARGET_NR_msgctl: 6515 ret = do_msgctl(arg1, arg2, arg3); 6516 break; 6517 #endif 6518 #ifdef TARGET_NR_msgget 6519 case TARGET_NR_msgget: 6520 ret = get_errno(msgget(arg1, arg2)); 6521 break; 6522 #endif 6523 #ifdef TARGET_NR_msgrcv 6524 case TARGET_NR_msgrcv: 6525 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 6526 break; 6527 #endif 6528 #ifdef TARGET_NR_msgsnd 6529 case TARGET_NR_msgsnd: 6530 ret = do_msgsnd(arg1, arg2, arg3, arg4); 6531 break; 6532 #endif 6533 #ifdef TARGET_NR_shmget 6534 case TARGET_NR_shmget: 6535 ret = get_errno(shmget(arg1, arg2, arg3)); 6536 break; 6537 #endif 6538 #ifdef TARGET_NR_shmctl 6539 case TARGET_NR_shmctl: 6540 ret = do_shmctl(arg1, arg2, arg3); 6541 break; 6542 #endif 6543 #ifdef TARGET_NR_shmat 6544 case TARGET_NR_shmat: 6545 ret = do_shmat(arg1, arg2, arg3); 6546 break; 6547 #endif 6548 #ifdef TARGET_NR_shmdt 6549 case TARGET_NR_shmdt: 6550 ret = do_shmdt(arg1); 6551 break; 6552 #endif 6553 case TARGET_NR_fsync: 6554 ret = get_errno(fsync(arg1)); 6555 break; 6556 case TARGET_NR_clone: 6557 #if defined(TARGET_SH4) || defined(TARGET_ALPHA) 6558 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 6559 #elif defined(TARGET_CRIS) 6560 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); 6561 #elif defined(TARGET_S390X) 6562 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 6563 #else 6564 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 6565 #endif 6566 break; 6567 #ifdef __NR_exit_group 6568 /* new thread calls */ 6569 case TARGET_NR_exit_group: 6570 #ifdef TARGET_GPROF 6571 _mcleanup(); 6572 #endif 6573 gdb_exit(cpu_env, arg1); 6574 ret = get_errno(exit_group(arg1)); 6575 break; 6576 #endif 6577 case TARGET_NR_setdomainname: 6578 if (!(p = lock_user_string(arg1))) 6579 goto efault; 6580 ret = get_errno(setdomainname(p, arg2)); 6581 unlock_user(p, arg1, 0); 6582 break; 6583 case TARGET_NR_uname: 6584 /* no need to transcode because we use the linux syscall */ 6585 { 6586 struct new_utsname * buf; 6587 6588 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 6589 goto efault; 6590 ret = get_errno(sys_uname(buf)); 6591 if (!is_error(ret)) { 6592 /* 
                Overwrite the native machine name with whatever is being
                emulated. */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy (buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
        break;
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
        goto unimplemented;
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
        break;
#endif
#endif
    case TARGET_NR_adjtimex:
        goto unimplemented;
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
#endif
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
#endif
        goto unimplemented;
    case TARGET_NR_quotactl:
        goto unimplemented;
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
        break;
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
        break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
        goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
        goto unimplemented;
#endif
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
        break;
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
        goto unimplemented;
#endif
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_getdents:
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = malloc(count);
            if (!dirp) {
                ret = -TARGET_ENOMEM;
                goto fail;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    goto efault;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    tnamelen = treclen - (2 * sizeof(abi_long) + 2);
                    if (tnamelen > 256)
                        tnamelen = 256;
                    /* XXX: may not be correct */
                    pstrcpy(tde->d_name, tnamelen, de->d_name);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto
efault; 6722 ret = get_errno(sys_getdents(arg1, dirp, count)); 6723 if (!is_error(ret)) { 6724 struct linux_dirent *de; 6725 int len = ret; 6726 int reclen; 6727 de = dirp; 6728 while (len > 0) { 6729 reclen = de->d_reclen; 6730 if (reclen > len) 6731 break; 6732 de->d_reclen = tswap16(reclen); 6733 tswapls(&de->d_ino); 6734 tswapls(&de->d_off); 6735 de = (struct linux_dirent *)((char *)de + reclen); 6736 len -= reclen; 6737 } 6738 } 6739 unlock_user(dirp, arg2, ret); 6740 } 6741 #endif 6742 break; 6743 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 6744 case TARGET_NR_getdents64: 6745 { 6746 struct linux_dirent64 *dirp; 6747 abi_long count = arg3; 6748 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6749 goto efault; 6750 ret = get_errno(sys_getdents64(arg1, dirp, count)); 6751 if (!is_error(ret)) { 6752 struct linux_dirent64 *de; 6753 int len = ret; 6754 int reclen; 6755 de = dirp; 6756 while (len > 0) { 6757 reclen = de->d_reclen; 6758 if (reclen > len) 6759 break; 6760 de->d_reclen = tswap16(reclen); 6761 tswap64s((uint64_t *)&de->d_ino); 6762 tswap64s((uint64_t *)&de->d_off); 6763 de = (struct linux_dirent64 *)((char *)de + reclen); 6764 len -= reclen; 6765 } 6766 } 6767 unlock_user(dirp, arg2, ret); 6768 } 6769 break; 6770 #endif /* TARGET_NR_getdents64 */ 6771 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X) 6772 #ifdef TARGET_S390X 6773 case TARGET_NR_select: 6774 #else 6775 case TARGET_NR__newselect: 6776 #endif 6777 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6778 break; 6779 #endif 6780 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 6781 # ifdef TARGET_NR_poll 6782 case TARGET_NR_poll: 6783 # endif 6784 # ifdef TARGET_NR_ppoll 6785 case TARGET_NR_ppoll: 6786 # endif 6787 { 6788 struct target_pollfd *target_pfd; 6789 unsigned int nfds = arg2; 6790 int timeout = arg3; 6791 struct pollfd *pfd; 6792 unsigned int i; 6793 6794 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 6795 if (!target_pfd) 6796 goto efault; 6797 6798 pfd = alloca(sizeof(struct pollfd) * nfds); 6799 for(i = 0; i < nfds; i++) { 6800 pfd[i].fd = tswap32(target_pfd[i].fd); 6801 pfd[i].events = tswap16(target_pfd[i].events); 6802 } 6803 6804 # ifdef TARGET_NR_ppoll 6805 if (num == TARGET_NR_ppoll) { 6806 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 6807 target_sigset_t *target_set; 6808 sigset_t _set, *set = &_set; 6809 6810 if (arg3) { 6811 if (target_to_host_timespec(timeout_ts, arg3)) { 6812 unlock_user(target_pfd, arg1, 0); 6813 goto efault; 6814 } 6815 } else { 6816 timeout_ts = NULL; 6817 } 6818 6819 if (arg4) { 6820 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 6821 if (!target_set) { 6822 unlock_user(target_pfd, arg1, 0); 6823 goto efault; 6824 } 6825 target_to_host_sigset(set, target_set); 6826 } else { 6827 set = NULL; 6828 } 6829 6830 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 6831 6832 if (!is_error(ret) && arg3) { 6833 host_to_target_timespec(arg3, timeout_ts); 6834 } 6835 if (arg4) { 6836 unlock_user(target_set, arg4, 0); 6837 } 6838 } else 6839 # endif 6840 ret = get_errno(poll(pfd, nfds, timeout)); 6841 6842 if (!is_error(ret)) { 6843 for(i = 0; i < nfds; i++) { 6844 target_pfd[i].revents = tswap16(pfd[i].revents); 6845 } 6846 } 6847 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 6848 } 6849 break; 6850 #endif 6851 case TARGET_NR_flock: 6852 /* NOTE: the flock constant seems to be the same for every 6853 Linux platform */ 6854 ret = 
get_errno(flock(arg1, arg2)); 6855 break; 6856 case TARGET_NR_readv: 6857 { 6858 int count = arg3; 6859 struct iovec *vec; 6860 6861 vec = alloca(count * sizeof(struct iovec)); 6862 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0) 6863 goto efault; 6864 ret = get_errno(readv(arg1, vec, count)); 6865 unlock_iovec(vec, arg2, count, 1); 6866 } 6867 break; 6868 case TARGET_NR_writev: 6869 { 6870 int count = arg3; 6871 struct iovec *vec; 6872 6873 vec = alloca(count * sizeof(struct iovec)); 6874 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) 6875 goto efault; 6876 ret = get_errno(writev(arg1, vec, count)); 6877 unlock_iovec(vec, arg2, count, 0); 6878 } 6879 break; 6880 case TARGET_NR_getsid: 6881 ret = get_errno(getsid(arg1)); 6882 break; 6883 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 6884 case TARGET_NR_fdatasync: 6885 ret = get_errno(fdatasync(arg1)); 6886 break; 6887 #endif 6888 case TARGET_NR__sysctl: 6889 /* We don't implement this, but ENOTDIR is always a safe 6890 return value. */ 6891 ret = -TARGET_ENOTDIR; 6892 break; 6893 case TARGET_NR_sched_getaffinity: 6894 { 6895 unsigned int mask_size; 6896 unsigned long *mask; 6897 6898 /* 6899 * sched_getaffinity needs multiples of ulong, so need to take 6900 * care of mismatches between target ulong and host ulong sizes. 6901 */ 6902 if (arg2 & (sizeof(abi_ulong) - 1)) { 6903 ret = -TARGET_EINVAL; 6904 break; 6905 } 6906 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 6907 6908 mask = alloca(mask_size); 6909 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 6910 6911 if (!is_error(ret)) { 6912 if (copy_to_user(arg3, mask, ret)) { 6913 goto efault; 6914 } 6915 } 6916 } 6917 break; 6918 case TARGET_NR_sched_setaffinity: 6919 { 6920 unsigned int mask_size; 6921 unsigned long *mask; 6922 6923 /* 6924 * sched_setaffinity needs multiples of ulong, so need to take 6925 * care of mismatches between target ulong and host ulong sizes. 
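             * For example, a 32-bit guest may pass arg2 == 4 while a 64-bit
             * host works in units of sizeof(unsigned long) == 8; arg2 is
             * therefore only required to be a multiple of abi_ulong here,
             * and mask_size below is rounded up to a whole number of host
             * longs.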
6926 */ 6927 if (arg2 & (sizeof(abi_ulong) - 1)) { 6928 ret = -TARGET_EINVAL; 6929 break; 6930 } 6931 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 6932 6933 mask = alloca(mask_size); 6934 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 6935 goto efault; 6936 } 6937 memcpy(mask, p, arg2); 6938 unlock_user_struct(p, arg2, 0); 6939 6940 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 6941 } 6942 break; 6943 case TARGET_NR_sched_setparam: 6944 { 6945 struct sched_param *target_schp; 6946 struct sched_param schp; 6947 6948 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 6949 goto efault; 6950 schp.sched_priority = tswap32(target_schp->sched_priority); 6951 unlock_user_struct(target_schp, arg2, 0); 6952 ret = get_errno(sched_setparam(arg1, &schp)); 6953 } 6954 break; 6955 case TARGET_NR_sched_getparam: 6956 { 6957 struct sched_param *target_schp; 6958 struct sched_param schp; 6959 ret = get_errno(sched_getparam(arg1, &schp)); 6960 if (!is_error(ret)) { 6961 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 6962 goto efault; 6963 target_schp->sched_priority = tswap32(schp.sched_priority); 6964 unlock_user_struct(target_schp, arg2, 1); 6965 } 6966 } 6967 break; 6968 case TARGET_NR_sched_setscheduler: 6969 { 6970 struct sched_param *target_schp; 6971 struct sched_param schp; 6972 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 6973 goto efault; 6974 schp.sched_priority = tswap32(target_schp->sched_priority); 6975 unlock_user_struct(target_schp, arg3, 0); 6976 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 6977 } 6978 break; 6979 case TARGET_NR_sched_getscheduler: 6980 ret = get_errno(sched_getscheduler(arg1)); 6981 break; 6982 case TARGET_NR_sched_yield: 6983 ret = get_errno(sched_yield()); 6984 break; 6985 case TARGET_NR_sched_get_priority_max: 6986 ret = get_errno(sched_get_priority_max(arg1)); 6987 break; 6988 case TARGET_NR_sched_get_priority_min: 6989 ret = get_errno(sched_get_priority_min(arg1)); 6990 break; 6991 case TARGET_NR_sched_rr_get_interval: 6992 { 6993 struct timespec ts; 6994 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 6995 if (!is_error(ret)) { 6996 host_to_target_timespec(arg2, &ts); 6997 } 6998 } 6999 break; 7000 case TARGET_NR_nanosleep: 7001 { 7002 struct timespec req, rem; 7003 target_to_host_timespec(&req, arg1); 7004 ret = get_errno(nanosleep(&req, &rem)); 7005 if (is_error(ret) && arg2) { 7006 host_to_target_timespec(arg2, &rem); 7007 } 7008 } 7009 break; 7010 #ifdef TARGET_NR_query_module 7011 case TARGET_NR_query_module: 7012 goto unimplemented; 7013 #endif 7014 #ifdef TARGET_NR_nfsservctl 7015 case TARGET_NR_nfsservctl: 7016 goto unimplemented; 7017 #endif 7018 case TARGET_NR_prctl: 7019 switch (arg1) 7020 { 7021 case PR_GET_PDEATHSIG: 7022 { 7023 int deathsig; 7024 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7025 if (!is_error(ret) && arg2 7026 && put_user_ual(deathsig, arg2)) 7027 goto efault; 7028 } 7029 break; 7030 default: 7031 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7032 break; 7033 } 7034 break; 7035 #ifdef TARGET_NR_arch_prctl 7036 case TARGET_NR_arch_prctl: 7037 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7038 ret = do_arch_prctl(cpu_env, arg1, arg2); 7039 break; 7040 #else 7041 goto unimplemented; 7042 #endif 7043 #endif 7044 #ifdef TARGET_NR_pread 7045 case TARGET_NR_pread: 7046 if (regpairs_aligned(cpu_env)) 7047 arg4 = arg5; 7048 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7049 goto efault; 7050 ret = get_errno(pread(arg1, p, arg3, 
arg4)); 7051 unlock_user(p, arg2, ret); 7052 break; 7053 case TARGET_NR_pwrite: 7054 if (regpairs_aligned(cpu_env)) 7055 arg4 = arg5; 7056 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7057 goto efault; 7058 ret = get_errno(pwrite(arg1, p, arg3, arg4)); 7059 unlock_user(p, arg2, 0); 7060 break; 7061 #endif 7062 #ifdef TARGET_NR_pread64 7063 case TARGET_NR_pread64: 7064 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7065 goto efault; 7066 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7067 unlock_user(p, arg2, ret); 7068 break; 7069 case TARGET_NR_pwrite64: 7070 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7071 goto efault; 7072 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7073 unlock_user(p, arg2, 0); 7074 break; 7075 #endif 7076 case TARGET_NR_getcwd: 7077 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7078 goto efault; 7079 ret = get_errno(sys_getcwd1(p, arg2)); 7080 unlock_user(p, arg1, ret); 7081 break; 7082 case TARGET_NR_capget: 7083 goto unimplemented; 7084 case TARGET_NR_capset: 7085 goto unimplemented; 7086 case TARGET_NR_sigaltstack: 7087 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7088 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7089 defined(TARGET_M68K) || defined(TARGET_S390X) 7090 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7091 break; 7092 #else 7093 goto unimplemented; 7094 #endif 7095 case TARGET_NR_sendfile: 7096 goto unimplemented; 7097 #ifdef TARGET_NR_getpmsg 7098 case TARGET_NR_getpmsg: 7099 goto unimplemented; 7100 #endif 7101 #ifdef TARGET_NR_putpmsg 7102 case TARGET_NR_putpmsg: 7103 goto unimplemented; 7104 #endif 7105 #ifdef TARGET_NR_vfork 7106 case TARGET_NR_vfork: 7107 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7108 0, 0, 0, 0)); 7109 break; 7110 #endif 7111 #ifdef TARGET_NR_ugetrlimit 7112 case TARGET_NR_ugetrlimit: 7113 { 7114 struct rlimit rlim; 7115 int resource = target_to_host_resource(arg1); 7116 ret = get_errno(getrlimit(resource, &rlim)); 7117 if (!is_error(ret)) { 7118 struct target_rlimit *target_rlim; 7119 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7120 goto efault; 7121 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7122 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7123 unlock_user_struct(target_rlim, arg2, 1); 7124 } 7125 break; 7126 } 7127 #endif 7128 #ifdef TARGET_NR_truncate64 7129 case TARGET_NR_truncate64: 7130 if (!(p = lock_user_string(arg1))) 7131 goto efault; 7132 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7133 unlock_user(p, arg1, 0); 7134 break; 7135 #endif 7136 #ifdef TARGET_NR_ftruncate64 7137 case TARGET_NR_ftruncate64: 7138 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7139 break; 7140 #endif 7141 #ifdef TARGET_NR_stat64 7142 case TARGET_NR_stat64: 7143 if (!(p = lock_user_string(arg1))) 7144 goto efault; 7145 ret = get_errno(stat(path(p), &st)); 7146 unlock_user(p, arg1, 0); 7147 if (!is_error(ret)) 7148 ret = host_to_target_stat64(cpu_env, arg2, &st); 7149 break; 7150 #endif 7151 #ifdef TARGET_NR_lstat64 7152 case TARGET_NR_lstat64: 7153 if (!(p = lock_user_string(arg1))) 7154 goto efault; 7155 ret = get_errno(lstat(path(p), &st)); 7156 unlock_user(p, arg1, 0); 7157 if (!is_error(ret)) 7158 ret = host_to_target_stat64(cpu_env, arg2, &st); 7159 break; 7160 #endif 7161 #ifdef TARGET_NR_fstat64 7162 case TARGET_NR_fstat64: 7163 ret = get_errno(fstat(arg1, &st)); 7164 if 
(!is_error(ret)) 7165 ret = host_to_target_stat64(cpu_env, arg2, &st); 7166 break; 7167 #endif 7168 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 7169 (defined(__NR_fstatat64) || defined(__NR_newfstatat)) 7170 #ifdef TARGET_NR_fstatat64 7171 case TARGET_NR_fstatat64: 7172 #endif 7173 #ifdef TARGET_NR_newfstatat 7174 case TARGET_NR_newfstatat: 7175 #endif 7176 if (!(p = lock_user_string(arg2))) 7177 goto efault; 7178 #ifdef __NR_fstatat64 7179 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); 7180 #else 7181 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); 7182 #endif 7183 if (!is_error(ret)) 7184 ret = host_to_target_stat64(cpu_env, arg3, &st); 7185 break; 7186 #endif 7187 case TARGET_NR_lchown: 7188 if (!(p = lock_user_string(arg1))) 7189 goto efault; 7190 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7191 unlock_user(p, arg1, 0); 7192 break; 7193 #ifdef TARGET_NR_getuid 7194 case TARGET_NR_getuid: 7195 ret = get_errno(high2lowuid(getuid())); 7196 break; 7197 #endif 7198 #ifdef TARGET_NR_getgid 7199 case TARGET_NR_getgid: 7200 ret = get_errno(high2lowgid(getgid())); 7201 break; 7202 #endif 7203 #ifdef TARGET_NR_geteuid 7204 case TARGET_NR_geteuid: 7205 ret = get_errno(high2lowuid(geteuid())); 7206 break; 7207 #endif 7208 #ifdef TARGET_NR_getegid 7209 case TARGET_NR_getegid: 7210 ret = get_errno(high2lowgid(getegid())); 7211 break; 7212 #endif 7213 case TARGET_NR_setreuid: 7214 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7215 break; 7216 case TARGET_NR_setregid: 7217 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7218 break; 7219 case TARGET_NR_getgroups: 7220 { 7221 int gidsetsize = arg1; 7222 target_id *target_grouplist; 7223 gid_t *grouplist; 7224 int i; 7225 7226 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7227 ret = get_errno(getgroups(gidsetsize, grouplist)); 7228 if (gidsetsize == 0) 7229 break; 7230 if (!is_error(ret)) { 7231 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0); 7232 if (!target_grouplist) 7233 goto efault; 7234 for(i = 0;i < ret; i++) 7235 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7236 unlock_user(target_grouplist, arg2, gidsetsize * 2); 7237 } 7238 } 7239 break; 7240 case TARGET_NR_setgroups: 7241 { 7242 int gidsetsize = arg1; 7243 target_id *target_grouplist; 7244 gid_t *grouplist; 7245 int i; 7246 7247 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7248 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1); 7249 if (!target_grouplist) { 7250 ret = -TARGET_EFAULT; 7251 goto fail; 7252 } 7253 for(i = 0;i < gidsetsize; i++) 7254 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7255 unlock_user(target_grouplist, arg2, 0); 7256 ret = get_errno(setgroups(gidsetsize, grouplist)); 7257 } 7258 break; 7259 case TARGET_NR_fchown: 7260 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7261 break; 7262 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 7263 case TARGET_NR_fchownat: 7264 if (!(p = lock_user_string(arg2))) 7265 goto efault; 7266 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); 7267 unlock_user(p, arg2, 0); 7268 break; 7269 #endif 7270 #ifdef TARGET_NR_setresuid 7271 case TARGET_NR_setresuid: 7272 ret = get_errno(setresuid(low2highuid(arg1), 7273 low2highuid(arg2), 7274 low2highuid(arg3))); 7275 break; 7276 #endif 7277 #ifdef TARGET_NR_getresuid 7278 case TARGET_NR_getresuid: 7279 { 7280 uid_t ruid, euid, suid; 7281 ret = 
get_errno(getresuid(&ruid, &euid, &suid)); 7282 if (!is_error(ret)) { 7283 if (put_user_u16(high2lowuid(ruid), arg1) 7284 || put_user_u16(high2lowuid(euid), arg2) 7285 || put_user_u16(high2lowuid(suid), arg3)) 7286 goto efault; 7287 } 7288 } 7289 break; 7290 #endif 7291 #ifdef TARGET_NR_getresgid 7292 case TARGET_NR_setresgid: 7293 ret = get_errno(setresgid(low2highgid(arg1), 7294 low2highgid(arg2), 7295 low2highgid(arg3))); 7296 break; 7297 #endif 7298 #ifdef TARGET_NR_getresgid 7299 case TARGET_NR_getresgid: 7300 { 7301 gid_t rgid, egid, sgid; 7302 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7303 if (!is_error(ret)) { 7304 if (put_user_u16(high2lowgid(rgid), arg1) 7305 || put_user_u16(high2lowgid(egid), arg2) 7306 || put_user_u16(high2lowgid(sgid), arg3)) 7307 goto efault; 7308 } 7309 } 7310 break; 7311 #endif 7312 case TARGET_NR_chown: 7313 if (!(p = lock_user_string(arg1))) 7314 goto efault; 7315 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7316 unlock_user(p, arg1, 0); 7317 break; 7318 case TARGET_NR_setuid: 7319 ret = get_errno(setuid(low2highuid(arg1))); 7320 break; 7321 case TARGET_NR_setgid: 7322 ret = get_errno(setgid(low2highgid(arg1))); 7323 break; 7324 case TARGET_NR_setfsuid: 7325 ret = get_errno(setfsuid(arg1)); 7326 break; 7327 case TARGET_NR_setfsgid: 7328 ret = get_errno(setfsgid(arg1)); 7329 break; 7330 7331 #ifdef TARGET_NR_lchown32 7332 case TARGET_NR_lchown32: 7333 if (!(p = lock_user_string(arg1))) 7334 goto efault; 7335 ret = get_errno(lchown(p, arg2, arg3)); 7336 unlock_user(p, arg1, 0); 7337 break; 7338 #endif 7339 #ifdef TARGET_NR_getuid32 7340 case TARGET_NR_getuid32: 7341 ret = get_errno(getuid()); 7342 break; 7343 #endif 7344 7345 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 7346 /* Alpha specific */ 7347 case TARGET_NR_getxuid: 7348 { 7349 uid_t euid; 7350 euid=geteuid(); 7351 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 7352 } 7353 ret = get_errno(getuid()); 7354 break; 7355 #endif 7356 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 7357 /* Alpha specific */ 7358 case TARGET_NR_getxgid: 7359 { 7360 uid_t egid; 7361 egid=getegid(); 7362 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 7363 } 7364 ret = get_errno(getgid()); 7365 break; 7366 #endif 7367 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 7368 /* Alpha specific */ 7369 case TARGET_NR_osf_getsysinfo: 7370 ret = -TARGET_EOPNOTSUPP; 7371 switch (arg1) { 7372 case TARGET_GSI_IEEE_FP_CONTROL: 7373 { 7374 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 7375 7376 /* Copied from linux ieee_fpcr_to_swcr. */ 7377 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 7378 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 7379 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 7380 | SWCR_TRAP_ENABLE_DZE 7381 | SWCR_TRAP_ENABLE_OVF); 7382 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 7383 | SWCR_TRAP_ENABLE_INE); 7384 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 7385 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 7386 7387 if (put_user_u64 (swcr, arg2)) 7388 goto efault; 7389 ret = 0; 7390 } 7391 break; 7392 7393 /* case GSI_IEEE_STATE_AT_SIGNAL: 7394 -- Not implemented in linux kernel. 7395 case GSI_UACPROC: 7396 -- Retrieves current unaligned access state; not much used. 7397 case GSI_PROC_TYPE: 7398 -- Retrieves implver information; surely not used. 7399 case GSI_GET_HWRPB: 7400 -- Grabs a copy of the HWRPB; surely not used. 
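           Of the selectors above, only TARGET_GSI_IEEE_FP_CONTROL is handled;
           any other value leaves ret at the -TARGET_EOPNOTSUPP assigned before
           the switch, so the guest simply sees EOPNOTSUPP.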
7401 */ 7402 } 7403 break; 7404 #endif 7405 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 7406 /* Alpha specific */ 7407 case TARGET_NR_osf_setsysinfo: 7408 ret = -TARGET_EOPNOTSUPP; 7409 switch (arg1) { 7410 case TARGET_SSI_IEEE_FP_CONTROL: 7411 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 7412 { 7413 uint64_t swcr, fpcr, orig_fpcr; 7414 7415 if (get_user_u64 (swcr, arg2)) 7416 goto efault; 7417 orig_fpcr = cpu_alpha_load_fpcr (cpu_env); 7418 fpcr = orig_fpcr & FPCR_DYN_MASK; 7419 7420 /* Copied from linux ieee_swcr_to_fpcr. */ 7421 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 7422 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 7423 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 7424 | SWCR_TRAP_ENABLE_DZE 7425 | SWCR_TRAP_ENABLE_OVF)) << 48; 7426 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 7427 | SWCR_TRAP_ENABLE_INE)) << 57; 7428 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 7429 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 7430 7431 cpu_alpha_store_fpcr (cpu_env, fpcr); 7432 ret = 0; 7433 7434 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) { 7435 /* Old exceptions are not signaled. */ 7436 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 7437 7438 /* If any exceptions set by this call, and are unmasked, 7439 send a signal. */ 7440 /* ??? FIXME */ 7441 } 7442 } 7443 break; 7444 7445 /* case SSI_NVPAIRS: 7446 -- Used with SSIN_UACPROC to enable unaligned accesses. 7447 case SSI_IEEE_STATE_AT_SIGNAL: 7448 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 7449 -- Not implemented in linux kernel 7450 */ 7451 } 7452 break; 7453 #endif 7454 #ifdef TARGET_NR_osf_sigprocmask 7455 /* Alpha specific. */ 7456 case TARGET_NR_osf_sigprocmask: 7457 { 7458 abi_ulong mask; 7459 int how; 7460 sigset_t set, oldset; 7461 7462 switch(arg1) { 7463 case TARGET_SIG_BLOCK: 7464 how = SIG_BLOCK; 7465 break; 7466 case TARGET_SIG_UNBLOCK: 7467 how = SIG_UNBLOCK; 7468 break; 7469 case TARGET_SIG_SETMASK: 7470 how = SIG_SETMASK; 7471 break; 7472 default: 7473 ret = -TARGET_EINVAL; 7474 goto fail; 7475 } 7476 mask = arg2; 7477 target_to_host_old_sigset(&set, &mask); 7478 sigprocmask(how, &set, &oldset); 7479 host_to_target_old_sigset(&mask, &oldset); 7480 ret = mask; 7481 } 7482 break; 7483 #endif 7484 7485 #ifdef TARGET_NR_getgid32 7486 case TARGET_NR_getgid32: 7487 ret = get_errno(getgid()); 7488 break; 7489 #endif 7490 #ifdef TARGET_NR_geteuid32 7491 case TARGET_NR_geteuid32: 7492 ret = get_errno(geteuid()); 7493 break; 7494 #endif 7495 #ifdef TARGET_NR_getegid32 7496 case TARGET_NR_getegid32: 7497 ret = get_errno(getegid()); 7498 break; 7499 #endif 7500 #ifdef TARGET_NR_setreuid32 7501 case TARGET_NR_setreuid32: 7502 ret = get_errno(setreuid(arg1, arg2)); 7503 break; 7504 #endif 7505 #ifdef TARGET_NR_setregid32 7506 case TARGET_NR_setregid32: 7507 ret = get_errno(setregid(arg1, arg2)); 7508 break; 7509 #endif 7510 #ifdef TARGET_NR_getgroups32 7511 case TARGET_NR_getgroups32: 7512 { 7513 int gidsetsize = arg1; 7514 uint32_t *target_grouplist; 7515 gid_t *grouplist; 7516 int i; 7517 7518 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7519 ret = get_errno(getgroups(gidsetsize, grouplist)); 7520 if (gidsetsize == 0) 7521 break; 7522 if (!is_error(ret)) { 7523 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 7524 if (!target_grouplist) { 7525 ret = -TARGET_EFAULT; 7526 goto fail; 7527 } 7528 for(i = 0;i < ret; i++) 7529 target_grouplist[i] = tswap32(grouplist[i]); 7530 unlock_user(target_grouplist, arg2, gidsetsize * 4); 7531 } 7532 } 7533 break; 7534 #endif 7535 #ifdef TARGET_NR_setgroups32 7536 case 
TARGET_NR_setgroups32: 7537 { 7538 int gidsetsize = arg1; 7539 uint32_t *target_grouplist; 7540 gid_t *grouplist; 7541 int i; 7542 7543 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7544 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 7545 if (!target_grouplist) { 7546 ret = -TARGET_EFAULT; 7547 goto fail; 7548 } 7549 for(i = 0;i < gidsetsize; i++) 7550 grouplist[i] = tswap32(target_grouplist[i]); 7551 unlock_user(target_grouplist, arg2, 0); 7552 ret = get_errno(setgroups(gidsetsize, grouplist)); 7553 } 7554 break; 7555 #endif 7556 #ifdef TARGET_NR_fchown32 7557 case TARGET_NR_fchown32: 7558 ret = get_errno(fchown(arg1, arg2, arg3)); 7559 break; 7560 #endif 7561 #ifdef TARGET_NR_setresuid32 7562 case TARGET_NR_setresuid32: 7563 ret = get_errno(setresuid(arg1, arg2, arg3)); 7564 break; 7565 #endif 7566 #ifdef TARGET_NR_getresuid32 7567 case TARGET_NR_getresuid32: 7568 { 7569 uid_t ruid, euid, suid; 7570 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7571 if (!is_error(ret)) { 7572 if (put_user_u32(ruid, arg1) 7573 || put_user_u32(euid, arg2) 7574 || put_user_u32(suid, arg3)) 7575 goto efault; 7576 } 7577 } 7578 break; 7579 #endif 7580 #ifdef TARGET_NR_setresgid32 7581 case TARGET_NR_setresgid32: 7582 ret = get_errno(setresgid(arg1, arg2, arg3)); 7583 break; 7584 #endif 7585 #ifdef TARGET_NR_getresgid32 7586 case TARGET_NR_getresgid32: 7587 { 7588 gid_t rgid, egid, sgid; 7589 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7590 if (!is_error(ret)) { 7591 if (put_user_u32(rgid, arg1) 7592 || put_user_u32(egid, arg2) 7593 || put_user_u32(sgid, arg3)) 7594 goto efault; 7595 } 7596 } 7597 break; 7598 #endif 7599 #ifdef TARGET_NR_chown32 7600 case TARGET_NR_chown32: 7601 if (!(p = lock_user_string(arg1))) 7602 goto efault; 7603 ret = get_errno(chown(p, arg2, arg3)); 7604 unlock_user(p, arg1, 0); 7605 break; 7606 #endif 7607 #ifdef TARGET_NR_setuid32 7608 case TARGET_NR_setuid32: 7609 ret = get_errno(setuid(arg1)); 7610 break; 7611 #endif 7612 #ifdef TARGET_NR_setgid32 7613 case TARGET_NR_setgid32: 7614 ret = get_errno(setgid(arg1)); 7615 break; 7616 #endif 7617 #ifdef TARGET_NR_setfsuid32 7618 case TARGET_NR_setfsuid32: 7619 ret = get_errno(setfsuid(arg1)); 7620 break; 7621 #endif 7622 #ifdef TARGET_NR_setfsgid32 7623 case TARGET_NR_setfsgid32: 7624 ret = get_errno(setfsgid(arg1)); 7625 break; 7626 #endif 7627 7628 case TARGET_NR_pivot_root: 7629 goto unimplemented; 7630 #ifdef TARGET_NR_mincore 7631 case TARGET_NR_mincore: 7632 { 7633 void *a; 7634 ret = -TARGET_EFAULT; 7635 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 7636 goto efault; 7637 if (!(p = lock_user_string(arg3))) 7638 goto mincore_fail; 7639 ret = get_errno(mincore(a, arg2, p)); 7640 unlock_user(p, arg3, ret); 7641 mincore_fail: 7642 unlock_user(a, arg1, 0); 7643 } 7644 break; 7645 #endif 7646 #ifdef TARGET_NR_arm_fadvise64_64 7647 case TARGET_NR_arm_fadvise64_64: 7648 { 7649 /* 7650 * arm_fadvise64_64 looks like fadvise64_64 but 7651 * with different argument order 7652 */ 7653 abi_long temp; 7654 temp = arg3; 7655 arg3 = arg4; 7656 arg4 = temp; 7657 } 7658 #endif 7659 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 7660 #ifdef TARGET_NR_fadvise64_64 7661 case TARGET_NR_fadvise64_64: 7662 #endif 7663 #ifdef TARGET_NR_fadvise64 7664 case TARGET_NR_fadvise64: 7665 #endif 7666 #ifdef TARGET_S390X 7667 switch (arg4) { 7668 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 7669 case 5: arg4 = POSIX_FADV_NOREUSE + 2; 
                                                break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        ret = get_errno(0);
        break;
#endif
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
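            /* The duplicated copy-in above exists because the ARM EABI gives
               the 64-bit l_start/l_len members of struct flock64 stricter
               alignment than the old ABI does, so target_eabi_flock64 and
               target_flock64 have different layouts even though both end up
               in the same host struct flock64. */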
7778 ret = get_errno(fcntl(arg1, cmd, &fl)); 7779 break; 7780 default: 7781 ret = do_fcntl(arg1, arg2, arg3); 7782 break; 7783 } 7784 break; 7785 } 7786 #endif 7787 #ifdef TARGET_NR_cacheflush 7788 case TARGET_NR_cacheflush: 7789 /* self-modifying code is handled automatically, so nothing needed */ 7790 ret = 0; 7791 break; 7792 #endif 7793 #ifdef TARGET_NR_security 7794 case TARGET_NR_security: 7795 goto unimplemented; 7796 #endif 7797 #ifdef TARGET_NR_getpagesize 7798 case TARGET_NR_getpagesize: 7799 ret = TARGET_PAGE_SIZE; 7800 break; 7801 #endif 7802 case TARGET_NR_gettid: 7803 ret = get_errno(gettid()); 7804 break; 7805 #ifdef TARGET_NR_readahead 7806 case TARGET_NR_readahead: 7807 #if TARGET_ABI_BITS == 32 7808 if (regpairs_aligned(cpu_env)) { 7809 arg2 = arg3; 7810 arg3 = arg4; 7811 arg4 = arg5; 7812 } 7813 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 7814 #else 7815 ret = get_errno(readahead(arg1, arg2, arg3)); 7816 #endif 7817 break; 7818 #endif 7819 #ifdef CONFIG_ATTR 7820 #ifdef TARGET_NR_setxattr 7821 case TARGET_NR_listxattr: 7822 case TARGET_NR_llistxattr: 7823 { 7824 void *p, *b = 0; 7825 if (arg2) { 7826 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 7827 if (!b) { 7828 ret = -TARGET_EFAULT; 7829 break; 7830 } 7831 } 7832 p = lock_user_string(arg1); 7833 if (p) { 7834 if (num == TARGET_NR_listxattr) { 7835 ret = get_errno(listxattr(p, b, arg3)); 7836 } else { 7837 ret = get_errno(llistxattr(p, b, arg3)); 7838 } 7839 } else { 7840 ret = -TARGET_EFAULT; 7841 } 7842 unlock_user(p, arg1, 0); 7843 unlock_user(b, arg2, arg3); 7844 break; 7845 } 7846 case TARGET_NR_flistxattr: 7847 { 7848 void *b = 0; 7849 if (arg2) { 7850 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 7851 if (!b) { 7852 ret = -TARGET_EFAULT; 7853 break; 7854 } 7855 } 7856 ret = get_errno(flistxattr(arg1, b, arg3)); 7857 unlock_user(b, arg2, arg3); 7858 break; 7859 } 7860 case TARGET_NR_setxattr: 7861 case TARGET_NR_lsetxattr: 7862 { 7863 void *p, *n, *v = 0; 7864 if (arg3) { 7865 v = lock_user(VERIFY_READ, arg3, arg4, 1); 7866 if (!v) { 7867 ret = -TARGET_EFAULT; 7868 break; 7869 } 7870 } 7871 p = lock_user_string(arg1); 7872 n = lock_user_string(arg2); 7873 if (p && n) { 7874 if (num == TARGET_NR_setxattr) { 7875 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 7876 } else { 7877 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 7878 } 7879 } else { 7880 ret = -TARGET_EFAULT; 7881 } 7882 unlock_user(p, arg1, 0); 7883 unlock_user(n, arg2, 0); 7884 unlock_user(v, arg3, 0); 7885 } 7886 break; 7887 case TARGET_NR_fsetxattr: 7888 { 7889 void *n, *v = 0; 7890 if (arg3) { 7891 v = lock_user(VERIFY_READ, arg3, arg4, 1); 7892 if (!v) { 7893 ret = -TARGET_EFAULT; 7894 break; 7895 } 7896 } 7897 n = lock_user_string(arg2); 7898 if (n) { 7899 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 7900 } else { 7901 ret = -TARGET_EFAULT; 7902 } 7903 unlock_user(n, arg2, 0); 7904 unlock_user(v, arg3, 0); 7905 } 7906 break; 7907 case TARGET_NR_getxattr: 7908 case TARGET_NR_lgetxattr: 7909 { 7910 void *p, *n, *v = 0; 7911 if (arg3) { 7912 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 7913 if (!v) { 7914 ret = -TARGET_EFAULT; 7915 break; 7916 } 7917 } 7918 p = lock_user_string(arg1); 7919 n = lock_user_string(arg2); 7920 if (p && n) { 7921 if (num == TARGET_NR_getxattr) { 7922 ret = get_errno(getxattr(p, n, v, arg4)); 7923 } else { 7924 ret = get_errno(lgetxattr(p, n, v, arg4)); 7925 } 7926 } else { 7927 ret = -TARGET_EFAULT; 7928 } 7929 unlock_user(p, arg1, 0); 7930 unlock_user(n, arg2, 0); 7931 unlock_user(v, arg3, 
    case TARGET_NR_fgetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fgetxattr(arg1, n, v, arg4));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    break;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    {
        void *p, *n;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_removexattr) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = get_errno(lremovexattr(p, n));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
    }
    break;
    case TARGET_NR_fremovexattr:
    {
        void *n;
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fremovexattr(arg1, n));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
    }
    break;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif

#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
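    /* clock_nanosleep: the request timespec is converted from guest to host
     * format before the call, and the remaining time is written back to the
     * guest when a non-NULL remain pointer (arg4) was passed.
     */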
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif

#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
    {
        struct timespec *tsp, ts[2];
        if (!arg3) {
            tsp = NULL;
        } else {
            target_to_host_timespec(ts, arg3);
            target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
            tsp = ts;
        }
        if (!arg2)
            ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        else {
            if (!(p = lock_user_string(arg2))) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
            unlock_user(p, arg2, 0);
        }
    }
    break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
            /* only pass an attr struct if the guest supplied one */
            ret = get_errno(mq_open(p, arg2, arg3,
                                    arg4 != 0 ? &posix_mq_attr : NULL));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

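    /* mq_timedreceive mirrors mq_timedsend above, and additionally copies
     * the received message priority back to the guest when arg4 is non-zero.
     */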
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

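    /* epoll_wait and epoll_pwait share the implementation below; the switch
     * on num selects which host call to issue, and the returned events are
     * converted back to guest byte order before being copied out.
     */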
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
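    /* prlimit64: the limits are 64 bit on both the guest and host side, so
     * the values are converted with tswap64 in each direction.
     */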
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}