1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <stdarg.h> 23 #include <string.h> 24 #include <elf.h> 25 #include <endian.h> 26 #include <errno.h> 27 #include <unistd.h> 28 #include <fcntl.h> 29 #include <time.h> 30 #include <limits.h> 31 #include <sys/types.h> 32 #include <sys/ipc.h> 33 #include <sys/msg.h> 34 #include <sys/wait.h> 35 #include <sys/time.h> 36 #include <sys/stat.h> 37 #include <sys/mount.h> 38 #include <sys/prctl.h> 39 #include <sys/resource.h> 40 #include <sys/mman.h> 41 #include <sys/swap.h> 42 #include <signal.h> 43 #include <sched.h> 44 #ifdef __ia64__ 45 int __clone2(int (*fn)(void *), void *child_stack_base, 46 size_t stack_size, int flags, void *arg, ...); 47 #endif 48 #include <sys/socket.h> 49 #include <sys/un.h> 50 #include <sys/uio.h> 51 #include <sys/poll.h> 52 #include <sys/times.h> 53 #include <sys/shm.h> 54 #include <sys/sem.h> 55 #include <sys/statfs.h> 56 #include <utime.h> 57 #include <sys/sysinfo.h> 58 #include <sys/utsname.h> 59 //#include <sys/user.h> 60 #include <netinet/ip.h> 61 #include <netinet/tcp.h> 62 #include <linux/wireless.h> 63 #include "qemu-common.h" 64 #ifdef TARGET_GPROF 65 #include <sys/gmon.h> 66 #endif 67 #ifdef CONFIG_EVENTFD 68 #include <sys/eventfd.h> 69 #endif 70 #ifdef CONFIG_EPOLL 71 #include <sys/epoll.h> 72 #endif 73 #ifdef CONFIG_ATTR 74 #include "qemu-xattr.h" 75 #endif 76 77 #define termios host_termios 78 #define winsize host_winsize 79 #define termio host_termio 80 #define sgttyb host_sgttyb /* same as target */ 81 #define tchars host_tchars /* same as target */ 82 #define ltchars host_ltchars /* same as target */ 83 84 #include <linux/termios.h> 85 #include <linux/unistd.h> 86 #include <linux/utsname.h> 87 #include <linux/cdrom.h> 88 #include <linux/hdreg.h> 89 #include <linux/soundcard.h> 90 #include <linux/kd.h> 91 #include <linux/mtio.h> 92 #include <linux/fs.h> 93 #if defined(CONFIG_FIEMAP) 94 #include <linux/fiemap.h> 95 #endif 96 #include <linux/fb.h> 97 #include <linux/vt.h> 98 #include <linux/dm-ioctl.h> 99 #include "linux_loop.h" 100 #include "cpu-uname.h" 101 102 #include "qemu.h" 103 104 #if defined(CONFIG_USE_NPTL) 105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ 106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) 107 #else 108 /* XXX: Hardcode the above values. 
*/ 109 #define CLONE_NPTL_FLAGS2 0 110 #endif 111 112 //#define DEBUG 113 114 //#include <linux/msdos_fs.h> 115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 117 118 119 #undef _syscall0 120 #undef _syscall1 121 #undef _syscall2 122 #undef _syscall3 123 #undef _syscall4 124 #undef _syscall5 125 #undef _syscall6 126 127 #define _syscall0(type,name) \ 128 static type name (void) \ 129 { \ 130 return syscall(__NR_##name); \ 131 } 132 133 #define _syscall1(type,name,type1,arg1) \ 134 static type name (type1 arg1) \ 135 { \ 136 return syscall(__NR_##name, arg1); \ 137 } 138 139 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 140 static type name (type1 arg1,type2 arg2) \ 141 { \ 142 return syscall(__NR_##name, arg1, arg2); \ 143 } 144 145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 146 static type name (type1 arg1,type2 arg2,type3 arg3) \ 147 { \ 148 return syscall(__NR_##name, arg1, arg2, arg3); \ 149 } 150 151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 153 { \ 154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 155 } 156 157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 158 type5,arg5) \ 159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 160 { \ 161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 162 } 163 164 165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 166 type5,arg5,type6,arg6) \ 167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 168 type6 arg6) \ 169 { \ 170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 171 } 172 173 174 #define __NR_sys_uname __NR_uname 175 #define __NR_sys_faccessat __NR_faccessat 176 #define __NR_sys_fchmodat __NR_fchmodat 177 #define __NR_sys_fchownat __NR_fchownat 178 #define __NR_sys_fstatat64 __NR_fstatat64 179 #define __NR_sys_futimesat __NR_futimesat 180 #define __NR_sys_getcwd1 __NR_getcwd 181 #define __NR_sys_getdents __NR_getdents 182 #define __NR_sys_getdents64 __NR_getdents64 183 #define __NR_sys_getpriority __NR_getpriority 184 #define __NR_sys_linkat __NR_linkat 185 #define __NR_sys_mkdirat __NR_mkdirat 186 #define __NR_sys_mknodat __NR_mknodat 187 #define __NR_sys_newfstatat __NR_newfstatat 188 #define __NR_sys_openat __NR_openat 189 #define __NR_sys_readlinkat __NR_readlinkat 190 #define __NR_sys_renameat __NR_renameat 191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 192 #define __NR_sys_symlinkat __NR_symlinkat 193 #define __NR_sys_syslog __NR_syslog 194 #define __NR_sys_tgkill __NR_tgkill 195 #define __NR_sys_tkill __NR_tkill 196 #define __NR_sys_unlinkat __NR_unlinkat 197 #define __NR_sys_utimensat __NR_utimensat 198 #define __NR_sys_futex __NR_futex 199 #define __NR_sys_inotify_init __NR_inotify_init 200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 202 203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 204 defined(__s390x__) 205 #define __NR__llseek __NR_lseek 206 #endif 207 208 #ifdef __NR_gettid 209 _syscall0(int, gettid) 210 #else 211 /* This is a replacement for the host gettid() and must return a host 212 errno. 
*/ 213 static int gettid(void) { 214 return -ENOSYS; 215 } 216 #endif 217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 220 #endif 221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 223 loff_t *, res, uint, wh); 224 #endif 225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo) 226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 227 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 228 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig) 229 #endif 230 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 231 _syscall2(int,sys_tkill,int,tid,int,sig) 232 #endif 233 #ifdef __NR_exit_group 234 _syscall1(int,exit_group,int,error_code) 235 #endif 236 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 237 _syscall1(int,set_tid_address,int *,tidptr) 238 #endif 239 #if defined(CONFIG_USE_NPTL) 240 #if defined(TARGET_NR_futex) && defined(__NR_futex) 241 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 242 const struct timespec *,timeout,int *,uaddr2,int,val3) 243 #endif 244 #endif 245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 247 unsigned long *, user_mask_ptr); 248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 250 unsigned long *, user_mask_ptr); 251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 252 void *, arg); 253 254 static bitmask_transtbl fcntl_flags_tbl[] = { 255 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 256 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 257 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 258 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 259 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 260 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 261 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 262 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 263 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 264 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 265 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 266 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 267 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 268 #if defined(O_DIRECT) 269 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 270 #endif 271 #if defined(O_NOATIME) 272 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 273 #endif 274 #if defined(O_CLOEXEC) 275 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 276 #endif 277 #if defined(O_PATH) 278 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 279 #endif 280 /* Don't terminate the list prematurely on 64-bit host+guest. 
*/ 281 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 282 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 283 #endif 284 { 0, 0, 0, 0 } 285 }; 286 287 #define COPY_UTSNAME_FIELD(dest, src) \ 288 do { \ 289 /* __NEW_UTS_LEN doesn't include terminating null */ \ 290 (void) strncpy((dest), (src), __NEW_UTS_LEN); \ 291 (dest)[__NEW_UTS_LEN] = '\0'; \ 292 } while (0) 293 294 static int sys_uname(struct new_utsname *buf) 295 { 296 struct utsname uts_buf; 297 298 if (uname(&uts_buf) < 0) 299 return (-1); 300 301 /* 302 * Just in case these have some differences, we 303 * translate utsname to new_utsname (which is the 304 * struct linux kernel uses). 305 */ 306 307 memset(buf, 0, sizeof(*buf)); 308 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname); 309 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename); 310 COPY_UTSNAME_FIELD(buf->release, uts_buf.release); 311 COPY_UTSNAME_FIELD(buf->version, uts_buf.version); 312 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine); 313 #ifdef _GNU_SOURCE 314 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname); 315 #endif 316 return (0); 317 318 #undef COPY_UTSNAME_FIELD 319 } 320 321 static int sys_getcwd1(char *buf, size_t size) 322 { 323 if (getcwd(buf, size) == NULL) { 324 /* getcwd() sets errno */ 325 return (-1); 326 } 327 return strlen(buf)+1; 328 } 329 330 #ifdef CONFIG_ATFILE 331 /* 332 * Host system seems to have atfile syscall stubs available. We 333 * now enable them one by one as specified by target syscall_nr.h. 334 */ 335 336 #ifdef TARGET_NR_faccessat 337 static int sys_faccessat(int dirfd, const char *pathname, int mode) 338 { 339 return (faccessat(dirfd, pathname, mode, 0)); 340 } 341 #endif 342 #ifdef TARGET_NR_fchmodat 343 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode) 344 { 345 return (fchmodat(dirfd, pathname, mode, 0)); 346 } 347 #endif 348 #if defined(TARGET_NR_fchownat) 349 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner, 350 gid_t group, int flags) 351 { 352 return (fchownat(dirfd, pathname, owner, group, flags)); 353 } 354 #endif 355 #ifdef __NR_fstatat64 356 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf, 357 int flags) 358 { 359 return (fstatat(dirfd, pathname, buf, flags)); 360 } 361 #endif 362 #ifdef __NR_newfstatat 363 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf, 364 int flags) 365 { 366 return (fstatat(dirfd, pathname, buf, flags)); 367 } 368 #endif 369 #ifdef TARGET_NR_futimesat 370 static int sys_futimesat(int dirfd, const char *pathname, 371 const struct timeval times[2]) 372 { 373 return (futimesat(dirfd, pathname, times)); 374 } 375 #endif 376 #ifdef TARGET_NR_linkat 377 static int sys_linkat(int olddirfd, const char *oldpath, 378 int newdirfd, const char *newpath, int flags) 379 { 380 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags)); 381 } 382 #endif 383 #ifdef TARGET_NR_mkdirat 384 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode) 385 { 386 return (mkdirat(dirfd, pathname, mode)); 387 } 388 #endif 389 #ifdef TARGET_NR_mknodat 390 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode, 391 dev_t dev) 392 { 393 return (mknodat(dirfd, pathname, mode, dev)); 394 } 395 #endif 396 #ifdef TARGET_NR_openat 397 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode) 398 { 399 /* 400 * open(2) has extra parameter 'mode' when called with 401 * flag O_CREAT. 
402 */ 403 if ((flags & O_CREAT) != 0) { 404 return (openat(dirfd, pathname, flags, mode)); 405 } 406 return (openat(dirfd, pathname, flags)); 407 } 408 #endif 409 #ifdef TARGET_NR_readlinkat 410 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz) 411 { 412 return (readlinkat(dirfd, pathname, buf, bufsiz)); 413 } 414 #endif 415 #ifdef TARGET_NR_renameat 416 static int sys_renameat(int olddirfd, const char *oldpath, 417 int newdirfd, const char *newpath) 418 { 419 return (renameat(olddirfd, oldpath, newdirfd, newpath)); 420 } 421 #endif 422 #ifdef TARGET_NR_symlinkat 423 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath) 424 { 425 return (symlinkat(oldpath, newdirfd, newpath)); 426 } 427 #endif 428 #ifdef TARGET_NR_unlinkat 429 static int sys_unlinkat(int dirfd, const char *pathname, int flags) 430 { 431 return (unlinkat(dirfd, pathname, flags)); 432 } 433 #endif 434 #else /* !CONFIG_ATFILE */ 435 436 /* 437 * Try direct syscalls instead 438 */ 439 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 440 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode) 441 #endif 442 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 443 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode) 444 #endif 445 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 446 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname, 447 uid_t,owner,gid_t,group,int,flags) 448 #endif 449 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 450 defined(__NR_fstatat64) 451 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname, 452 struct stat *,buf,int,flags) 453 #endif 454 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 455 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname, 456 const struct timeval *,times) 457 #endif 458 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \ 459 defined(__NR_newfstatat) 460 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname, 461 struct stat *,buf,int,flags) 462 #endif 463 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 464 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath, 465 int,newdirfd,const char *,newpath,int,flags) 466 #endif 467 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 468 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode) 469 #endif 470 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 471 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname, 472 mode_t,mode,dev_t,dev) 473 #endif 474 #if defined(TARGET_NR_openat) && defined(__NR_openat) 475 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode) 476 #endif 477 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 478 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname, 479 char *,buf,size_t,bufsize) 480 #endif 481 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 482 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath, 483 int,newdirfd,const char *,newpath) 484 #endif 485 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 486 _syscall3(int,sys_symlinkat,const char *,oldpath, 487 int,newdirfd,const char *,newpath) 488 #endif 489 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 490 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags) 491 #endif 492 493 #endif /* CONFIG_ATFILE */ 494 495 #ifdef CONFIG_UTIMENSAT 496 static int sys_utimensat(int dirfd, const char *pathname, 497 const 
struct timespec times[2], int flags) 498 { 499 if (pathname == NULL) 500 return futimens(dirfd, times); 501 else 502 return utimensat(dirfd, pathname, times, flags); 503 } 504 #else 505 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat) 506 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 507 const struct timespec *,tsp,int,flags) 508 #endif 509 #endif /* CONFIG_UTIMENSAT */ 510 511 #ifdef CONFIG_INOTIFY 512 #include <sys/inotify.h> 513 514 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 515 static int sys_inotify_init(void) 516 { 517 return (inotify_init()); 518 } 519 #endif 520 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 521 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 522 { 523 return (inotify_add_watch(fd, pathname, mask)); 524 } 525 #endif 526 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 527 static int sys_inotify_rm_watch(int fd, int32_t wd) 528 { 529 return (inotify_rm_watch(fd, wd)); 530 } 531 #endif 532 #ifdef CONFIG_INOTIFY1 533 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 534 static int sys_inotify_init1(int flags) 535 { 536 return (inotify_init1(flags)); 537 } 538 #endif 539 #endif 540 #else 541 /* Userspace can usually survive runtime without inotify */ 542 #undef TARGET_NR_inotify_init 543 #undef TARGET_NR_inotify_init1 544 #undef TARGET_NR_inotify_add_watch 545 #undef TARGET_NR_inotify_rm_watch 546 #endif /* CONFIG_INOTIFY */ 547 548 #if defined(TARGET_NR_ppoll) 549 #ifndef __NR_ppoll 550 # define __NR_ppoll -1 551 #endif 552 #define __NR_sys_ppoll __NR_ppoll 553 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds, 554 struct timespec *, timeout, const __sigset_t *, sigmask, 555 size_t, sigsetsize) 556 #endif 557 558 #if defined(TARGET_NR_pselect6) 559 #ifndef __NR_pselect6 560 # define __NR_pselect6 -1 561 #endif 562 #define __NR_sys_pselect6 __NR_pselect6 563 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, 564 fd_set *, exceptfds, struct timespec *, timeout, void *, sig); 565 #endif 566 567 #if defined(TARGET_NR_prlimit64) 568 #ifndef __NR_prlimit64 569 # define __NR_prlimit64 -1 570 #endif 571 #define __NR_sys_prlimit64 __NR_prlimit64 572 /* The glibc rlimit structure may not be that used by the underlying syscall */ 573 struct host_rlimit64 { 574 uint64_t rlim_cur; 575 uint64_t rlim_max; 576 }; 577 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 578 const struct host_rlimit64 *, new_limit, 579 struct host_rlimit64 *, old_limit) 580 #endif 581 582 extern int personality(int); 583 extern int flock(int, int); 584 extern int setfsuid(int); 585 extern int setfsgid(int); 586 extern int setgroups(int, gid_t *); 587 588 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ 589 #ifdef TARGET_ARM 590 static inline int regpairs_aligned(void *cpu_env) { 591 return ((((CPUARMState *)cpu_env)->eabi) == 1) ; 592 } 593 #elif defined(TARGET_MIPS) 594 static inline int regpairs_aligned(void *cpu_env) { return 1; } 595 #else 596 static inline int regpairs_aligned(void *cpu_env) { return 0; } 597 #endif 598 599 #define ERRNO_TABLE_SIZE 1200 600 601 /* target_to_host_errno_table[] is initialized from 602 * host_to_target_errno_table[] in syscall_init(). 
*/ 603 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 604 }; 605 606 /* 607 * This list is the union of errno values overridden in asm-<arch>/errno.h 608 * minus the errnos that are not actually generic to all archs. 609 */ 610 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 611 [EIDRM] = TARGET_EIDRM, 612 [ECHRNG] = TARGET_ECHRNG, 613 [EL2NSYNC] = TARGET_EL2NSYNC, 614 [EL3HLT] = TARGET_EL3HLT, 615 [EL3RST] = TARGET_EL3RST, 616 [ELNRNG] = TARGET_ELNRNG, 617 [EUNATCH] = TARGET_EUNATCH, 618 [ENOCSI] = TARGET_ENOCSI, 619 [EL2HLT] = TARGET_EL2HLT, 620 [EDEADLK] = TARGET_EDEADLK, 621 [ENOLCK] = TARGET_ENOLCK, 622 [EBADE] = TARGET_EBADE, 623 [EBADR] = TARGET_EBADR, 624 [EXFULL] = TARGET_EXFULL, 625 [ENOANO] = TARGET_ENOANO, 626 [EBADRQC] = TARGET_EBADRQC, 627 [EBADSLT] = TARGET_EBADSLT, 628 [EBFONT] = TARGET_EBFONT, 629 [ENOSTR] = TARGET_ENOSTR, 630 [ENODATA] = TARGET_ENODATA, 631 [ETIME] = TARGET_ETIME, 632 [ENOSR] = TARGET_ENOSR, 633 [ENONET] = TARGET_ENONET, 634 [ENOPKG] = TARGET_ENOPKG, 635 [EREMOTE] = TARGET_EREMOTE, 636 [ENOLINK] = TARGET_ENOLINK, 637 [EADV] = TARGET_EADV, 638 [ESRMNT] = TARGET_ESRMNT, 639 [ECOMM] = TARGET_ECOMM, 640 [EPROTO] = TARGET_EPROTO, 641 [EDOTDOT] = TARGET_EDOTDOT, 642 [EMULTIHOP] = TARGET_EMULTIHOP, 643 [EBADMSG] = TARGET_EBADMSG, 644 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 645 [EOVERFLOW] = TARGET_EOVERFLOW, 646 [ENOTUNIQ] = TARGET_ENOTUNIQ, 647 [EBADFD] = TARGET_EBADFD, 648 [EREMCHG] = TARGET_EREMCHG, 649 [ELIBACC] = TARGET_ELIBACC, 650 [ELIBBAD] = TARGET_ELIBBAD, 651 [ELIBSCN] = TARGET_ELIBSCN, 652 [ELIBMAX] = TARGET_ELIBMAX, 653 [ELIBEXEC] = TARGET_ELIBEXEC, 654 [EILSEQ] = TARGET_EILSEQ, 655 [ENOSYS] = TARGET_ENOSYS, 656 [ELOOP] = TARGET_ELOOP, 657 [ERESTART] = TARGET_ERESTART, 658 [ESTRPIPE] = TARGET_ESTRPIPE, 659 [ENOTEMPTY] = TARGET_ENOTEMPTY, 660 [EUSERS] = TARGET_EUSERS, 661 [ENOTSOCK] = TARGET_ENOTSOCK, 662 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 663 [EMSGSIZE] = TARGET_EMSGSIZE, 664 [EPROTOTYPE] = TARGET_EPROTOTYPE, 665 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 666 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 667 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 668 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 669 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 670 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 671 [EADDRINUSE] = TARGET_EADDRINUSE, 672 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 673 [ENETDOWN] = TARGET_ENETDOWN, 674 [ENETUNREACH] = TARGET_ENETUNREACH, 675 [ENETRESET] = TARGET_ENETRESET, 676 [ECONNABORTED] = TARGET_ECONNABORTED, 677 [ECONNRESET] = TARGET_ECONNRESET, 678 [ENOBUFS] = TARGET_ENOBUFS, 679 [EISCONN] = TARGET_EISCONN, 680 [ENOTCONN] = TARGET_ENOTCONN, 681 [EUCLEAN] = TARGET_EUCLEAN, 682 [ENOTNAM] = TARGET_ENOTNAM, 683 [ENAVAIL] = TARGET_ENAVAIL, 684 [EISNAM] = TARGET_EISNAM, 685 [EREMOTEIO] = TARGET_EREMOTEIO, 686 [ESHUTDOWN] = TARGET_ESHUTDOWN, 687 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 688 [ETIMEDOUT] = TARGET_ETIMEDOUT, 689 [ECONNREFUSED] = TARGET_ECONNREFUSED, 690 [EHOSTDOWN] = TARGET_EHOSTDOWN, 691 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 692 [EALREADY] = TARGET_EALREADY, 693 [EINPROGRESS] = TARGET_EINPROGRESS, 694 [ESTALE] = TARGET_ESTALE, 695 [ECANCELED] = TARGET_ECANCELED, 696 [ENOMEDIUM] = TARGET_ENOMEDIUM, 697 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 698 #ifdef ENOKEY 699 [ENOKEY] = TARGET_ENOKEY, 700 #endif 701 #ifdef EKEYEXPIRED 702 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 703 #endif 704 #ifdef EKEYREVOKED 705 [EKEYREVOKED] = TARGET_EKEYREVOKED, 706 #endif 707 #ifdef EKEYREJECTED 708 [EKEYREJECTED] = TARGET_EKEYREJECTED, 709 #endif 710 #ifdef 
EOWNERDEAD 711 [EOWNERDEAD] = TARGET_EOWNERDEAD, 712 #endif 713 #ifdef ENOTRECOVERABLE 714 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 715 #endif 716 }; 717 718 static inline int host_to_target_errno(int err) 719 { 720 if(host_to_target_errno_table[err]) 721 return host_to_target_errno_table[err]; 722 return err; 723 } 724 725 static inline int target_to_host_errno(int err) 726 { 727 if (target_to_host_errno_table[err]) 728 return target_to_host_errno_table[err]; 729 return err; 730 } 731 732 static inline abi_long get_errno(abi_long ret) 733 { 734 if (ret == -1) 735 return -host_to_target_errno(errno); 736 else 737 return ret; 738 } 739 740 static inline int is_error(abi_long ret) 741 { 742 return (abi_ulong)ret >= (abi_ulong)(-4096); 743 } 744 745 char *target_strerror(int err) 746 { 747 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) { 748 return NULL; 749 } 750 return strerror(target_to_host_errno(err)); 751 } 752 753 static abi_ulong target_brk; 754 static abi_ulong target_original_brk; 755 static abi_ulong brk_page; 756 757 void target_set_brk(abi_ulong new_brk) 758 { 759 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); 760 brk_page = HOST_PAGE_ALIGN(target_brk); 761 } 762 763 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) 764 #define DEBUGF_BRK(message, args...) 765 766 /* do_brk() must return target values and target errnos. */ 767 abi_long do_brk(abi_ulong new_brk) 768 { 769 abi_long mapped_addr; 770 int new_alloc_size; 771 772 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk); 773 774 if (!new_brk) { 775 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk); 776 return target_brk; 777 } 778 if (new_brk < target_original_brk) { 779 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n", 780 target_brk); 781 return target_brk; 782 } 783 784 /* If the new brk is less than the highest page reserved to the 785 * target heap allocation, set it and we're almost done... */ 786 if (new_brk <= brk_page) { 787 /* Heap contents are initialized to zero, as for anonymous 788 * mapped pages. */ 789 if (new_brk > target_brk) { 790 memset(g2h(target_brk), 0, new_brk - target_brk); 791 } 792 target_brk = new_brk; 793 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk); 794 return target_brk; 795 } 796 797 /* We need to allocate more memory after the brk... Note that 798 * we don't use MAP_FIXED because that will map over the top of 799 * any existing mapping (like the one with the host libc or qemu 800 * itself); instead we treat "mapped but at wrong address" as 801 * a failure and unmap again. 802 */ 803 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page); 804 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, 805 PROT_READ|PROT_WRITE, 806 MAP_ANON|MAP_PRIVATE, 0, 0)); 807 808 if (mapped_addr == brk_page) { 809 /* Heap contents are initialized to zero, as for anonymous 810 * mapped pages. Technically the new pages are already 811 * initialized to zero since they *are* anonymous mapped 812 * pages, however we have to take care with the contents that 813 * come from the remaining part of the previous page: it may 814 * contains garbage data due to a previous heap usage (grown 815 * then shrunken). 
*/ 816 memset(g2h(target_brk), 0, brk_page - target_brk); 817 818 target_brk = new_brk; 819 brk_page = HOST_PAGE_ALIGN(target_brk); 820 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n", 821 target_brk); 822 return target_brk; 823 } else if (mapped_addr != -1) { 824 /* Mapped but at wrong address, meaning there wasn't actually 825 * enough space for this brk. 826 */ 827 target_munmap(mapped_addr, new_alloc_size); 828 mapped_addr = -1; 829 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk); 830 } 831 else { 832 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk); 833 } 834 835 #if defined(TARGET_ALPHA) 836 /* We (partially) emulate OSF/1 on Alpha, which requires we 837 return a proper errno, not an unchanged brk value. */ 838 return -TARGET_ENOMEM; 839 #endif 840 /* For everything else, return the previous break. */ 841 return target_brk; 842 } 843 844 static inline abi_long copy_from_user_fdset(fd_set *fds, 845 abi_ulong target_fds_addr, 846 int n) 847 { 848 int i, nw, j, k; 849 abi_ulong b, *target_fds; 850 851 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 852 if (!(target_fds = lock_user(VERIFY_READ, 853 target_fds_addr, 854 sizeof(abi_ulong) * nw, 855 1))) 856 return -TARGET_EFAULT; 857 858 FD_ZERO(fds); 859 k = 0; 860 for (i = 0; i < nw; i++) { 861 /* grab the abi_ulong */ 862 __get_user(b, &target_fds[i]); 863 for (j = 0; j < TARGET_ABI_BITS; j++) { 864 /* check the bit inside the abi_ulong */ 865 if ((b >> j) & 1) 866 FD_SET(k, fds); 867 k++; 868 } 869 } 870 871 unlock_user(target_fds, target_fds_addr, 0); 872 873 return 0; 874 } 875 876 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr, 877 abi_ulong target_fds_addr, 878 int n) 879 { 880 if (target_fds_addr) { 881 if (copy_from_user_fdset(fds, target_fds_addr, n)) 882 return -TARGET_EFAULT; 883 *fds_ptr = fds; 884 } else { 885 *fds_ptr = NULL; 886 } 887 return 0; 888 } 889 890 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr, 891 const fd_set *fds, 892 int n) 893 { 894 int i, nw, j, k; 895 abi_long v; 896 abi_ulong *target_fds; 897 898 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 899 if (!(target_fds = lock_user(VERIFY_WRITE, 900 target_fds_addr, 901 sizeof(abi_ulong) * nw, 902 0))) 903 return -TARGET_EFAULT; 904 905 k = 0; 906 for (i = 0; i < nw; i++) { 907 v = 0; 908 for (j = 0; j < TARGET_ABI_BITS; j++) { 909 v |= ((FD_ISSET(k, fds) != 0) << j); 910 k++; 911 } 912 __put_user(v, &target_fds[i]); 913 } 914 915 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw); 916 917 return 0; 918 } 919 920 #if defined(__alpha__) 921 #define HOST_HZ 1024 922 #else 923 #define HOST_HZ 100 924 #endif 925 926 static inline abi_long host_to_target_clock_t(long ticks) 927 { 928 #if HOST_HZ == TARGET_HZ 929 return ticks; 930 #else 931 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ; 932 #endif 933 } 934 935 static inline abi_long host_to_target_rusage(abi_ulong target_addr, 936 const struct rusage *rusage) 937 { 938 struct target_rusage *target_rusage; 939 940 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) 941 return -TARGET_EFAULT; 942 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec); 943 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec); 944 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec); 945 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec); 946 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss); 947 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss); 
948 target_rusage->ru_idrss = tswapal(rusage->ru_idrss); 949 target_rusage->ru_isrss = tswapal(rusage->ru_isrss); 950 target_rusage->ru_minflt = tswapal(rusage->ru_minflt); 951 target_rusage->ru_majflt = tswapal(rusage->ru_majflt); 952 target_rusage->ru_nswap = tswapal(rusage->ru_nswap); 953 target_rusage->ru_inblock = tswapal(rusage->ru_inblock); 954 target_rusage->ru_oublock = tswapal(rusage->ru_oublock); 955 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd); 956 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv); 957 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals); 958 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw); 959 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw); 960 unlock_user_struct(target_rusage, target_addr, 1); 961 962 return 0; 963 } 964 965 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim) 966 { 967 abi_ulong target_rlim_swap; 968 rlim_t result; 969 970 target_rlim_swap = tswapal(target_rlim); 971 if (target_rlim_swap == TARGET_RLIM_INFINITY) 972 return RLIM_INFINITY; 973 974 result = target_rlim_swap; 975 if (target_rlim_swap != (rlim_t)result) 976 return RLIM_INFINITY; 977 978 return result; 979 } 980 981 static inline abi_ulong host_to_target_rlim(rlim_t rlim) 982 { 983 abi_ulong target_rlim_swap; 984 abi_ulong result; 985 986 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) 987 target_rlim_swap = TARGET_RLIM_INFINITY; 988 else 989 target_rlim_swap = rlim; 990 result = tswapal(target_rlim_swap); 991 992 return result; 993 } 994 995 static inline int target_to_host_resource(int code) 996 { 997 switch (code) { 998 case TARGET_RLIMIT_AS: 999 return RLIMIT_AS; 1000 case TARGET_RLIMIT_CORE: 1001 return RLIMIT_CORE; 1002 case TARGET_RLIMIT_CPU: 1003 return RLIMIT_CPU; 1004 case TARGET_RLIMIT_DATA: 1005 return RLIMIT_DATA; 1006 case TARGET_RLIMIT_FSIZE: 1007 return RLIMIT_FSIZE; 1008 case TARGET_RLIMIT_LOCKS: 1009 return RLIMIT_LOCKS; 1010 case TARGET_RLIMIT_MEMLOCK: 1011 return RLIMIT_MEMLOCK; 1012 case TARGET_RLIMIT_MSGQUEUE: 1013 return RLIMIT_MSGQUEUE; 1014 case TARGET_RLIMIT_NICE: 1015 return RLIMIT_NICE; 1016 case TARGET_RLIMIT_NOFILE: 1017 return RLIMIT_NOFILE; 1018 case TARGET_RLIMIT_NPROC: 1019 return RLIMIT_NPROC; 1020 case TARGET_RLIMIT_RSS: 1021 return RLIMIT_RSS; 1022 case TARGET_RLIMIT_RTPRIO: 1023 return RLIMIT_RTPRIO; 1024 case TARGET_RLIMIT_SIGPENDING: 1025 return RLIMIT_SIGPENDING; 1026 case TARGET_RLIMIT_STACK: 1027 return RLIMIT_STACK; 1028 default: 1029 return code; 1030 } 1031 } 1032 1033 static inline abi_long copy_from_user_timeval(struct timeval *tv, 1034 abi_ulong target_tv_addr) 1035 { 1036 struct target_timeval *target_tv; 1037 1038 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) 1039 return -TARGET_EFAULT; 1040 1041 __get_user(tv->tv_sec, &target_tv->tv_sec); 1042 __get_user(tv->tv_usec, &target_tv->tv_usec); 1043 1044 unlock_user_struct(target_tv, target_tv_addr, 0); 1045 1046 return 0; 1047 } 1048 1049 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 1050 const struct timeval *tv) 1051 { 1052 struct target_timeval *target_tv; 1053 1054 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) 1055 return -TARGET_EFAULT; 1056 1057 __put_user(tv->tv_sec, &target_tv->tv_sec); 1058 __put_user(tv->tv_usec, &target_tv->tv_usec); 1059 1060 unlock_user_struct(target_tv, target_tv_addr, 1); 1061 1062 return 0; 1063 } 1064 1065 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 1066 #include <mqueue.h> 1067 1068 static inline abi_long 
copy_from_user_mq_attr(struct mq_attr *attr, 1069 abi_ulong target_mq_attr_addr) 1070 { 1071 struct target_mq_attr *target_mq_attr; 1072 1073 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 1074 target_mq_attr_addr, 1)) 1075 return -TARGET_EFAULT; 1076 1077 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 1078 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1079 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1080 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1081 1082 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 1083 1084 return 0; 1085 } 1086 1087 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 1088 const struct mq_attr *attr) 1089 { 1090 struct target_mq_attr *target_mq_attr; 1091 1092 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 1093 target_mq_attr_addr, 0)) 1094 return -TARGET_EFAULT; 1095 1096 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 1097 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1098 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1099 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1100 1101 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 1102 1103 return 0; 1104 } 1105 #endif 1106 1107 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 1108 /* do_select() must return target values and target errnos. */ 1109 static abi_long do_select(int n, 1110 abi_ulong rfd_addr, abi_ulong wfd_addr, 1111 abi_ulong efd_addr, abi_ulong target_tv_addr) 1112 { 1113 fd_set rfds, wfds, efds; 1114 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 1115 struct timeval tv, *tv_ptr; 1116 abi_long ret; 1117 1118 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 1119 if (ret) { 1120 return ret; 1121 } 1122 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 1123 if (ret) { 1124 return ret; 1125 } 1126 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 1127 if (ret) { 1128 return ret; 1129 } 1130 1131 if (target_tv_addr) { 1132 if (copy_from_user_timeval(&tv, target_tv_addr)) 1133 return -TARGET_EFAULT; 1134 tv_ptr = &tv; 1135 } else { 1136 tv_ptr = NULL; 1137 } 1138 1139 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr)); 1140 1141 if (!is_error(ret)) { 1142 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1143 return -TARGET_EFAULT; 1144 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1145 return -TARGET_EFAULT; 1146 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1147 return -TARGET_EFAULT; 1148 1149 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv)) 1150 return -TARGET_EFAULT; 1151 } 1152 1153 return ret; 1154 } 1155 #endif 1156 1157 static abi_long do_pipe2(int host_pipe[], int flags) 1158 { 1159 #ifdef CONFIG_PIPE2 1160 return pipe2(host_pipe, flags); 1161 #else 1162 return -ENOSYS; 1163 #endif 1164 } 1165 1166 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, 1167 int flags, int is_pipe2) 1168 { 1169 int host_pipe[2]; 1170 abi_long ret; 1171 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); 1172 1173 if (is_error(ret)) 1174 return get_errno(ret); 1175 1176 /* Several targets have special calling conventions for the original 1177 pipe syscall, but didn't replicate this into the pipe2 syscall. 
*/ 1178 if (!is_pipe2) { 1179 #if defined(TARGET_ALPHA) 1180 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1]; 1181 return host_pipe[0]; 1182 #elif defined(TARGET_MIPS) 1183 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; 1184 return host_pipe[0]; 1185 #elif defined(TARGET_SH4) 1186 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; 1187 return host_pipe[0]; 1188 #endif 1189 } 1190 1191 if (put_user_s32(host_pipe[0], pipedes) 1192 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0]))) 1193 return -TARGET_EFAULT; 1194 return get_errno(ret); 1195 } 1196 1197 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn, 1198 abi_ulong target_addr, 1199 socklen_t len) 1200 { 1201 struct target_ip_mreqn *target_smreqn; 1202 1203 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1); 1204 if (!target_smreqn) 1205 return -TARGET_EFAULT; 1206 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr; 1207 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr; 1208 if (len == sizeof(struct target_ip_mreqn)) 1209 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex); 1210 unlock_user(target_smreqn, target_addr, 0); 1211 1212 return 0; 1213 } 1214 1215 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr, 1216 abi_ulong target_addr, 1217 socklen_t len) 1218 { 1219 const socklen_t unix_maxlen = sizeof (struct sockaddr_un); 1220 sa_family_t sa_family; 1221 struct target_sockaddr *target_saddr; 1222 1223 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 1224 if (!target_saddr) 1225 return -TARGET_EFAULT; 1226 1227 sa_family = tswap16(target_saddr->sa_family); 1228 1229 /* Oops. The caller might send a incomplete sun_path; sun_path 1230 * must be terminated by \0 (see the manual page), but 1231 * unfortunately it is quite common to specify sockaddr_un 1232 * length as "strlen(x->sun_path)" while it should be 1233 * "strlen(...) + 1". We'll fix that here if needed. 1234 * Linux kernel has a similar feature. 1235 */ 1236 1237 if (sa_family == AF_UNIX) { 1238 if (len < unix_maxlen && len > 0) { 1239 char *cp = (char*)target_saddr; 1240 1241 if ( cp[len-1] && !cp[len] ) 1242 len++; 1243 } 1244 if (len > unix_maxlen) 1245 len = unix_maxlen; 1246 } 1247 1248 memcpy(addr, target_saddr, len); 1249 addr->sa_family = sa_family; 1250 unlock_user(target_saddr, target_addr, 0); 1251 1252 return 0; 1253 } 1254 1255 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1256 struct sockaddr *addr, 1257 socklen_t len) 1258 { 1259 struct target_sockaddr *target_saddr; 1260 1261 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1262 if (!target_saddr) 1263 return -TARGET_EFAULT; 1264 memcpy(target_saddr, addr, len); 1265 target_saddr->sa_family = tswap16(addr->sa_family); 1266 unlock_user(target_saddr, target_addr, len); 1267 1268 return 0; 1269 } 1270 1271 /* ??? Should this also swap msgh->name? 
*/ 1272 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1273 struct target_msghdr *target_msgh) 1274 { 1275 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1276 abi_long msg_controllen; 1277 abi_ulong target_cmsg_addr; 1278 struct target_cmsghdr *target_cmsg; 1279 socklen_t space = 0; 1280 1281 msg_controllen = tswapal(target_msgh->msg_controllen); 1282 if (msg_controllen < sizeof (struct target_cmsghdr)) 1283 goto the_end; 1284 target_cmsg_addr = tswapal(target_msgh->msg_control); 1285 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1286 if (!target_cmsg) 1287 return -TARGET_EFAULT; 1288 1289 while (cmsg && target_cmsg) { 1290 void *data = CMSG_DATA(cmsg); 1291 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1292 1293 int len = tswapal(target_cmsg->cmsg_len) 1294 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr)); 1295 1296 space += CMSG_SPACE(len); 1297 if (space > msgh->msg_controllen) { 1298 space -= CMSG_SPACE(len); 1299 gemu_log("Host cmsg overflow\n"); 1300 break; 1301 } 1302 1303 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1304 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1305 cmsg->cmsg_len = CMSG_LEN(len); 1306 1307 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1308 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1309 memcpy(data, target_data, len); 1310 } else { 1311 int *fd = (int *)data; 1312 int *target_fd = (int *)target_data; 1313 int i, numfds = len / sizeof(int); 1314 1315 for (i = 0; i < numfds; i++) 1316 fd[i] = tswap32(target_fd[i]); 1317 } 1318 1319 cmsg = CMSG_NXTHDR(msgh, cmsg); 1320 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1321 } 1322 unlock_user(target_cmsg, target_cmsg_addr, 0); 1323 the_end: 1324 msgh->msg_controllen = space; 1325 return 0; 1326 } 1327 1328 /* ??? Should this also swap msgh->name? 
*/ 1329 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1330 struct msghdr *msgh) 1331 { 1332 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1333 abi_long msg_controllen; 1334 abi_ulong target_cmsg_addr; 1335 struct target_cmsghdr *target_cmsg; 1336 socklen_t space = 0; 1337 1338 msg_controllen = tswapal(target_msgh->msg_controllen); 1339 if (msg_controllen < sizeof (struct target_cmsghdr)) 1340 goto the_end; 1341 target_cmsg_addr = tswapal(target_msgh->msg_control); 1342 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1343 if (!target_cmsg) 1344 return -TARGET_EFAULT; 1345 1346 while (cmsg && target_cmsg) { 1347 void *data = CMSG_DATA(cmsg); 1348 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1349 1350 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr)); 1351 1352 space += TARGET_CMSG_SPACE(len); 1353 if (space > msg_controllen) { 1354 space -= TARGET_CMSG_SPACE(len); 1355 gemu_log("Target cmsg overflow\n"); 1356 break; 1357 } 1358 1359 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1360 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1361 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len)); 1362 1363 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1364 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1365 memcpy(target_data, data, len); 1366 } else { 1367 int *fd = (int *)data; 1368 int *target_fd = (int *)target_data; 1369 int i, numfds = len / sizeof(int); 1370 1371 for (i = 0; i < numfds; i++) 1372 target_fd[i] = tswap32(fd[i]); 1373 } 1374 1375 cmsg = CMSG_NXTHDR(msgh, cmsg); 1376 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1377 } 1378 unlock_user(target_cmsg, target_cmsg_addr, space); 1379 the_end: 1380 target_msgh->msg_controllen = tswapal(space); 1381 return 0; 1382 } 1383 1384 /* do_setsockopt() Must return target values and target errnos. */ 1385 static abi_long do_setsockopt(int sockfd, int level, int optname, 1386 abi_ulong optval_addr, socklen_t optlen) 1387 { 1388 abi_long ret; 1389 int val; 1390 struct ip_mreqn *ip_mreq; 1391 struct ip_mreq_source *ip_mreq_source; 1392 1393 switch(level) { 1394 case SOL_TCP: 1395 /* TCP options all take an 'int' value. 
*/ 1396 if (optlen < sizeof(uint32_t)) 1397 return -TARGET_EINVAL; 1398 1399 if (get_user_u32(val, optval_addr)) 1400 return -TARGET_EFAULT; 1401 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1402 break; 1403 case SOL_IP: 1404 switch(optname) { 1405 case IP_TOS: 1406 case IP_TTL: 1407 case IP_HDRINCL: 1408 case IP_ROUTER_ALERT: 1409 case IP_RECVOPTS: 1410 case IP_RETOPTS: 1411 case IP_PKTINFO: 1412 case IP_MTU_DISCOVER: 1413 case IP_RECVERR: 1414 case IP_RECVTOS: 1415 #ifdef IP_FREEBIND 1416 case IP_FREEBIND: 1417 #endif 1418 case IP_MULTICAST_TTL: 1419 case IP_MULTICAST_LOOP: 1420 val = 0; 1421 if (optlen >= sizeof(uint32_t)) { 1422 if (get_user_u32(val, optval_addr)) 1423 return -TARGET_EFAULT; 1424 } else if (optlen >= 1) { 1425 if (get_user_u8(val, optval_addr)) 1426 return -TARGET_EFAULT; 1427 } 1428 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1429 break; 1430 case IP_ADD_MEMBERSHIP: 1431 case IP_DROP_MEMBERSHIP: 1432 if (optlen < sizeof (struct target_ip_mreq) || 1433 optlen > sizeof (struct target_ip_mreqn)) 1434 return -TARGET_EINVAL; 1435 1436 ip_mreq = (struct ip_mreqn *) alloca(optlen); 1437 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1438 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1439 break; 1440 1441 case IP_BLOCK_SOURCE: 1442 case IP_UNBLOCK_SOURCE: 1443 case IP_ADD_SOURCE_MEMBERSHIP: 1444 case IP_DROP_SOURCE_MEMBERSHIP: 1445 if (optlen != sizeof (struct target_ip_mreq_source)) 1446 return -TARGET_EINVAL; 1447 1448 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1449 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1450 unlock_user (ip_mreq_source, optval_addr, 0); 1451 break; 1452 1453 default: 1454 goto unimplemented; 1455 } 1456 break; 1457 case TARGET_SOL_SOCKET: 1458 switch (optname) { 1459 /* Options with 'int' argument. 
*/ 1460 case TARGET_SO_DEBUG: 1461 optname = SO_DEBUG; 1462 break; 1463 case TARGET_SO_REUSEADDR: 1464 optname = SO_REUSEADDR; 1465 break; 1466 case TARGET_SO_TYPE: 1467 optname = SO_TYPE; 1468 break; 1469 case TARGET_SO_ERROR: 1470 optname = SO_ERROR; 1471 break; 1472 case TARGET_SO_DONTROUTE: 1473 optname = SO_DONTROUTE; 1474 break; 1475 case TARGET_SO_BROADCAST: 1476 optname = SO_BROADCAST; 1477 break; 1478 case TARGET_SO_SNDBUF: 1479 optname = SO_SNDBUF; 1480 break; 1481 case TARGET_SO_RCVBUF: 1482 optname = SO_RCVBUF; 1483 break; 1484 case TARGET_SO_KEEPALIVE: 1485 optname = SO_KEEPALIVE; 1486 break; 1487 case TARGET_SO_OOBINLINE: 1488 optname = SO_OOBINLINE; 1489 break; 1490 case TARGET_SO_NO_CHECK: 1491 optname = SO_NO_CHECK; 1492 break; 1493 case TARGET_SO_PRIORITY: 1494 optname = SO_PRIORITY; 1495 break; 1496 #ifdef SO_BSDCOMPAT 1497 case TARGET_SO_BSDCOMPAT: 1498 optname = SO_BSDCOMPAT; 1499 break; 1500 #endif 1501 case TARGET_SO_PASSCRED: 1502 optname = SO_PASSCRED; 1503 break; 1504 case TARGET_SO_TIMESTAMP: 1505 optname = SO_TIMESTAMP; 1506 break; 1507 case TARGET_SO_RCVLOWAT: 1508 optname = SO_RCVLOWAT; 1509 break; 1510 case TARGET_SO_RCVTIMEO: 1511 optname = SO_RCVTIMEO; 1512 break; 1513 case TARGET_SO_SNDTIMEO: 1514 optname = SO_SNDTIMEO; 1515 break; 1516 break; 1517 default: 1518 goto unimplemented; 1519 } 1520 if (optlen < sizeof(uint32_t)) 1521 return -TARGET_EINVAL; 1522 1523 if (get_user_u32(val, optval_addr)) 1524 return -TARGET_EFAULT; 1525 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 1526 break; 1527 default: 1528 unimplemented: 1529 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname); 1530 ret = -TARGET_ENOPROTOOPT; 1531 } 1532 return ret; 1533 } 1534 1535 /* do_getsockopt() Must return target values and target errnos. */ 1536 static abi_long do_getsockopt(int sockfd, int level, int optname, 1537 abi_ulong optval_addr, abi_ulong optlen) 1538 { 1539 abi_long ret; 1540 int len, val; 1541 socklen_t lv; 1542 1543 switch(level) { 1544 case TARGET_SOL_SOCKET: 1545 level = SOL_SOCKET; 1546 switch (optname) { 1547 /* These don't just return a single integer */ 1548 case TARGET_SO_LINGER: 1549 case TARGET_SO_RCVTIMEO: 1550 case TARGET_SO_SNDTIMEO: 1551 case TARGET_SO_PEERNAME: 1552 goto unimplemented; 1553 case TARGET_SO_PEERCRED: { 1554 struct ucred cr; 1555 socklen_t crlen; 1556 struct target_ucred *tcr; 1557 1558 if (get_user_u32(len, optlen)) { 1559 return -TARGET_EFAULT; 1560 } 1561 if (len < 0) { 1562 return -TARGET_EINVAL; 1563 } 1564 1565 crlen = sizeof(cr); 1566 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 1567 &cr, &crlen)); 1568 if (ret < 0) { 1569 return ret; 1570 } 1571 if (len > crlen) { 1572 len = crlen; 1573 } 1574 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 1575 return -TARGET_EFAULT; 1576 } 1577 __put_user(cr.pid, &tcr->pid); 1578 __put_user(cr.uid, &tcr->uid); 1579 __put_user(cr.gid, &tcr->gid); 1580 unlock_user_struct(tcr, optval_addr, 1); 1581 if (put_user_u32(len, optlen)) { 1582 return -TARGET_EFAULT; 1583 } 1584 break; 1585 } 1586 /* Options with 'int' argument. 
*/ 1587 case TARGET_SO_DEBUG: 1588 optname = SO_DEBUG; 1589 goto int_case; 1590 case TARGET_SO_REUSEADDR: 1591 optname = SO_REUSEADDR; 1592 goto int_case; 1593 case TARGET_SO_TYPE: 1594 optname = SO_TYPE; 1595 goto int_case; 1596 case TARGET_SO_ERROR: 1597 optname = SO_ERROR; 1598 goto int_case; 1599 case TARGET_SO_DONTROUTE: 1600 optname = SO_DONTROUTE; 1601 goto int_case; 1602 case TARGET_SO_BROADCAST: 1603 optname = SO_BROADCAST; 1604 goto int_case; 1605 case TARGET_SO_SNDBUF: 1606 optname = SO_SNDBUF; 1607 goto int_case; 1608 case TARGET_SO_RCVBUF: 1609 optname = SO_RCVBUF; 1610 goto int_case; 1611 case TARGET_SO_KEEPALIVE: 1612 optname = SO_KEEPALIVE; 1613 goto int_case; 1614 case TARGET_SO_OOBINLINE: 1615 optname = SO_OOBINLINE; 1616 goto int_case; 1617 case TARGET_SO_NO_CHECK: 1618 optname = SO_NO_CHECK; 1619 goto int_case; 1620 case TARGET_SO_PRIORITY: 1621 optname = SO_PRIORITY; 1622 goto int_case; 1623 #ifdef SO_BSDCOMPAT 1624 case TARGET_SO_BSDCOMPAT: 1625 optname = SO_BSDCOMPAT; 1626 goto int_case; 1627 #endif 1628 case TARGET_SO_PASSCRED: 1629 optname = SO_PASSCRED; 1630 goto int_case; 1631 case TARGET_SO_TIMESTAMP: 1632 optname = SO_TIMESTAMP; 1633 goto int_case; 1634 case TARGET_SO_RCVLOWAT: 1635 optname = SO_RCVLOWAT; 1636 goto int_case; 1637 default: 1638 goto int_case; 1639 } 1640 break; 1641 case SOL_TCP: 1642 /* TCP options all take an 'int' value. */ 1643 int_case: 1644 if (get_user_u32(len, optlen)) 1645 return -TARGET_EFAULT; 1646 if (len < 0) 1647 return -TARGET_EINVAL; 1648 lv = sizeof(lv); 1649 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1650 if (ret < 0) 1651 return ret; 1652 if (len > lv) 1653 len = lv; 1654 if (len == 4) { 1655 if (put_user_u32(val, optval_addr)) 1656 return -TARGET_EFAULT; 1657 } else { 1658 if (put_user_u8(val, optval_addr)) 1659 return -TARGET_EFAULT; 1660 } 1661 if (put_user_u32(len, optlen)) 1662 return -TARGET_EFAULT; 1663 break; 1664 case SOL_IP: 1665 switch(optname) { 1666 case IP_TOS: 1667 case IP_TTL: 1668 case IP_HDRINCL: 1669 case IP_ROUTER_ALERT: 1670 case IP_RECVOPTS: 1671 case IP_RETOPTS: 1672 case IP_PKTINFO: 1673 case IP_MTU_DISCOVER: 1674 case IP_RECVERR: 1675 case IP_RECVTOS: 1676 #ifdef IP_FREEBIND 1677 case IP_FREEBIND: 1678 #endif 1679 case IP_MULTICAST_TTL: 1680 case IP_MULTICAST_LOOP: 1681 if (get_user_u32(len, optlen)) 1682 return -TARGET_EFAULT; 1683 if (len < 0) 1684 return -TARGET_EINVAL; 1685 lv = sizeof(lv); 1686 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1687 if (ret < 0) 1688 return ret; 1689 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 1690 len = 1; 1691 if (put_user_u32(len, optlen) 1692 || put_user_u8(val, optval_addr)) 1693 return -TARGET_EFAULT; 1694 } else { 1695 if (len > sizeof(int)) 1696 len = sizeof(int); 1697 if (put_user_u32(len, optlen) 1698 || put_user_u32(val, optval_addr)) 1699 return -TARGET_EFAULT; 1700 } 1701 break; 1702 default: 1703 ret = -TARGET_ENOPROTOOPT; 1704 break; 1705 } 1706 break; 1707 default: 1708 unimplemented: 1709 gemu_log("getsockopt level=%d optname=%d not yet supported\n", 1710 level, optname); 1711 ret = -TARGET_EOPNOTSUPP; 1712 break; 1713 } 1714 return ret; 1715 } 1716 1717 /* FIXME 1718 * lock_iovec()/unlock_iovec() have a return code of 0 for success where 1719 * other lock functions have a return code of 0 for failure. 
1720 */ 1721 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr, 1722 int count, int copy) 1723 { 1724 struct target_iovec *target_vec; 1725 abi_ulong base; 1726 int i; 1727 1728 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1); 1729 if (!target_vec) 1730 return -TARGET_EFAULT; 1731 for(i = 0;i < count; i++) { 1732 base = tswapal(target_vec[i].iov_base); 1733 vec[i].iov_len = tswapal(target_vec[i].iov_len); 1734 if (vec[i].iov_len != 0) { 1735 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy); 1736 /* Don't check lock_user return value. We must call writev even 1737 if a element has invalid base address. */ 1738 } else { 1739 /* zero length pointer is ignored */ 1740 vec[i].iov_base = NULL; 1741 } 1742 } 1743 unlock_user (target_vec, target_addr, 0); 1744 return 0; 1745 } 1746 1747 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr, 1748 int count, int copy) 1749 { 1750 struct target_iovec *target_vec; 1751 abi_ulong base; 1752 int i; 1753 1754 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1); 1755 if (!target_vec) 1756 return -TARGET_EFAULT; 1757 for(i = 0;i < count; i++) { 1758 if (target_vec[i].iov_base) { 1759 base = tswapal(target_vec[i].iov_base); 1760 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); 1761 } 1762 } 1763 unlock_user (target_vec, target_addr, 0); 1764 1765 return 0; 1766 } 1767 1768 /* do_socket() Must return target values and target errnos. */ 1769 static abi_long do_socket(int domain, int type, int protocol) 1770 { 1771 #if defined(TARGET_MIPS) 1772 switch(type) { 1773 case TARGET_SOCK_DGRAM: 1774 type = SOCK_DGRAM; 1775 break; 1776 case TARGET_SOCK_STREAM: 1777 type = SOCK_STREAM; 1778 break; 1779 case TARGET_SOCK_RAW: 1780 type = SOCK_RAW; 1781 break; 1782 case TARGET_SOCK_RDM: 1783 type = SOCK_RDM; 1784 break; 1785 case TARGET_SOCK_SEQPACKET: 1786 type = SOCK_SEQPACKET; 1787 break; 1788 case TARGET_SOCK_PACKET: 1789 type = SOCK_PACKET; 1790 break; 1791 } 1792 #endif 1793 if (domain == PF_NETLINK) 1794 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */ 1795 return get_errno(socket(domain, type, protocol)); 1796 } 1797 1798 /* do_bind() Must return target values and target errnos. */ 1799 static abi_long do_bind(int sockfd, abi_ulong target_addr, 1800 socklen_t addrlen) 1801 { 1802 void *addr; 1803 abi_long ret; 1804 1805 if ((int)addrlen < 0) { 1806 return -TARGET_EINVAL; 1807 } 1808 1809 addr = alloca(addrlen+1); 1810 1811 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1812 if (ret) 1813 return ret; 1814 1815 return get_errno(bind(sockfd, addr, addrlen)); 1816 } 1817 1818 /* do_connect() Must return target values and target errnos. */ 1819 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1820 socklen_t addrlen) 1821 { 1822 void *addr; 1823 abi_long ret; 1824 1825 if ((int)addrlen < 0) { 1826 return -TARGET_EINVAL; 1827 } 1828 1829 addr = alloca(addrlen); 1830 1831 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1832 if (ret) 1833 return ret; 1834 1835 return get_errno(connect(sockfd, addr, addrlen)); 1836 } 1837 1838 /* do_sendrecvmsg() Must return target values and target errnos. 
*/ 1839 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1840 int flags, int send) 1841 { 1842 abi_long ret, len; 1843 struct target_msghdr *msgp; 1844 struct msghdr msg; 1845 int count; 1846 struct iovec *vec; 1847 abi_ulong target_vec; 1848 1849 /* FIXME */ 1850 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1851 msgp, 1852 target_msg, 1853 send ? 1 : 0)) 1854 return -TARGET_EFAULT; 1855 if (msgp->msg_name) { 1856 msg.msg_namelen = tswap32(msgp->msg_namelen); 1857 msg.msg_name = alloca(msg.msg_namelen); 1858 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1859 msg.msg_namelen); 1860 if (ret) { 1861 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 1862 return ret; 1863 } 1864 } else { 1865 msg.msg_name = NULL; 1866 msg.msg_namelen = 0; 1867 } 1868 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1869 msg.msg_control = alloca(msg.msg_controllen); 1870 msg.msg_flags = tswap32(msgp->msg_flags); 1871 1872 count = tswapal(msgp->msg_iovlen); 1873 vec = alloca(count * sizeof(struct iovec)); 1874 target_vec = tswapal(msgp->msg_iov); 1875 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send); 1876 msg.msg_iovlen = count; 1877 msg.msg_iov = vec; 1878 1879 if (send) { 1880 ret = target_to_host_cmsg(&msg, msgp); 1881 if (ret == 0) 1882 ret = get_errno(sendmsg(fd, &msg, flags)); 1883 } else { 1884 ret = get_errno(recvmsg(fd, &msg, flags)); 1885 if (!is_error(ret)) { 1886 len = ret; 1887 ret = host_to_target_cmsg(msgp, &msg); 1888 if (!is_error(ret)) 1889 ret = len; 1890 } 1891 } 1892 unlock_iovec(vec, target_vec, count, !send); 1893 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 1894 return ret; 1895 } 1896 1897 /* do_accept() Must return target values and target errnos. */ 1898 static abi_long do_accept(int fd, abi_ulong target_addr, 1899 abi_ulong target_addrlen_addr) 1900 { 1901 socklen_t addrlen; 1902 void *addr; 1903 abi_long ret; 1904 1905 if (target_addr == 0) 1906 return get_errno(accept(fd, NULL, NULL)); 1907 1908 /* linux returns EINVAL if addrlen pointer is invalid */ 1909 if (get_user_u32(addrlen, target_addrlen_addr)) 1910 return -TARGET_EINVAL; 1911 1912 if ((int)addrlen < 0) { 1913 return -TARGET_EINVAL; 1914 } 1915 1916 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 1917 return -TARGET_EINVAL; 1918 1919 addr = alloca(addrlen); 1920 1921 ret = get_errno(accept(fd, addr, &addrlen)); 1922 if (!is_error(ret)) { 1923 host_to_target_sockaddr(target_addr, addr, addrlen); 1924 if (put_user_u32(addrlen, target_addrlen_addr)) 1925 ret = -TARGET_EFAULT; 1926 } 1927 return ret; 1928 } 1929 1930 /* do_getpeername() Must return target values and target errnos. */ 1931 static abi_long do_getpeername(int fd, abi_ulong target_addr, 1932 abi_ulong target_addrlen_addr) 1933 { 1934 socklen_t addrlen; 1935 void *addr; 1936 abi_long ret; 1937 1938 if (get_user_u32(addrlen, target_addrlen_addr)) 1939 return -TARGET_EFAULT; 1940 1941 if ((int)addrlen < 0) { 1942 return -TARGET_EINVAL; 1943 } 1944 1945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 1946 return -TARGET_EFAULT; 1947 1948 addr = alloca(addrlen); 1949 1950 ret = get_errno(getpeername(fd, addr, &addrlen)); 1951 if (!is_error(ret)) { 1952 host_to_target_sockaddr(target_addr, addr, addrlen); 1953 if (put_user_u32(addrlen, target_addrlen_addr)) 1954 ret = -TARGET_EFAULT; 1955 } 1956 return ret; 1957 } 1958 1959 /* do_getsockname() Must return target values and target errnos. 
*/ 1960 static abi_long do_getsockname(int fd, abi_ulong target_addr, 1961 abi_ulong target_addrlen_addr) 1962 { 1963 socklen_t addrlen; 1964 void *addr; 1965 abi_long ret; 1966 1967 if (get_user_u32(addrlen, target_addrlen_addr)) 1968 return -TARGET_EFAULT; 1969 1970 if ((int)addrlen < 0) { 1971 return -TARGET_EINVAL; 1972 } 1973 1974 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 1975 return -TARGET_EFAULT; 1976 1977 addr = alloca(addrlen); 1978 1979 ret = get_errno(getsockname(fd, addr, &addrlen)); 1980 if (!is_error(ret)) { 1981 host_to_target_sockaddr(target_addr, addr, addrlen); 1982 if (put_user_u32(addrlen, target_addrlen_addr)) 1983 ret = -TARGET_EFAULT; 1984 } 1985 return ret; 1986 } 1987 1988 /* do_socketpair() Must return target values and target errnos. */ 1989 static abi_long do_socketpair(int domain, int type, int protocol, 1990 abi_ulong target_tab_addr) 1991 { 1992 int tab[2]; 1993 abi_long ret; 1994 1995 ret = get_errno(socketpair(domain, type, protocol, tab)); 1996 if (!is_error(ret)) { 1997 if (put_user_s32(tab[0], target_tab_addr) 1998 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 1999 ret = -TARGET_EFAULT; 2000 } 2001 return ret; 2002 } 2003 2004 /* do_sendto() Must return target values and target errnos. */ 2005 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2006 abi_ulong target_addr, socklen_t addrlen) 2007 { 2008 void *addr; 2009 void *host_msg; 2010 abi_long ret; 2011 2012 if ((int)addrlen < 0) { 2013 return -TARGET_EINVAL; 2014 } 2015 2016 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2017 if (!host_msg) 2018 return -TARGET_EFAULT; 2019 if (target_addr) { 2020 addr = alloca(addrlen); 2021 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2022 if (ret) { 2023 unlock_user(host_msg, msg, 0); 2024 return ret; 2025 } 2026 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2027 } else { 2028 ret = get_errno(send(fd, host_msg, len, flags)); 2029 } 2030 unlock_user(host_msg, msg, 0); 2031 return ret; 2032 } 2033 2034 /* do_recvfrom() Must return target values and target errnos. */ 2035 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2036 abi_ulong target_addr, 2037 abi_ulong target_addrlen) 2038 { 2039 socklen_t addrlen; 2040 void *addr; 2041 void *host_msg; 2042 abi_long ret; 2043 2044 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2045 if (!host_msg) 2046 return -TARGET_EFAULT; 2047 if (target_addr) { 2048 if (get_user_u32(addrlen, target_addrlen)) { 2049 ret = -TARGET_EFAULT; 2050 goto fail; 2051 } 2052 if ((int)addrlen < 0) { 2053 ret = -TARGET_EINVAL; 2054 goto fail; 2055 } 2056 addr = alloca(addrlen); 2057 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2058 } else { 2059 addr = NULL; /* To keep compiler quiet. */ 2060 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2061 } 2062 if (!is_error(ret)) { 2063 if (target_addr) { 2064 host_to_target_sockaddr(target_addr, addr, addrlen); 2065 if (put_user_u32(addrlen, target_addrlen)) { 2066 ret = -TARGET_EFAULT; 2067 goto fail; 2068 } 2069 } 2070 unlock_user(host_msg, msg, len); 2071 } else { 2072 fail: 2073 unlock_user(host_msg, msg, 0); 2074 } 2075 return ret; 2076 } 2077 2078 #ifdef TARGET_NR_socketcall 2079 /* do_socketcall() Must return target values and target errnos. 
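   socketcall(2) multiplexes the socket API: 'num' selects the operation and
   'vptr' points to an array of abi_ulong arguments in guest memory, which are
   fetched below with get_user_ual() at offsets that are multiples of
   n = sizeof(abi_ulong).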
*/ 2080 static abi_long do_socketcall(int num, abi_ulong vptr) 2081 { 2082 abi_long ret; 2083 const int n = sizeof(abi_ulong); 2084 2085 switch(num) { 2086 case SOCKOP_socket: 2087 { 2088 abi_ulong domain, type, protocol; 2089 2090 if (get_user_ual(domain, vptr) 2091 || get_user_ual(type, vptr + n) 2092 || get_user_ual(protocol, vptr + 2 * n)) 2093 return -TARGET_EFAULT; 2094 2095 ret = do_socket(domain, type, protocol); 2096 } 2097 break; 2098 case SOCKOP_bind: 2099 { 2100 abi_ulong sockfd; 2101 abi_ulong target_addr; 2102 socklen_t addrlen; 2103 2104 if (get_user_ual(sockfd, vptr) 2105 || get_user_ual(target_addr, vptr + n) 2106 || get_user_ual(addrlen, vptr + 2 * n)) 2107 return -TARGET_EFAULT; 2108 2109 ret = do_bind(sockfd, target_addr, addrlen); 2110 } 2111 break; 2112 case SOCKOP_connect: 2113 { 2114 abi_ulong sockfd; 2115 abi_ulong target_addr; 2116 socklen_t addrlen; 2117 2118 if (get_user_ual(sockfd, vptr) 2119 || get_user_ual(target_addr, vptr + n) 2120 || get_user_ual(addrlen, vptr + 2 * n)) 2121 return -TARGET_EFAULT; 2122 2123 ret = do_connect(sockfd, target_addr, addrlen); 2124 } 2125 break; 2126 case SOCKOP_listen: 2127 { 2128 abi_ulong sockfd, backlog; 2129 2130 if (get_user_ual(sockfd, vptr) 2131 || get_user_ual(backlog, vptr + n)) 2132 return -TARGET_EFAULT; 2133 2134 ret = get_errno(listen(sockfd, backlog)); 2135 } 2136 break; 2137 case SOCKOP_accept: 2138 { 2139 abi_ulong sockfd; 2140 abi_ulong target_addr, target_addrlen; 2141 2142 if (get_user_ual(sockfd, vptr) 2143 || get_user_ual(target_addr, vptr + n) 2144 || get_user_ual(target_addrlen, vptr + 2 * n)) 2145 return -TARGET_EFAULT; 2146 2147 ret = do_accept(sockfd, target_addr, target_addrlen); 2148 } 2149 break; 2150 case SOCKOP_getsockname: 2151 { 2152 abi_ulong sockfd; 2153 abi_ulong target_addr, target_addrlen; 2154 2155 if (get_user_ual(sockfd, vptr) 2156 || get_user_ual(target_addr, vptr + n) 2157 || get_user_ual(target_addrlen, vptr + 2 * n)) 2158 return -TARGET_EFAULT; 2159 2160 ret = do_getsockname(sockfd, target_addr, target_addrlen); 2161 } 2162 break; 2163 case SOCKOP_getpeername: 2164 { 2165 abi_ulong sockfd; 2166 abi_ulong target_addr, target_addrlen; 2167 2168 if (get_user_ual(sockfd, vptr) 2169 || get_user_ual(target_addr, vptr + n) 2170 || get_user_ual(target_addrlen, vptr + 2 * n)) 2171 return -TARGET_EFAULT; 2172 2173 ret = do_getpeername(sockfd, target_addr, target_addrlen); 2174 } 2175 break; 2176 case SOCKOP_socketpair: 2177 { 2178 abi_ulong domain, type, protocol; 2179 abi_ulong tab; 2180 2181 if (get_user_ual(domain, vptr) 2182 || get_user_ual(type, vptr + n) 2183 || get_user_ual(protocol, vptr + 2 * n) 2184 || get_user_ual(tab, vptr + 3 * n)) 2185 return -TARGET_EFAULT; 2186 2187 ret = do_socketpair(domain, type, protocol, tab); 2188 } 2189 break; 2190 case SOCKOP_send: 2191 { 2192 abi_ulong sockfd; 2193 abi_ulong msg; 2194 size_t len; 2195 abi_ulong flags; 2196 2197 if (get_user_ual(sockfd, vptr) 2198 || get_user_ual(msg, vptr + n) 2199 || get_user_ual(len, vptr + 2 * n) 2200 || get_user_ual(flags, vptr + 3 * n)) 2201 return -TARGET_EFAULT; 2202 2203 ret = do_sendto(sockfd, msg, len, flags, 0, 0); 2204 } 2205 break; 2206 case SOCKOP_recv: 2207 { 2208 abi_ulong sockfd; 2209 abi_ulong msg; 2210 size_t len; 2211 abi_ulong flags; 2212 2213 if (get_user_ual(sockfd, vptr) 2214 || get_user_ual(msg, vptr + n) 2215 || get_user_ual(len, vptr + 2 * n) 2216 || get_user_ual(flags, vptr + 3 * n)) 2217 return -TARGET_EFAULT; 2218 2219 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0); 2220 } 2221 break; 
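    /* SOCKOP_sendto and SOCKOP_recvfrom carry six arguments: fd, buffer,
       length, flags, sockaddr pointer and sockaddr length. */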
2222 case SOCKOP_sendto: 2223 { 2224 abi_ulong sockfd; 2225 abi_ulong msg; 2226 size_t len; 2227 abi_ulong flags; 2228 abi_ulong addr; 2229 socklen_t addrlen; 2230 2231 if (get_user_ual(sockfd, vptr) 2232 || get_user_ual(msg, vptr + n) 2233 || get_user_ual(len, vptr + 2 * n) 2234 || get_user_ual(flags, vptr + 3 * n) 2235 || get_user_ual(addr, vptr + 4 * n) 2236 || get_user_ual(addrlen, vptr + 5 * n)) 2237 return -TARGET_EFAULT; 2238 2239 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen); 2240 } 2241 break; 2242 case SOCKOP_recvfrom: 2243 { 2244 abi_ulong sockfd; 2245 abi_ulong msg; 2246 size_t len; 2247 abi_ulong flags; 2248 abi_ulong addr; 2249 socklen_t addrlen; 2250 2251 if (get_user_ual(sockfd, vptr) 2252 || get_user_ual(msg, vptr + n) 2253 || get_user_ual(len, vptr + 2 * n) 2254 || get_user_ual(flags, vptr + 3 * n) 2255 || get_user_ual(addr, vptr + 4 * n) 2256 || get_user_ual(addrlen, vptr + 5 * n)) 2257 return -TARGET_EFAULT; 2258 2259 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen); 2260 } 2261 break; 2262 case SOCKOP_shutdown: 2263 { 2264 abi_ulong sockfd, how; 2265 2266 if (get_user_ual(sockfd, vptr) 2267 || get_user_ual(how, vptr + n)) 2268 return -TARGET_EFAULT; 2269 2270 ret = get_errno(shutdown(sockfd, how)); 2271 } 2272 break; 2273 case SOCKOP_sendmsg: 2274 case SOCKOP_recvmsg: 2275 { 2276 abi_ulong fd; 2277 abi_ulong target_msg; 2278 abi_ulong flags; 2279 2280 if (get_user_ual(fd, vptr) 2281 || get_user_ual(target_msg, vptr + n) 2282 || get_user_ual(flags, vptr + 2 * n)) 2283 return -TARGET_EFAULT; 2284 2285 ret = do_sendrecvmsg(fd, target_msg, flags, 2286 (num == SOCKOP_sendmsg)); 2287 } 2288 break; 2289 case SOCKOP_setsockopt: 2290 { 2291 abi_ulong sockfd; 2292 abi_ulong level; 2293 abi_ulong optname; 2294 abi_ulong optval; 2295 socklen_t optlen; 2296 2297 if (get_user_ual(sockfd, vptr) 2298 || get_user_ual(level, vptr + n) 2299 || get_user_ual(optname, vptr + 2 * n) 2300 || get_user_ual(optval, vptr + 3 * n) 2301 || get_user_ual(optlen, vptr + 4 * n)) 2302 return -TARGET_EFAULT; 2303 2304 ret = do_setsockopt(sockfd, level, optname, optval, optlen); 2305 } 2306 break; 2307 case SOCKOP_getsockopt: 2308 { 2309 abi_ulong sockfd; 2310 abi_ulong level; 2311 abi_ulong optname; 2312 abi_ulong optval; 2313 socklen_t optlen; 2314 2315 if (get_user_ual(sockfd, vptr) 2316 || get_user_ual(level, vptr + n) 2317 || get_user_ual(optname, vptr + 2 * n) 2318 || get_user_ual(optval, vptr + 3 * n) 2319 || get_user_ual(optlen, vptr + 4 * n)) 2320 return -TARGET_EFAULT; 2321 2322 ret = do_getsockopt(sockfd, level, optname, optval, optlen); 2323 } 2324 break; 2325 default: 2326 gemu_log("Unsupported socketcall: %d\n", num); 2327 ret = -TARGET_ENOSYS; 2328 break; 2329 } 2330 return ret; 2331 } 2332 #endif 2333 2334 #define N_SHM_REGIONS 32 2335 2336 static struct shm_region { 2337 abi_ulong start; 2338 abi_ulong size; 2339 } shm_regions[N_SHM_REGIONS]; 2340 2341 struct target_ipc_perm 2342 { 2343 abi_long __key; 2344 abi_ulong uid; 2345 abi_ulong gid; 2346 abi_ulong cuid; 2347 abi_ulong cgid; 2348 unsigned short int mode; 2349 unsigned short int __pad1; 2350 unsigned short int __seq; 2351 unsigned short int __pad2; 2352 abi_ulong __unused1; 2353 abi_ulong __unused2; 2354 }; 2355 2356 struct target_semid_ds 2357 { 2358 struct target_ipc_perm sem_perm; 2359 abi_ulong sem_otime; 2360 abi_ulong __unused1; 2361 abi_ulong sem_ctime; 2362 abi_ulong __unused2; 2363 abi_ulong sem_nsems; 2364 abi_ulong __unused3; 2365 abi_ulong __unused4; 2366 }; 2367 2368 static inline abi_long 
target_to_host_ipc_perm(struct ipc_perm *host_ip, 2369 abi_ulong target_addr) 2370 { 2371 struct target_ipc_perm *target_ip; 2372 struct target_semid_ds *target_sd; 2373 2374 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2375 return -TARGET_EFAULT; 2376 target_ip = &(target_sd->sem_perm); 2377 host_ip->__key = tswapal(target_ip->__key); 2378 host_ip->uid = tswapal(target_ip->uid); 2379 host_ip->gid = tswapal(target_ip->gid); 2380 host_ip->cuid = tswapal(target_ip->cuid); 2381 host_ip->cgid = tswapal(target_ip->cgid); 2382 host_ip->mode = tswap16(target_ip->mode); 2383 unlock_user_struct(target_sd, target_addr, 0); 2384 return 0; 2385 } 2386 2387 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2388 struct ipc_perm *host_ip) 2389 { 2390 struct target_ipc_perm *target_ip; 2391 struct target_semid_ds *target_sd; 2392 2393 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2394 return -TARGET_EFAULT; 2395 target_ip = &(target_sd->sem_perm); 2396 target_ip->__key = tswapal(host_ip->__key); 2397 target_ip->uid = tswapal(host_ip->uid); 2398 target_ip->gid = tswapal(host_ip->gid); 2399 target_ip->cuid = tswapal(host_ip->cuid); 2400 target_ip->cgid = tswapal(host_ip->cgid); 2401 target_ip->mode = tswap16(host_ip->mode); 2402 unlock_user_struct(target_sd, target_addr, 1); 2403 return 0; 2404 } 2405 2406 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2407 abi_ulong target_addr) 2408 { 2409 struct target_semid_ds *target_sd; 2410 2411 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2412 return -TARGET_EFAULT; 2413 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2414 return -TARGET_EFAULT; 2415 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2416 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2417 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2418 unlock_user_struct(target_sd, target_addr, 0); 2419 return 0; 2420 } 2421 2422 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2423 struct semid_ds *host_sd) 2424 { 2425 struct target_semid_ds *target_sd; 2426 2427 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2428 return -TARGET_EFAULT; 2429 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2430 return -TARGET_EFAULT; 2431 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2432 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2433 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2434 unlock_user_struct(target_sd, target_addr, 1); 2435 return 0; 2436 } 2437 2438 struct target_seminfo { 2439 int semmap; 2440 int semmni; 2441 int semmns; 2442 int semmnu; 2443 int semmsl; 2444 int semopm; 2445 int semume; 2446 int semusz; 2447 int semvmx; 2448 int semaem; 2449 }; 2450 2451 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2452 struct seminfo *host_seminfo) 2453 { 2454 struct target_seminfo *target_seminfo; 2455 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2456 return -TARGET_EFAULT; 2457 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2458 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2459 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2460 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2461 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2462 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2463 __put_user(host_seminfo->semume, &target_seminfo->semume); 2464 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2465 
__put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2466 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2467 unlock_user_struct(target_seminfo, target_addr, 1); 2468 return 0; 2469 } 2470 2471 union semun { 2472 int val; 2473 struct semid_ds *buf; 2474 unsigned short *array; 2475 struct seminfo *__buf; 2476 }; 2477 2478 union target_semun { 2479 int val; 2480 abi_ulong buf; 2481 abi_ulong array; 2482 abi_ulong __buf; 2483 }; 2484 2485 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2486 abi_ulong target_addr) 2487 { 2488 int nsems; 2489 unsigned short *array; 2490 union semun semun; 2491 struct semid_ds semid_ds; 2492 int i, ret; 2493 2494 semun.buf = &semid_ds; 2495 2496 ret = semctl(semid, 0, IPC_STAT, semun); 2497 if (ret == -1) 2498 return get_errno(ret); 2499 2500 nsems = semid_ds.sem_nsems; 2501 2502 *host_array = malloc(nsems*sizeof(unsigned short)); 2503 array = lock_user(VERIFY_READ, target_addr, 2504 nsems*sizeof(unsigned short), 1); 2505 if (!array) 2506 return -TARGET_EFAULT; 2507 2508 for(i=0; i<nsems; i++) { 2509 __get_user((*host_array)[i], &array[i]); 2510 } 2511 unlock_user(array, target_addr, 0); 2512 2513 return 0; 2514 } 2515 2516 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2517 unsigned short **host_array) 2518 { 2519 int nsems; 2520 unsigned short *array; 2521 union semun semun; 2522 struct semid_ds semid_ds; 2523 int i, ret; 2524 2525 semun.buf = &semid_ds; 2526 2527 ret = semctl(semid, 0, IPC_STAT, semun); 2528 if (ret == -1) 2529 return get_errno(ret); 2530 2531 nsems = semid_ds.sem_nsems; 2532 2533 array = lock_user(VERIFY_WRITE, target_addr, 2534 nsems*sizeof(unsigned short), 0); 2535 if (!array) 2536 return -TARGET_EFAULT; 2537 2538 for(i=0; i<nsems; i++) { 2539 __put_user((*host_array)[i], &array[i]); 2540 } 2541 free(*host_array); 2542 unlock_user(array, target_addr, 1); 2543 2544 return 0; 2545 } 2546 2547 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2548 union target_semun target_su) 2549 { 2550 union semun arg; 2551 struct semid_ds dsarg; 2552 unsigned short *array = NULL; 2553 struct seminfo seminfo; 2554 abi_long ret = -TARGET_EINVAL; 2555 abi_long err; 2556 cmd &= 0xff; 2557 2558 switch( cmd ) { 2559 case GETVAL: 2560 case SETVAL: 2561 arg.val = tswap32(target_su.val); 2562 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2563 target_su.val = tswap32(arg.val); 2564 break; 2565 case GETALL: 2566 case SETALL: 2567 err = target_to_host_semarray(semid, &array, target_su.array); 2568 if (err) 2569 return err; 2570 arg.array = array; 2571 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2572 err = host_to_target_semarray(semid, target_su.array, &array); 2573 if (err) 2574 return err; 2575 break; 2576 case IPC_STAT: 2577 case IPC_SET: 2578 case SEM_STAT: 2579 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2580 if (err) 2581 return err; 2582 arg.buf = &dsarg; 2583 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2584 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2585 if (err) 2586 return err; 2587 break; 2588 case IPC_INFO: 2589 case SEM_INFO: 2590 arg.__buf = &seminfo; 2591 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2592 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2593 if (err) 2594 return err; 2595 break; 2596 case IPC_RMID: 2597 case GETPID: 2598 case GETNCNT: 2599 case GETZCNT: 2600 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2601 break; 2602 } 2603 2604 return ret; 2605 } 2606 2607 struct 
target_sembuf { 2608 unsigned short sem_num; 2609 short sem_op; 2610 short sem_flg; 2611 }; 2612 2613 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2614 abi_ulong target_addr, 2615 unsigned nsops) 2616 { 2617 struct target_sembuf *target_sembuf; 2618 int i; 2619 2620 target_sembuf = lock_user(VERIFY_READ, target_addr, 2621 nsops*sizeof(struct target_sembuf), 1); 2622 if (!target_sembuf) 2623 return -TARGET_EFAULT; 2624 2625 for(i=0; i<nsops; i++) { 2626 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2627 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2628 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2629 } 2630 2631 unlock_user(target_sembuf, target_addr, 0); 2632 2633 return 0; 2634 } 2635 2636 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2637 { 2638 struct sembuf sops[nsops]; 2639 2640 if (target_to_host_sembuf(sops, ptr, nsops)) 2641 return -TARGET_EFAULT; 2642 2643 return semop(semid, sops, nsops); 2644 } 2645 2646 struct target_msqid_ds 2647 { 2648 struct target_ipc_perm msg_perm; 2649 abi_ulong msg_stime; 2650 #if TARGET_ABI_BITS == 32 2651 abi_ulong __unused1; 2652 #endif 2653 abi_ulong msg_rtime; 2654 #if TARGET_ABI_BITS == 32 2655 abi_ulong __unused2; 2656 #endif 2657 abi_ulong msg_ctime; 2658 #if TARGET_ABI_BITS == 32 2659 abi_ulong __unused3; 2660 #endif 2661 abi_ulong __msg_cbytes; 2662 abi_ulong msg_qnum; 2663 abi_ulong msg_qbytes; 2664 abi_ulong msg_lspid; 2665 abi_ulong msg_lrpid; 2666 abi_ulong __unused4; 2667 abi_ulong __unused5; 2668 }; 2669 2670 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2671 abi_ulong target_addr) 2672 { 2673 struct target_msqid_ds *target_md; 2674 2675 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2676 return -TARGET_EFAULT; 2677 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2678 return -TARGET_EFAULT; 2679 host_md->msg_stime = tswapal(target_md->msg_stime); 2680 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2681 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2682 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2683 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2684 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2685 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2686 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2687 unlock_user_struct(target_md, target_addr, 0); 2688 return 0; 2689 } 2690 2691 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2692 struct msqid_ds *host_md) 2693 { 2694 struct target_msqid_ds *target_md; 2695 2696 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2697 return -TARGET_EFAULT; 2698 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2699 return -TARGET_EFAULT; 2700 target_md->msg_stime = tswapal(host_md->msg_stime); 2701 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2702 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2703 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2704 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2705 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2706 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2707 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2708 unlock_user_struct(target_md, target_addr, 1); 2709 return 0; 2710 } 2711 2712 struct target_msginfo { 2713 int msgpool; 2714 int msgmap; 2715 int msgmax; 2716 int msgmnb; 2717 int msgmni; 2718 int msgssz; 2719 int msgtql; 2720 unsigned short int msgseg; 
2721 }; 2722 2723 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2724 struct msginfo *host_msginfo) 2725 { 2726 struct target_msginfo *target_msginfo; 2727 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2728 return -TARGET_EFAULT; 2729 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2730 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2731 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2732 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2733 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2734 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2735 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2736 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2737 unlock_user_struct(target_msginfo, target_addr, 1); 2738 return 0; 2739 } 2740 2741 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2742 { 2743 struct msqid_ds dsarg; 2744 struct msginfo msginfo; 2745 abi_long ret = -TARGET_EINVAL; 2746 2747 cmd &= 0xff; 2748 2749 switch (cmd) { 2750 case IPC_STAT: 2751 case IPC_SET: 2752 case MSG_STAT: 2753 if (target_to_host_msqid_ds(&dsarg,ptr)) 2754 return -TARGET_EFAULT; 2755 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2756 if (host_to_target_msqid_ds(ptr,&dsarg)) 2757 return -TARGET_EFAULT; 2758 break; 2759 case IPC_RMID: 2760 ret = get_errno(msgctl(msgid, cmd, NULL)); 2761 break; 2762 case IPC_INFO: 2763 case MSG_INFO: 2764 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2765 if (host_to_target_msginfo(ptr, &msginfo)) 2766 return -TARGET_EFAULT; 2767 break; 2768 } 2769 2770 return ret; 2771 } 2772 2773 struct target_msgbuf { 2774 abi_long mtype; 2775 char mtext[1]; 2776 }; 2777 2778 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2779 unsigned int msgsz, int msgflg) 2780 { 2781 struct target_msgbuf *target_mb; 2782 struct msgbuf *host_mb; 2783 abi_long ret = 0; 2784 2785 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2786 return -TARGET_EFAULT; 2787 host_mb = malloc(msgsz+sizeof(long)); 2788 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2789 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2790 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2791 free(host_mb); 2792 unlock_user_struct(target_mb, msgp, 0); 2793 2794 return ret; 2795 } 2796 2797 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2798 unsigned int msgsz, abi_long msgtyp, 2799 int msgflg) 2800 { 2801 struct target_msgbuf *target_mb; 2802 char *target_mtext; 2803 struct msgbuf *host_mb; 2804 abi_long ret = 0; 2805 2806 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2807 return -TARGET_EFAULT; 2808 2809 host_mb = malloc(msgsz+sizeof(long)); 2810 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg)); 2811 2812 if (ret > 0) { 2813 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2814 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2815 if (!target_mtext) { 2816 ret = -TARGET_EFAULT; 2817 goto end; 2818 } 2819 memcpy(target_mb->mtext, host_mb->mtext, ret); 2820 unlock_user(target_mtext, target_mtext_addr, ret); 2821 } 2822 2823 target_mb->mtype = tswapal(host_mb->mtype); 2824 free(host_mb); 2825 2826 end: 2827 if (target_mb) 2828 unlock_user_struct(target_mb, msgp, 1); 2829 return ret; 2830 } 2831 2832 struct target_shmid_ds 2833 { 2834 struct target_ipc_perm shm_perm; 2835 abi_ulong shm_segsz; 2836 abi_ulong shm_atime; 2837 #if TARGET_ABI_BITS == 32 2838 abi_ulong __unused1; 
2839 #endif 2840 abi_ulong shm_dtime; 2841 #if TARGET_ABI_BITS == 32 2842 abi_ulong __unused2; 2843 #endif 2844 abi_ulong shm_ctime; 2845 #if TARGET_ABI_BITS == 32 2846 abi_ulong __unused3; 2847 #endif 2848 int shm_cpid; 2849 int shm_lpid; 2850 abi_ulong shm_nattch; 2851 unsigned long int __unused4; 2852 unsigned long int __unused5; 2853 }; 2854 2855 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2856 abi_ulong target_addr) 2857 { 2858 struct target_shmid_ds *target_sd; 2859 2860 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2861 return -TARGET_EFAULT; 2862 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2863 return -TARGET_EFAULT; 2864 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2865 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2866 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2867 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2868 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2869 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2870 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2871 unlock_user_struct(target_sd, target_addr, 0); 2872 return 0; 2873 } 2874 2875 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2876 struct shmid_ds *host_sd) 2877 { 2878 struct target_shmid_ds *target_sd; 2879 2880 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2881 return -TARGET_EFAULT; 2882 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2883 return -TARGET_EFAULT; 2884 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2885 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2886 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2887 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2888 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2889 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2890 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2891 unlock_user_struct(target_sd, target_addr, 1); 2892 return 0; 2893 } 2894 2895 struct target_shminfo { 2896 abi_ulong shmmax; 2897 abi_ulong shmmin; 2898 abi_ulong shmmni; 2899 abi_ulong shmseg; 2900 abi_ulong shmall; 2901 }; 2902 2903 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2904 struct shminfo *host_shminfo) 2905 { 2906 struct target_shminfo *target_shminfo; 2907 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 2908 return -TARGET_EFAULT; 2909 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 2910 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 2911 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 2912 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 2913 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 2914 unlock_user_struct(target_shminfo, target_addr, 1); 2915 return 0; 2916 } 2917 2918 struct target_shm_info { 2919 int used_ids; 2920 abi_ulong shm_tot; 2921 abi_ulong shm_rss; 2922 abi_ulong shm_swp; 2923 abi_ulong swap_attempts; 2924 abi_ulong swap_successes; 2925 }; 2926 2927 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 2928 struct shm_info *host_shm_info) 2929 { 2930 struct target_shm_info *target_shm_info; 2931 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 2932 return -TARGET_EFAULT; 2933 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 2934 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 2935 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 2936 
__put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 2937 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 2938 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 2939 unlock_user_struct(target_shm_info, target_addr, 1); 2940 return 0; 2941 } 2942 2943 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 2944 { 2945 struct shmid_ds dsarg; 2946 struct shminfo shminfo; 2947 struct shm_info shm_info; 2948 abi_long ret = -TARGET_EINVAL; 2949 2950 cmd &= 0xff; 2951 2952 switch(cmd) { 2953 case IPC_STAT: 2954 case IPC_SET: 2955 case SHM_STAT: 2956 if (target_to_host_shmid_ds(&dsarg, buf)) 2957 return -TARGET_EFAULT; 2958 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 2959 if (host_to_target_shmid_ds(buf, &dsarg)) 2960 return -TARGET_EFAULT; 2961 break; 2962 case IPC_INFO: 2963 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 2964 if (host_to_target_shminfo(buf, &shminfo)) 2965 return -TARGET_EFAULT; 2966 break; 2967 case SHM_INFO: 2968 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 2969 if (host_to_target_shm_info(buf, &shm_info)) 2970 return -TARGET_EFAULT; 2971 break; 2972 case IPC_RMID: 2973 case SHM_LOCK: 2974 case SHM_UNLOCK: 2975 ret = get_errno(shmctl(shmid, cmd, NULL)); 2976 break; 2977 } 2978 2979 return ret; 2980 } 2981 2982 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 2983 { 2984 abi_long raddr; 2985 void *host_raddr; 2986 struct shmid_ds shm_info; 2987 int i,ret; 2988 2989 /* find out the length of the shared memory segment */ 2990 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 2991 if (is_error(ret)) { 2992 /* can't get length, bail out */ 2993 return ret; 2994 } 2995 2996 mmap_lock(); 2997 2998 if (shmaddr) 2999 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 3000 else { 3001 abi_ulong mmap_start; 3002 3003 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 3004 3005 if (mmap_start == -1) { 3006 errno = ENOMEM; 3007 host_raddr = (void *)-1; 3008 } else 3009 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 3010 } 3011 3012 if (host_raddr == (void *)-1) { 3013 mmap_unlock(); 3014 return get_errno((long)host_raddr); 3015 } 3016 raddr=h2g((unsigned long)host_raddr); 3017 3018 page_set_flags(raddr, raddr + shm_info.shm_segsz, 3019 PAGE_VALID | PAGE_READ | 3020 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3021 3022 for (i = 0; i < N_SHM_REGIONS; i++) { 3023 if (shm_regions[i].start == 0) { 3024 shm_regions[i].start = raddr; 3025 shm_regions[i].size = shm_info.shm_segsz; 3026 break; 3027 } 3028 } 3029 3030 mmap_unlock(); 3031 return raddr; 3032 3033 } 3034 3035 static inline abi_long do_shmdt(abi_ulong shmaddr) 3036 { 3037 int i; 3038 3039 for (i = 0; i < N_SHM_REGIONS; ++i) { 3040 if (shm_regions[i].start == shmaddr) { 3041 shm_regions[i].start = 0; 3042 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3043 break; 3044 } 3045 } 3046 3047 return get_errno(shmdt(g2h(shmaddr))); 3048 } 3049 3050 #ifdef TARGET_NR_ipc 3051 /* ??? This only works with linear mappings. */ 3052 /* do_ipc() must return target values and target errnos. 
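   The ipc(2) syscall multiplexes the SysV IPC operations: the low 16 bits of
   'call' select the operation and the upper 16 bits carry the version, which
   only matters for the msgrcv and shmat variants handled below.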
*/ 3053 static abi_long do_ipc(unsigned int call, int first, 3054 int second, int third, 3055 abi_long ptr, abi_long fifth) 3056 { 3057 int version; 3058 abi_long ret = 0; 3059 3060 version = call >> 16; 3061 call &= 0xffff; 3062 3063 switch (call) { 3064 case IPCOP_semop: 3065 ret = do_semop(first, ptr, second); 3066 break; 3067 3068 case IPCOP_semget: 3069 ret = get_errno(semget(first, second, third)); 3070 break; 3071 3072 case IPCOP_semctl: 3073 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3074 break; 3075 3076 case IPCOP_msgget: 3077 ret = get_errno(msgget(first, second)); 3078 break; 3079 3080 case IPCOP_msgsnd: 3081 ret = do_msgsnd(first, ptr, second, third); 3082 break; 3083 3084 case IPCOP_msgctl: 3085 ret = do_msgctl(first, second, ptr); 3086 break; 3087 3088 case IPCOP_msgrcv: 3089 switch (version) { 3090 case 0: 3091 { 3092 struct target_ipc_kludge { 3093 abi_long msgp; 3094 abi_long msgtyp; 3095 } *tmp; 3096 3097 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3098 ret = -TARGET_EFAULT; 3099 break; 3100 } 3101 3102 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third); 3103 3104 unlock_user_struct(tmp, ptr, 0); 3105 break; 3106 } 3107 default: 3108 ret = do_msgrcv(first, ptr, second, fifth, third); 3109 } 3110 break; 3111 3112 case IPCOP_shmat: 3113 switch (version) { 3114 default: 3115 { 3116 abi_ulong raddr; 3117 raddr = do_shmat(first, ptr, second); 3118 if (is_error(raddr)) 3119 return get_errno(raddr); 3120 if (put_user_ual(raddr, third)) 3121 return -TARGET_EFAULT; 3122 break; 3123 } 3124 case 1: 3125 ret = -TARGET_EINVAL; 3126 break; 3127 } 3128 break; 3129 case IPCOP_shmdt: 3130 ret = do_shmdt(ptr); 3131 break; 3132 3133 case IPCOP_shmget: 3134 /* IPC_* flag values are the same on all linux platforms */ 3135 ret = get_errno(shmget(first, second, third)); 3136 break; 3137 3138 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3139 case IPCOP_shmctl: 3140 ret = do_shmctl(first, second, third); 3141 break; 3142 default: 3143 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3144 ret = -TARGET_ENOSYS; 3145 break; 3146 } 3147 return ret; 3148 } 3149 #endif 3150 3151 /* kernel structure types definitions */ 3152 3153 #define STRUCT(name, ...) STRUCT_ ## name, 3154 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3155 enum { 3156 #include "syscall_types.h" 3157 }; 3158 #undef STRUCT 3159 #undef STRUCT_SPECIAL 3160 3161 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3162 #define STRUCT_SPECIAL(name) 3163 #include "syscall_types.h" 3164 #undef STRUCT 3165 #undef STRUCT_SPECIAL 3166 3167 typedef struct IOCTLEntry IOCTLEntry; 3168 3169 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3170 int fd, abi_long cmd, abi_long arg); 3171 3172 struct IOCTLEntry { 3173 unsigned int target_cmd; 3174 unsigned int host_cmd; 3175 const char *name; 3176 int access; 3177 do_ioctl_fn *do_ioctl; 3178 const argtype arg_type[5]; 3179 }; 3180 3181 #define IOC_R 0x0001 3182 #define IOC_W 0x0002 3183 #define IOC_RW (IOC_R | IOC_W) 3184 3185 #define MAX_STRUCT_SIZE 4096 3186 3187 #ifdef CONFIG_FIEMAP 3188 /* So fiemap access checks don't overflow on 32 bit systems. 3189 * This is very slightly smaller than the limit imposed by 3190 * the underlying kernel. 
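 * With fm_extent_count bounded by this value, sizeof(struct fiemap) plus
 * fm_extent_count copies of struct fiemap_extent still fits in the 32-bit
 * outbufsz computed in do_ioctl_fs_ioc_fiemap() below.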
3191 */ 3192 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3193 / sizeof(struct fiemap_extent)) 3194 3195 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3196 int fd, abi_long cmd, abi_long arg) 3197 { 3198 /* The parameter for this ioctl is a struct fiemap followed 3199 * by an array of struct fiemap_extent whose size is set 3200 * in fiemap->fm_extent_count. The array is filled in by the 3201 * ioctl. 3202 */ 3203 int target_size_in, target_size_out; 3204 struct fiemap *fm; 3205 const argtype *arg_type = ie->arg_type; 3206 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3207 void *argptr, *p; 3208 abi_long ret; 3209 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3210 uint32_t outbufsz; 3211 int free_fm = 0; 3212 3213 assert(arg_type[0] == TYPE_PTR); 3214 assert(ie->access == IOC_RW); 3215 arg_type++; 3216 target_size_in = thunk_type_size(arg_type, 0); 3217 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3218 if (!argptr) { 3219 return -TARGET_EFAULT; 3220 } 3221 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3222 unlock_user(argptr, arg, 0); 3223 fm = (struct fiemap *)buf_temp; 3224 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3225 return -TARGET_EINVAL; 3226 } 3227 3228 outbufsz = sizeof (*fm) + 3229 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3230 3231 if (outbufsz > MAX_STRUCT_SIZE) { 3232 /* We can't fit all the extents into the fixed size buffer. 3233 * Allocate one that is large enough and use it instead. 3234 */ 3235 fm = malloc(outbufsz); 3236 if (!fm) { 3237 return -TARGET_ENOMEM; 3238 } 3239 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3240 free_fm = 1; 3241 } 3242 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3243 if (!is_error(ret)) { 3244 target_size_out = target_size_in; 3245 /* An extent_count of 0 means we were only counting the extents 3246 * so there are no structs to copy 3247 */ 3248 if (fm->fm_extent_count != 0) { 3249 target_size_out += fm->fm_mapped_extents * extent_size; 3250 } 3251 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3252 if (!argptr) { 3253 ret = -TARGET_EFAULT; 3254 } else { 3255 /* Convert the struct fiemap */ 3256 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3257 if (fm->fm_extent_count != 0) { 3258 p = argptr + target_size_in; 3259 /* ...and then all the struct fiemap_extents */ 3260 for (i = 0; i < fm->fm_mapped_extents; i++) { 3261 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3262 THUNK_TARGET); 3263 p += extent_size; 3264 } 3265 } 3266 unlock_user(argptr, arg, target_size_out); 3267 } 3268 } 3269 if (free_fm) { 3270 free(fm); 3271 } 3272 return ret; 3273 } 3274 #endif 3275 3276 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3277 int fd, abi_long cmd, abi_long arg) 3278 { 3279 const argtype *arg_type = ie->arg_type; 3280 int target_size; 3281 void *argptr; 3282 int ret; 3283 struct ifconf *host_ifconf; 3284 uint32_t outbufsz; 3285 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3286 int target_ifreq_size; 3287 int nb_ifreq; 3288 int free_buf = 0; 3289 int i; 3290 int target_ifc_len; 3291 abi_long target_ifc_buf; 3292 int host_ifc_len; 3293 char *host_ifc_buf; 3294 3295 assert(arg_type[0] == TYPE_PTR); 3296 assert(ie->access == IOC_RW); 3297 3298 arg_type++; 3299 target_size = thunk_type_size(arg_type, 0); 3300 3301 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3302 if (!argptr) 3303 return -TARGET_EFAULT; 3304 thunk_convert(buf_temp, argptr, 
arg_type, THUNK_HOST); 3305 unlock_user(argptr, arg, 0); 3306 3307 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3308 target_ifc_len = host_ifconf->ifc_len; 3309 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3310 3311 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3312 nb_ifreq = target_ifc_len / target_ifreq_size; 3313 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3314 3315 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3316 if (outbufsz > MAX_STRUCT_SIZE) { 3317 /* We can't fit all the extents into the fixed size buffer. 3318 * Allocate one that is large enough and use it instead. 3319 */ 3320 host_ifconf = malloc(outbufsz); 3321 if (!host_ifconf) { 3322 return -TARGET_ENOMEM; 3323 } 3324 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3325 free_buf = 1; 3326 } 3327 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3328 3329 host_ifconf->ifc_len = host_ifc_len; 3330 host_ifconf->ifc_buf = host_ifc_buf; 3331 3332 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3333 if (!is_error(ret)) { 3334 /* convert host ifc_len to target ifc_len */ 3335 3336 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3337 target_ifc_len = nb_ifreq * target_ifreq_size; 3338 host_ifconf->ifc_len = target_ifc_len; 3339 3340 /* restore target ifc_buf */ 3341 3342 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3343 3344 /* copy struct ifconf to target user */ 3345 3346 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3347 if (!argptr) 3348 return -TARGET_EFAULT; 3349 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3350 unlock_user(argptr, arg, target_size); 3351 3352 /* copy ifreq[] to target user */ 3353 3354 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3355 for (i = 0; i < nb_ifreq ; i++) { 3356 thunk_convert(argptr + i * target_ifreq_size, 3357 host_ifc_buf + i * sizeof(struct ifreq), 3358 ifreq_arg_type, THUNK_TARGET); 3359 } 3360 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3361 } 3362 3363 if (free_buf) { 3364 free(host_ifconf); 3365 } 3366 3367 return ret; 3368 } 3369 3370 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3371 abi_long cmd, abi_long arg) 3372 { 3373 void *argptr; 3374 struct dm_ioctl *host_dm; 3375 abi_long guest_data; 3376 uint32_t guest_data_size; 3377 int target_size; 3378 const argtype *arg_type = ie->arg_type; 3379 abi_long ret; 3380 void *big_buf = NULL; 3381 char *host_data; 3382 3383 arg_type++; 3384 target_size = thunk_type_size(arg_type, 0); 3385 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3386 if (!argptr) { 3387 ret = -TARGET_EFAULT; 3388 goto out; 3389 } 3390 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3391 unlock_user(argptr, arg, 0); 3392 3393 /* buf_temp is too small, so fetch things into a bigger buffer */ 3394 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3395 memcpy(big_buf, buf_temp, target_size); 3396 buf_temp = big_buf; 3397 host_dm = big_buf; 3398 3399 guest_data = arg + host_dm->data_start; 3400 if ((guest_data - arg) < 0) { 3401 ret = -EINVAL; 3402 goto out; 3403 } 3404 guest_data_size = host_dm->data_size - host_dm->data_start; 3405 host_data = (char*)host_dm + host_dm->data_start; 3406 3407 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3408 switch (ie->host_cmd) { 3409 case DM_REMOVE_ALL: 3410 case DM_LIST_DEVICES: 3411 case DM_DEV_CREATE: 3412 case DM_DEV_REMOVE: 3413 case DM_DEV_SUSPEND: 3414 case DM_DEV_STATUS: 3415 case DM_DEV_WAIT: 3416 case 
DM_TABLE_STATUS: 3417 case DM_TABLE_CLEAR: 3418 case DM_TABLE_DEPS: 3419 case DM_LIST_VERSIONS: 3420 /* no input data */ 3421 break; 3422 case DM_DEV_RENAME: 3423 case DM_DEV_SET_GEOMETRY: 3424 /* data contains only strings */ 3425 memcpy(host_data, argptr, guest_data_size); 3426 break; 3427 case DM_TARGET_MSG: 3428 memcpy(host_data, argptr, guest_data_size); 3429 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3430 break; 3431 case DM_TABLE_LOAD: 3432 { 3433 void *gspec = argptr; 3434 void *cur_data = host_data; 3435 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3436 int spec_size = thunk_type_size(arg_type, 0); 3437 int i; 3438 3439 for (i = 0; i < host_dm->target_count; i++) { 3440 struct dm_target_spec *spec = cur_data; 3441 uint32_t next; 3442 int slen; 3443 3444 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3445 slen = strlen((char*)gspec + spec_size) + 1; 3446 next = spec->next; 3447 spec->next = sizeof(*spec) + slen; 3448 strcpy((char*)&spec[1], gspec + spec_size); 3449 gspec += next; 3450 cur_data += spec->next; 3451 } 3452 break; 3453 } 3454 default: 3455 ret = -TARGET_EINVAL; 3456 goto out; 3457 } 3458 unlock_user(argptr, guest_data, 0); 3459 3460 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3461 if (!is_error(ret)) { 3462 guest_data = arg + host_dm->data_start; 3463 guest_data_size = host_dm->data_size - host_dm->data_start; 3464 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3465 switch (ie->host_cmd) { 3466 case DM_REMOVE_ALL: 3467 case DM_DEV_CREATE: 3468 case DM_DEV_REMOVE: 3469 case DM_DEV_RENAME: 3470 case DM_DEV_SUSPEND: 3471 case DM_DEV_STATUS: 3472 case DM_TABLE_LOAD: 3473 case DM_TABLE_CLEAR: 3474 case DM_TARGET_MSG: 3475 case DM_DEV_SET_GEOMETRY: 3476 /* no return data */ 3477 break; 3478 case DM_LIST_DEVICES: 3479 { 3480 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3481 uint32_t remaining_data = guest_data_size; 3482 void *cur_data = argptr; 3483 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3484 int nl_size = 12; /* can't use thunk_size due to alignment */ 3485 3486 while (1) { 3487 uint32_t next = nl->next; 3488 if (next) { 3489 nl->next = nl_size + (strlen(nl->name) + 1); 3490 } 3491 if (remaining_data < nl->next) { 3492 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3493 break; 3494 } 3495 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3496 strcpy(cur_data + nl_size, nl->name); 3497 cur_data += nl->next; 3498 remaining_data -= nl->next; 3499 if (!next) { 3500 break; 3501 } 3502 nl = (void*)nl + next; 3503 } 3504 break; 3505 } 3506 case DM_DEV_WAIT: 3507 case DM_TABLE_STATUS: 3508 { 3509 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3510 void *cur_data = argptr; 3511 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3512 int spec_size = thunk_type_size(arg_type, 0); 3513 int i; 3514 3515 for (i = 0; i < host_dm->target_count; i++) { 3516 uint32_t next = spec->next; 3517 int slen = strlen((char*)&spec[1]) + 1; 3518 spec->next = (cur_data - argptr) + spec_size + slen; 3519 if (guest_data_size < spec->next) { 3520 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3521 break; 3522 } 3523 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3524 strcpy(cur_data + spec_size, (char*)&spec[1]); 3525 cur_data = argptr + spec->next; 3526 spec = (void*)host_dm + host_dm->data_start + next; 3527 } 3528 break; 3529 } 3530 case DM_TABLE_DEPS: 3531 { 3532 void *hdata = (void*)host_dm + host_dm->data_start; 3533 int count = *(uint32_t*)hdata; 3534 uint64_t *hdev = 
hdata + 8; 3535 uint64_t *gdev = argptr + 8; 3536 int i; 3537 3538 *(uint32_t*)argptr = tswap32(count); 3539 for (i = 0; i < count; i++) { 3540 *gdev = tswap64(*hdev); 3541 gdev++; 3542 hdev++; 3543 } 3544 break; 3545 } 3546 case DM_LIST_VERSIONS: 3547 { 3548 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3549 uint32_t remaining_data = guest_data_size; 3550 void *cur_data = argptr; 3551 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3552 int vers_size = thunk_type_size(arg_type, 0); 3553 3554 while (1) { 3555 uint32_t next = vers->next; 3556 if (next) { 3557 vers->next = vers_size + (strlen(vers->name) + 1); 3558 } 3559 if (remaining_data < vers->next) { 3560 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3561 break; 3562 } 3563 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3564 strcpy(cur_data + vers_size, vers->name); 3565 cur_data += vers->next; 3566 remaining_data -= vers->next; 3567 if (!next) { 3568 break; 3569 } 3570 vers = (void*)vers + next; 3571 } 3572 break; 3573 } 3574 default: 3575 ret = -TARGET_EINVAL; 3576 goto out; 3577 } 3578 unlock_user(argptr, guest_data, guest_data_size); 3579 3580 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3581 if (!argptr) { 3582 ret = -TARGET_EFAULT; 3583 goto out; 3584 } 3585 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3586 unlock_user(argptr, arg, target_size); 3587 } 3588 out: 3589 if (big_buf) { 3590 free(big_buf); 3591 } 3592 return ret; 3593 } 3594 3595 static IOCTLEntry ioctl_entries[] = { 3596 #define IOCTL(cmd, access, ...) \ 3597 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3598 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3599 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3600 #include "ioctls.h" 3601 { 0, 0, }, 3602 }; 3603 3604 /* ??? Implement proper locking for ioctls. */ 3605 /* do_ioctl() Must return target values and target errnos. 
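   The guest cmd is looked up in ioctl_entries[]; entries that provide a
   do_ioctl callback are handled by it, otherwise the argument is converted
   with the thunk machinery according to arg_type and the IOC_R/IOC_W/IOC_RW
   access flags.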
*/ 3606 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3607 { 3608 const IOCTLEntry *ie; 3609 const argtype *arg_type; 3610 abi_long ret; 3611 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3612 int target_size; 3613 void *argptr; 3614 3615 ie = ioctl_entries; 3616 for(;;) { 3617 if (ie->target_cmd == 0) { 3618 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3619 return -TARGET_ENOSYS; 3620 } 3621 if (ie->target_cmd == cmd) 3622 break; 3623 ie++; 3624 } 3625 arg_type = ie->arg_type; 3626 #if defined(DEBUG) 3627 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3628 #endif 3629 if (ie->do_ioctl) { 3630 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3631 } 3632 3633 switch(arg_type[0]) { 3634 case TYPE_NULL: 3635 /* no argument */ 3636 ret = get_errno(ioctl(fd, ie->host_cmd)); 3637 break; 3638 case TYPE_PTRVOID: 3639 case TYPE_INT: 3640 /* int argment */ 3641 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3642 break; 3643 case TYPE_PTR: 3644 arg_type++; 3645 target_size = thunk_type_size(arg_type, 0); 3646 switch(ie->access) { 3647 case IOC_R: 3648 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3649 if (!is_error(ret)) { 3650 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3651 if (!argptr) 3652 return -TARGET_EFAULT; 3653 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3654 unlock_user(argptr, arg, target_size); 3655 } 3656 break; 3657 case IOC_W: 3658 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3659 if (!argptr) 3660 return -TARGET_EFAULT; 3661 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3662 unlock_user(argptr, arg, 0); 3663 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3664 break; 3665 default: 3666 case IOC_RW: 3667 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3668 if (!argptr) 3669 return -TARGET_EFAULT; 3670 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3671 unlock_user(argptr, arg, 0); 3672 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3673 if (!is_error(ret)) { 3674 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3675 if (!argptr) 3676 return -TARGET_EFAULT; 3677 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3678 unlock_user(argptr, arg, target_size); 3679 } 3680 break; 3681 } 3682 break; 3683 default: 3684 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3685 (long)cmd, arg_type[0]); 3686 ret = -TARGET_ENOSYS; 3687 break; 3688 } 3689 return ret; 3690 } 3691 3692 static const bitmask_transtbl iflag_tbl[] = { 3693 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3694 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3695 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3696 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3697 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3698 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3699 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3700 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3701 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3702 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3703 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3704 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3705 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3706 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3707 { 0, 0, 0, 0 } 3708 }; 3709 3710 static const bitmask_transtbl oflag_tbl[] = { 3711 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3712 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3713 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3714 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3715 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR 
}, 3716 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3717 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3718 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3719 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3720 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3721 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3722 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3723 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3724 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3725 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3726 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3727 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3728 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3729 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3730 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3731 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3732 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3733 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3734 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3735 { 0, 0, 0, 0 } 3736 }; 3737 3738 static const bitmask_transtbl cflag_tbl[] = { 3739 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3740 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3741 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3742 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3743 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3744 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3745 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3746 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3747 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3748 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3749 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3750 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3751 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3752 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3753 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3754 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3755 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3756 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3757 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3758 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3759 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3760 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3761 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3762 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3763 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3764 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3765 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3766 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3767 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3768 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3769 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3770 { 0, 0, 0, 0 } 3771 }; 3772 3773 static const bitmask_transtbl lflag_tbl[] = { 3774 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3775 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3776 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3777 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3778 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3779 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3780 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3781 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3782 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3783 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3784 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3785 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3786 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3787 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3788 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3789 { 0, 0, 0, 0 } 3790 }; 3791 3792 static void target_to_host_termios 
(void *dst, const void *src) 3793 { 3794 struct host_termios *host = dst; 3795 const struct target_termios *target = src; 3796 3797 host->c_iflag = 3798 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3799 host->c_oflag = 3800 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3801 host->c_cflag = 3802 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3803 host->c_lflag = 3804 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3805 host->c_line = target->c_line; 3806 3807 memset(host->c_cc, 0, sizeof(host->c_cc)); 3808 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3809 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3810 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3811 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3812 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3813 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3814 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3815 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3816 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3817 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3818 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3819 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3820 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3821 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3822 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3823 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3824 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3825 } 3826 3827 static void host_to_target_termios (void *dst, const void *src) 3828 { 3829 struct target_termios *target = dst; 3830 const struct host_termios *host = src; 3831 3832 target->c_iflag = 3833 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3834 target->c_oflag = 3835 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3836 target->c_cflag = 3837 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3838 target->c_lflag = 3839 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3840 target->c_line = host->c_line; 3841 3842 memset(target->c_cc, 0, sizeof(target->c_cc)); 3843 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3844 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3845 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3846 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3847 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3848 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3849 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3850 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3851 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3852 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3853 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3854 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3855 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3856 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3857 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3858 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3859 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3860 } 3861 3862 static const StructEntry struct_termios_def = { 3863 .convert = { host_to_target_termios, target_to_host_termios }, 3864 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3865 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3866 }; 3867 3868 static bitmask_transtbl mmap_flags_tbl[] = { 3869 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3870 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3871 { TARGET_MAP_FIXED, 
TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 3872 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 3873 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 3874 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 3875 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 3876 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 3877 { 0, 0, 0, 0 } 3878 }; 3879 3880 #if defined(TARGET_I386) 3881 3882 /* NOTE: there is really one LDT for all the threads */ 3883 static uint8_t *ldt_table; 3884 3885 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 3886 { 3887 int size; 3888 void *p; 3889 3890 if (!ldt_table) 3891 return 0; 3892 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 3893 if (size > bytecount) 3894 size = bytecount; 3895 p = lock_user(VERIFY_WRITE, ptr, size, 0); 3896 if (!p) 3897 return -TARGET_EFAULT; 3898 /* ??? Should this be byteswapped? */ 3899 memcpy(p, ldt_table, size); 3900 unlock_user(p, ptr, size); 3901 return size; 3902 } 3903 3904 /* XXX: add locking support */ 3905 static abi_long write_ldt(CPUX86State *env, 3906 abi_ulong ptr, unsigned long bytecount, int oldmode) 3907 { 3908 struct target_modify_ldt_ldt_s ldt_info; 3909 struct target_modify_ldt_ldt_s *target_ldt_info; 3910 int seg_32bit, contents, read_exec_only, limit_in_pages; 3911 int seg_not_present, useable, lm; 3912 uint32_t *lp, entry_1, entry_2; 3913 3914 if (bytecount != sizeof(ldt_info)) 3915 return -TARGET_EINVAL; 3916 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 3917 return -TARGET_EFAULT; 3918 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3919 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 3920 ldt_info.limit = tswap32(target_ldt_info->limit); 3921 ldt_info.flags = tswap32(target_ldt_info->flags); 3922 unlock_user_struct(target_ldt_info, ptr, 0); 3923 3924 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 3925 return -TARGET_EINVAL; 3926 seg_32bit = ldt_info.flags & 1; 3927 contents = (ldt_info.flags >> 1) & 3; 3928 read_exec_only = (ldt_info.flags >> 3) & 1; 3929 limit_in_pages = (ldt_info.flags >> 4) & 1; 3930 seg_not_present = (ldt_info.flags >> 5) & 1; 3931 useable = (ldt_info.flags >> 6) & 1; 3932 #ifdef TARGET_ABI32 3933 lm = 0; 3934 #else 3935 lm = (ldt_info.flags >> 7) & 1; 3936 #endif 3937 if (contents == 3) { 3938 if (oldmode) 3939 return -TARGET_EINVAL; 3940 if (seg_not_present == 0) 3941 return -TARGET_EINVAL; 3942 } 3943 /* allocate the LDT */ 3944 if (!ldt_table) { 3945 env->ldt.base = target_mmap(0, 3946 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 3947 PROT_READ|PROT_WRITE, 3948 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 3949 if (env->ldt.base == -1) 3950 return -TARGET_ENOMEM; 3951 memset(g2h(env->ldt.base), 0, 3952 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 3953 env->ldt.limit = 0xffff; 3954 ldt_table = g2h(env->ldt.base); 3955 } 3956 3957 /* NOTE: same code as Linux kernel */ 3958 /* Allow LDTs to be cleared by the user.
*/ 3959 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3960 if (oldmode || 3961 (contents == 0 && 3962 read_exec_only == 1 && 3963 seg_32bit == 0 && 3964 limit_in_pages == 0 && 3965 seg_not_present == 1 && 3966 useable == 0 )) { 3967 entry_1 = 0; 3968 entry_2 = 0; 3969 goto install; 3970 } 3971 } 3972 3973 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3974 (ldt_info.limit & 0x0ffff); 3975 entry_2 = (ldt_info.base_addr & 0xff000000) | 3976 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3977 (ldt_info.limit & 0xf0000) | 3978 ((read_exec_only ^ 1) << 9) | 3979 (contents << 10) | 3980 ((seg_not_present ^ 1) << 15) | 3981 (seg_32bit << 22) | 3982 (limit_in_pages << 23) | 3983 (lm << 21) | 3984 0x7000; 3985 if (!oldmode) 3986 entry_2 |= (useable << 20); 3987 3988 /* Install the new entry ... */ 3989 install: 3990 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 3991 lp[0] = tswap32(entry_1); 3992 lp[1] = tswap32(entry_2); 3993 return 0; 3994 } 3995 3996 /* specific and weird i386 syscalls */ 3997 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 3998 unsigned long bytecount) 3999 { 4000 abi_long ret; 4001 4002 switch (func) { 4003 case 0: 4004 ret = read_ldt(ptr, bytecount); 4005 break; 4006 case 1: 4007 ret = write_ldt(env, ptr, bytecount, 1); 4008 break; 4009 case 0x11: 4010 ret = write_ldt(env, ptr, bytecount, 0); 4011 break; 4012 default: 4013 ret = -TARGET_ENOSYS; 4014 break; 4015 } 4016 return ret; 4017 } 4018 4019 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4020 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4021 { 4022 uint64_t *gdt_table = g2h(env->gdt.base); 4023 struct target_modify_ldt_ldt_s ldt_info; 4024 struct target_modify_ldt_ldt_s *target_ldt_info; 4025 int seg_32bit, contents, read_exec_only, limit_in_pages; 4026 int seg_not_present, useable, lm; 4027 uint32_t *lp, entry_1, entry_2; 4028 int i; 4029 4030 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4031 if (!target_ldt_info) 4032 return -TARGET_EFAULT; 4033 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4034 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4035 ldt_info.limit = tswap32(target_ldt_info->limit); 4036 ldt_info.flags = tswap32(target_ldt_info->flags); 4037 if (ldt_info.entry_number == -1) { 4038 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4039 if (gdt_table[i] == 0) { 4040 ldt_info.entry_number = i; 4041 target_ldt_info->entry_number = tswap32(i); 4042 break; 4043 } 4044 } 4045 } 4046 unlock_user_struct(target_ldt_info, ptr, 1); 4047 4048 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4049 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4050 return -TARGET_EINVAL; 4051 seg_32bit = ldt_info.flags & 1; 4052 contents = (ldt_info.flags >> 1) & 3; 4053 read_exec_only = (ldt_info.flags >> 3) & 1; 4054 limit_in_pages = (ldt_info.flags >> 4) & 1; 4055 seg_not_present = (ldt_info.flags >> 5) & 1; 4056 useable = (ldt_info.flags >> 6) & 1; 4057 #ifdef TARGET_ABI32 4058 lm = 0; 4059 #else 4060 lm = (ldt_info.flags >> 7) & 1; 4061 #endif 4062 4063 if (contents == 3) { 4064 if (seg_not_present == 0) 4065 return -TARGET_EINVAL; 4066 } 4067 4068 /* NOTE: same code as Linux kernel */ 4069 /* Allow LDTs to be cleared by the user. 
*/ 4070 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4071 if ((contents == 0 && 4072 read_exec_only == 1 && 4073 seg_32bit == 0 && 4074 limit_in_pages == 0 && 4075 seg_not_present == 1 && 4076 useable == 0 )) { 4077 entry_1 = 0; 4078 entry_2 = 0; 4079 goto install; 4080 } 4081 } 4082 4083 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4084 (ldt_info.limit & 0x0ffff); 4085 entry_2 = (ldt_info.base_addr & 0xff000000) | 4086 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4087 (ldt_info.limit & 0xf0000) | 4088 ((read_exec_only ^ 1) << 9) | 4089 (contents << 10) | 4090 ((seg_not_present ^ 1) << 15) | 4091 (seg_32bit << 22) | 4092 (limit_in_pages << 23) | 4093 (useable << 20) | 4094 (lm << 21) | 4095 0x7000; 4096 4097 /* Install the new entry ... */ 4098 install: 4099 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4100 lp[0] = tswap32(entry_1); 4101 lp[1] = tswap32(entry_2); 4102 return 0; 4103 } 4104 4105 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4106 { 4107 struct target_modify_ldt_ldt_s *target_ldt_info; 4108 uint64_t *gdt_table = g2h(env->gdt.base); 4109 uint32_t base_addr, limit, flags; 4110 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4111 int seg_not_present, useable, lm; 4112 uint32_t *lp, entry_1, entry_2; 4113 4114 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4115 if (!target_ldt_info) 4116 return -TARGET_EFAULT; 4117 idx = tswap32(target_ldt_info->entry_number); 4118 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4119 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4120 unlock_user_struct(target_ldt_info, ptr, 1); 4121 return -TARGET_EINVAL; 4122 } 4123 lp = (uint32_t *)(gdt_table + idx); 4124 entry_1 = tswap32(lp[0]); 4125 entry_2 = tswap32(lp[1]); 4126 4127 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4128 contents = (entry_2 >> 10) & 3; 4129 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4130 seg_32bit = (entry_2 >> 22) & 1; 4131 limit_in_pages = (entry_2 >> 23) & 1; 4132 useable = (entry_2 >> 20) & 1; 4133 #ifdef TARGET_ABI32 4134 lm = 0; 4135 #else 4136 lm = (entry_2 >> 21) & 1; 4137 #endif 4138 flags = (seg_32bit << 0) | (contents << 1) | 4139 (read_exec_only << 3) | (limit_in_pages << 4) | 4140 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4141 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4142 base_addr = (entry_1 >> 16) | 4143 (entry_2 & 0xff000000) | 4144 ((entry_2 & 0xff) << 16); 4145 target_ldt_info->base_addr = tswapal(base_addr); 4146 target_ldt_info->limit = tswap32(limit); 4147 target_ldt_info->flags = tswap32(flags); 4148 unlock_user_struct(target_ldt_info, ptr, 1); 4149 return 0; 4150 } 4151 #endif /* TARGET_I386 && TARGET_ABI32 */ 4152 4153 #ifndef TARGET_ABI32 4154 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4155 { 4156 abi_long ret = 0; 4157 abi_ulong val; 4158 int idx; 4159 4160 switch(code) { 4161 case TARGET_ARCH_SET_GS: 4162 case TARGET_ARCH_SET_FS: 4163 if (code == TARGET_ARCH_SET_GS) 4164 idx = R_GS; 4165 else 4166 idx = R_FS; 4167 cpu_x86_load_seg(env, idx, 0); 4168 env->segs[idx].base = addr; 4169 break; 4170 case TARGET_ARCH_GET_GS: 4171 case TARGET_ARCH_GET_FS: 4172 if (code == TARGET_ARCH_GET_GS) 4173 idx = R_GS; 4174 else 4175 idx = R_FS; 4176 val = env->segs[idx].base; 4177 if (put_user(val, addr, abi_ulong)) 4178 ret = -TARGET_EFAULT; 4179 break; 4180 default: 4181 ret = -TARGET_EINVAL; 4182 break; 4183 } 4184 return ret; 4185 } 4186 #endif 4187 4188 #endif /* defined(TARGET_I386) */ 4189 4190 #define NEW_STACK_SIZE 0x40000 4191 4192 #if defined(CONFIG_USE_NPTL) 
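/* NPTL thread creation: do_fork() fills in a new_thread_info and starts
   clone_func() on a new pthread while holding clone_lock.  The child
   records its TID, stores it through the parent/child tid pointers if
   requested, signals info->cond to wake the parent, and then briefly
   takes clone_lock itself so that it does not enter cpu_loop() before
   the parent has finished setting it up. */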
4193 4194 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 4195 typedef struct { 4196 CPUArchState *env; 4197 pthread_mutex_t mutex; 4198 pthread_cond_t cond; 4199 pthread_t thread; 4200 uint32_t tid; 4201 abi_ulong child_tidptr; 4202 abi_ulong parent_tidptr; 4203 sigset_t sigmask; 4204 } new_thread_info; 4205 4206 static void *clone_func(void *arg) 4207 { 4208 new_thread_info *info = arg; 4209 CPUArchState *env; 4210 TaskState *ts; 4211 4212 env = info->env; 4213 thread_env = env; 4214 ts = (TaskState *)thread_env->opaque; 4215 info->tid = gettid(); 4216 env->host_tid = info->tid; 4217 task_settid(ts); 4218 if (info->child_tidptr) 4219 put_user_u32(info->tid, info->child_tidptr); 4220 if (info->parent_tidptr) 4221 put_user_u32(info->tid, info->parent_tidptr); 4222 /* Enable signals. */ 4223 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 4224 /* Signal to the parent that we're ready. */ 4225 pthread_mutex_lock(&info->mutex); 4226 pthread_cond_broadcast(&info->cond); 4227 pthread_mutex_unlock(&info->mutex); 4228 /* Wait until the parent has finished initializing the TLS state. */ 4229 pthread_mutex_lock(&clone_lock); 4230 pthread_mutex_unlock(&clone_lock); 4231 cpu_loop(env); 4232 /* never exits */ 4233 return NULL; 4234 } 4235 #else 4236 4237 static int clone_func(void *arg) 4238 { 4239 CPUArchState *env = arg; 4240 cpu_loop(env); 4241 /* never exits */ 4242 return 0; 4243 } 4244 #endif 4245 4246 /* do_fork() must return host values and target errnos (unlike most 4247 do_*() functions). */ 4248 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4249 abi_ulong parent_tidptr, target_ulong newtls, 4250 abi_ulong child_tidptr) 4251 { 4252 int ret; 4253 TaskState *ts; 4254 CPUArchState *new_env; 4255 #if defined(CONFIG_USE_NPTL) 4256 unsigned int nptl_flags; 4257 sigset_t sigmask; 4258 #else 4259 uint8_t *new_stack; 4260 #endif 4261 4262 /* Emulate vfork() with fork() */ 4263 if (flags & CLONE_VFORK) 4264 flags &= ~(CLONE_VFORK | CLONE_VM); 4265 4266 if (flags & CLONE_VM) { 4267 TaskState *parent_ts = (TaskState *)env->opaque; 4268 #if defined(CONFIG_USE_NPTL) 4269 new_thread_info info; 4270 pthread_attr_t attr; 4271 #endif 4272 ts = g_malloc0(sizeof(TaskState)); 4273 init_task_state(ts); 4274 /* we create a new CPU instance. */ 4275 new_env = cpu_copy(env); 4276 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC) 4277 cpu_reset(ENV_GET_CPU(new_env)); 4278 #endif 4279 /* Init regs that differ from the parent. */ 4280 cpu_clone_regs(new_env, newsp); 4281 new_env->opaque = ts; 4282 ts->bprm = parent_ts->bprm; 4283 ts->info = parent_ts->info; 4284 #if defined(CONFIG_USE_NPTL) 4285 nptl_flags = flags; 4286 flags &= ~CLONE_NPTL_FLAGS2; 4287 4288 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4289 ts->child_tidptr = child_tidptr; 4290 } 4291 4292 if (nptl_flags & CLONE_SETTLS) 4293 cpu_set_tls (new_env, newtls); 4294 4295 /* Grab a mutex so that thread setup appears atomic.
*/ 4296 pthread_mutex_lock(&clone_lock); 4297 4298 memset(&info, 0, sizeof(info)); 4299 pthread_mutex_init(&info.mutex, NULL); 4300 pthread_mutex_lock(&info.mutex); 4301 pthread_cond_init(&info.cond, NULL); 4302 info.env = new_env; 4303 if (nptl_flags & CLONE_CHILD_SETTID) 4304 info.child_tidptr = child_tidptr; 4305 if (nptl_flags & CLONE_PARENT_SETTID) 4306 info.parent_tidptr = parent_tidptr; 4307 4308 ret = pthread_attr_init(&attr); 4309 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4310 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4311 /* It is not safe to deliver signals until the child has finished 4312 initializing, so temporarily block all signals. */ 4313 sigfillset(&sigmask); 4314 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4315 4316 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4317 /* TODO: Free new CPU state if thread creation failed. */ 4318 4319 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4320 pthread_attr_destroy(&attr); 4321 if (ret == 0) { 4322 /* Wait for the child to initialize. */ 4323 pthread_cond_wait(&info.cond, &info.mutex); 4324 ret = info.tid; 4325 if (flags & CLONE_PARENT_SETTID) 4326 put_user_u32(ret, parent_tidptr); 4327 } else { 4328 ret = -1; 4329 } 4330 pthread_mutex_unlock(&info.mutex); 4331 pthread_cond_destroy(&info.cond); 4332 pthread_mutex_destroy(&info.mutex); 4333 pthread_mutex_unlock(&clone_lock); 4334 #else 4335 if (flags & CLONE_NPTL_FLAGS2) 4336 return -EINVAL; 4337 /* This is probably going to die very quickly, but do it anyway. */ 4338 new_stack = g_malloc0 (NEW_STACK_SIZE); 4339 #ifdef __ia64__ 4340 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env); 4341 #else 4342 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env); 4343 #endif 4344 #endif 4345 } else { 4346 /* if no CLONE_VM, we consider it is a fork */ 4347 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4348 return -EINVAL; 4349 fork_start(); 4350 ret = fork(); 4351 if (ret == 0) { 4352 /* Child Process. */ 4353 cpu_clone_regs(env, newsp); 4354 fork_end(1); 4355 #if defined(CONFIG_USE_NPTL) 4356 /* There is a race condition here. The parent process could 4357 theoretically read the TID in the child process before the child 4358 tid is set. This would require using either ptrace 4359 (not implemented) or having *_tidptr to point at a shared memory 4360 mapping. We can't repeat the spinlock hack used above because 4361 the child process gets its own copy of the lock. */ 4362 if (flags & CLONE_CHILD_SETTID) 4363 put_user_u32(gettid(), child_tidptr); 4364 if (flags & CLONE_PARENT_SETTID) 4365 put_user_u32(gettid(), parent_tidptr); 4366 ts = (TaskState *)env->opaque; 4367 if (flags & CLONE_SETTLS) 4368 cpu_set_tls (env, newtls); 4369 if (flags & CLONE_CHILD_CLEARTID) 4370 ts->child_tidptr = child_tidptr; 4371 #endif 4372 } else { 4373 fork_end(0); 4374 } 4375 } 4376 return ret; 4377 } 4378 4379 /* warning : doesn't handle linux specific flags... 
*/ 4380 static int target_to_host_fcntl_cmd(int cmd) 4381 { 4382 switch(cmd) { 4383 case TARGET_F_DUPFD: 4384 case TARGET_F_GETFD: 4385 case TARGET_F_SETFD: 4386 case TARGET_F_GETFL: 4387 case TARGET_F_SETFL: 4388 return cmd; 4389 case TARGET_F_GETLK: 4390 return F_GETLK; 4391 case TARGET_F_SETLK: 4392 return F_SETLK; 4393 case TARGET_F_SETLKW: 4394 return F_SETLKW; 4395 case TARGET_F_GETOWN: 4396 return F_GETOWN; 4397 case TARGET_F_SETOWN: 4398 return F_SETOWN; 4399 case TARGET_F_GETSIG: 4400 return F_GETSIG; 4401 case TARGET_F_SETSIG: 4402 return F_SETSIG; 4403 #if TARGET_ABI_BITS == 32 4404 case TARGET_F_GETLK64: 4405 return F_GETLK64; 4406 case TARGET_F_SETLK64: 4407 return F_SETLK64; 4408 case TARGET_F_SETLKW64: 4409 return F_SETLKW64; 4410 #endif 4411 case TARGET_F_SETLEASE: 4412 return F_SETLEASE; 4413 case TARGET_F_GETLEASE: 4414 return F_GETLEASE; 4415 #ifdef F_DUPFD_CLOEXEC 4416 case TARGET_F_DUPFD_CLOEXEC: 4417 return F_DUPFD_CLOEXEC; 4418 #endif 4419 case TARGET_F_NOTIFY: 4420 return F_NOTIFY; 4421 default: 4422 return -TARGET_EINVAL; 4423 } 4424 return -TARGET_EINVAL; 4425 } 4426 4427 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4428 { 4429 struct flock fl; 4430 struct target_flock *target_fl; 4431 struct flock64 fl64; 4432 struct target_flock64 *target_fl64; 4433 abi_long ret; 4434 int host_cmd = target_to_host_fcntl_cmd(cmd); 4435 4436 if (host_cmd == -TARGET_EINVAL) 4437 return host_cmd; 4438 4439 switch(cmd) { 4440 case TARGET_F_GETLK: 4441 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4442 return -TARGET_EFAULT; 4443 fl.l_type = tswap16(target_fl->l_type); 4444 fl.l_whence = tswap16(target_fl->l_whence); 4445 fl.l_start = tswapal(target_fl->l_start); 4446 fl.l_len = tswapal(target_fl->l_len); 4447 fl.l_pid = tswap32(target_fl->l_pid); 4448 unlock_user_struct(target_fl, arg, 0); 4449 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4450 if (ret == 0) { 4451 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4452 return -TARGET_EFAULT; 4453 target_fl->l_type = tswap16(fl.l_type); 4454 target_fl->l_whence = tswap16(fl.l_whence); 4455 target_fl->l_start = tswapal(fl.l_start); 4456 target_fl->l_len = tswapal(fl.l_len); 4457 target_fl->l_pid = tswap32(fl.l_pid); 4458 unlock_user_struct(target_fl, arg, 1); 4459 } 4460 break; 4461 4462 case TARGET_F_SETLK: 4463 case TARGET_F_SETLKW: 4464 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4465 return -TARGET_EFAULT; 4466 fl.l_type = tswap16(target_fl->l_type); 4467 fl.l_whence = tswap16(target_fl->l_whence); 4468 fl.l_start = tswapal(target_fl->l_start); 4469 fl.l_len = tswapal(target_fl->l_len); 4470 fl.l_pid = tswap32(target_fl->l_pid); 4471 unlock_user_struct(target_fl, arg, 0); 4472 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4473 break; 4474 4475 case TARGET_F_GETLK64: 4476 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4477 return -TARGET_EFAULT; 4478 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4479 fl64.l_whence = tswap16(target_fl64->l_whence); 4480 fl64.l_start = tswap64(target_fl64->l_start); 4481 fl64.l_len = tswap64(target_fl64->l_len); 4482 fl64.l_pid = tswap32(target_fl64->l_pid); 4483 unlock_user_struct(target_fl64, arg, 0); 4484 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4485 if (ret == 0) { 4486 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4487 return -TARGET_EFAULT; 4488 target_fl64->l_type = tswap16(fl64.l_type) >> 1; 4489 target_fl64->l_whence = tswap16(fl64.l_whence); 4490 target_fl64->l_start = tswap64(fl64.l_start); 4491 target_fl64->l_len = 
tswap64(fl64.l_len); 4492 target_fl64->l_pid = tswap32(fl64.l_pid); 4493 unlock_user_struct(target_fl64, arg, 1); 4494 } 4495 break; 4496 case TARGET_F_SETLK64: 4497 case TARGET_F_SETLKW64: 4498 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4499 return -TARGET_EFAULT; 4500 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4501 fl64.l_whence = tswap16(target_fl64->l_whence); 4502 fl64.l_start = tswap64(target_fl64->l_start); 4503 fl64.l_len = tswap64(target_fl64->l_len); 4504 fl64.l_pid = tswap32(target_fl64->l_pid); 4505 unlock_user_struct(target_fl64, arg, 0); 4506 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4507 break; 4508 4509 case TARGET_F_GETFL: 4510 ret = get_errno(fcntl(fd, host_cmd, arg)); 4511 if (ret >= 0) { 4512 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4513 } 4514 break; 4515 4516 case TARGET_F_SETFL: 4517 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4518 break; 4519 4520 case TARGET_F_SETOWN: 4521 case TARGET_F_GETOWN: 4522 case TARGET_F_SETSIG: 4523 case TARGET_F_GETSIG: 4524 case TARGET_F_SETLEASE: 4525 case TARGET_F_GETLEASE: 4526 ret = get_errno(fcntl(fd, host_cmd, arg)); 4527 break; 4528 4529 default: 4530 ret = get_errno(fcntl(fd, cmd, arg)); 4531 break; 4532 } 4533 return ret; 4534 } 4535 4536 #ifdef USE_UID16 4537 4538 static inline int high2lowuid(int uid) 4539 { 4540 if (uid > 65535) 4541 return 65534; 4542 else 4543 return uid; 4544 } 4545 4546 static inline int high2lowgid(int gid) 4547 { 4548 if (gid > 65535) 4549 return 65534; 4550 else 4551 return gid; 4552 } 4553 4554 static inline int low2highuid(int uid) 4555 { 4556 if ((int16_t)uid == -1) 4557 return -1; 4558 else 4559 return uid; 4560 } 4561 4562 static inline int low2highgid(int gid) 4563 { 4564 if ((int16_t)gid == -1) 4565 return -1; 4566 else 4567 return gid; 4568 } 4569 static inline int tswapid(int id) 4570 { 4571 return tswap16(id); 4572 } 4573 #else /* !USE_UID16 */ 4574 static inline int high2lowuid(int uid) 4575 { 4576 return uid; 4577 } 4578 static inline int high2lowgid(int gid) 4579 { 4580 return gid; 4581 } 4582 static inline int low2highuid(int uid) 4583 { 4584 return uid; 4585 } 4586 static inline int low2highgid(int gid) 4587 { 4588 return gid; 4589 } 4590 static inline int tswapid(int id) 4591 { 4592 return tswap32(id); 4593 } 4594 #endif /* USE_UID16 */ 4595 4596 void syscall_init(void) 4597 { 4598 IOCTLEntry *ie; 4599 const argtype *arg_type; 4600 int size; 4601 int i; 4602 4603 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4604 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4605 #include "syscall_types.h" 4606 #undef STRUCT 4607 #undef STRUCT_SPECIAL 4608 4609 /* we patch the ioctl size if necessary. We rely on the fact that 4610 no ioctl has all the bits at '1' in the size field */ 4611 ie = ioctl_entries; 4612 while (ie->target_cmd != 0) { 4613 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4614 TARGET_IOC_SIZEMASK) { 4615 arg_type = ie->arg_type; 4616 if (arg_type[0] != TYPE_PTR) { 4617 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4618 ie->target_cmd); 4619 exit(1); 4620 } 4621 arg_type++; 4622 size = thunk_type_size(arg_type, 0); 4623 ie->target_cmd = (ie->target_cmd & 4624 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4625 (size << TARGET_IOC_SIZESHIFT); 4626 } 4627 4628 /* Build target_to_host_errno_table[] table from 4629 * host_to_target_errno_table[]. 
*/ 4630 for (i=0; i < ERRNO_TABLE_SIZE; i++) 4631 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4632 4633 /* automatic consistency check if same arch */ 4634 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4635 (defined(__x86_64__) && defined(TARGET_X86_64)) 4636 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4637 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4638 ie->name, ie->target_cmd, ie->host_cmd); 4639 } 4640 #endif 4641 ie++; 4642 } 4643 } 4644 4645 #if TARGET_ABI_BITS == 32 4646 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4647 { 4648 #ifdef TARGET_WORDS_BIGENDIAN 4649 return ((uint64_t)word0 << 32) | word1; 4650 #else 4651 return ((uint64_t)word1 << 32) | word0; 4652 #endif 4653 } 4654 #else /* TARGET_ABI_BITS == 32 */ 4655 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4656 { 4657 return word0; 4658 } 4659 #endif /* TARGET_ABI_BITS != 32 */ 4660 4661 #ifdef TARGET_NR_truncate64 4662 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4663 abi_long arg2, 4664 abi_long arg3, 4665 abi_long arg4) 4666 { 4667 if (regpairs_aligned(cpu_env)) { 4668 arg2 = arg3; 4669 arg3 = arg4; 4670 } 4671 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4672 } 4673 #endif 4674 4675 #ifdef TARGET_NR_ftruncate64 4676 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4677 abi_long arg2, 4678 abi_long arg3, 4679 abi_long arg4) 4680 { 4681 if (regpairs_aligned(cpu_env)) { 4682 arg2 = arg3; 4683 arg3 = arg4; 4684 } 4685 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4686 } 4687 #endif 4688 4689 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4690 abi_ulong target_addr) 4691 { 4692 struct target_timespec *target_ts; 4693 4694 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4695 return -TARGET_EFAULT; 4696 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4697 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4698 unlock_user_struct(target_ts, target_addr, 0); 4699 return 0; 4700 } 4701 4702 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4703 struct timespec *host_ts) 4704 { 4705 struct target_timespec *target_ts; 4706 4707 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4708 return -TARGET_EFAULT; 4709 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4710 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4711 unlock_user_struct(target_ts, target_addr, 1); 4712 return 0; 4713 } 4714 4715 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4716 static inline abi_long host_to_target_stat64(void *cpu_env, 4717 abi_ulong target_addr, 4718 struct stat *host_st) 4719 { 4720 #ifdef TARGET_ARM 4721 if (((CPUARMState *)cpu_env)->eabi) { 4722 struct target_eabi_stat64 *target_st; 4723 4724 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4725 return -TARGET_EFAULT; 4726 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4727 __put_user(host_st->st_dev, &target_st->st_dev); 4728 __put_user(host_st->st_ino, &target_st->st_ino); 4729 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4730 __put_user(host_st->st_ino, &target_st->__st_ino); 4731 #endif 4732 __put_user(host_st->st_mode, &target_st->st_mode); 4733 __put_user(host_st->st_nlink, &target_st->st_nlink); 4734 __put_user(host_st->st_uid, &target_st->st_uid); 4735 __put_user(host_st->st_gid, &target_st->st_gid); 4736 __put_user(host_st->st_rdev, &target_st->st_rdev); 4737 
__put_user(host_st->st_size, &target_st->st_size); 4738 __put_user(host_st->st_blksize, &target_st->st_blksize); 4739 __put_user(host_st->st_blocks, &target_st->st_blocks); 4740 __put_user(host_st->st_atime, &target_st->target_st_atime); 4741 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4742 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4743 unlock_user_struct(target_st, target_addr, 1); 4744 } else 4745 #endif 4746 { 4747 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA) 4748 struct target_stat *target_st; 4749 #else 4750 struct target_stat64 *target_st; 4751 #endif 4752 4753 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4754 return -TARGET_EFAULT; 4755 memset(target_st, 0, sizeof(*target_st)); 4756 __put_user(host_st->st_dev, &target_st->st_dev); 4757 __put_user(host_st->st_ino, &target_st->st_ino); 4758 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4759 __put_user(host_st->st_ino, &target_st->__st_ino); 4760 #endif 4761 __put_user(host_st->st_mode, &target_st->st_mode); 4762 __put_user(host_st->st_nlink, &target_st->st_nlink); 4763 __put_user(host_st->st_uid, &target_st->st_uid); 4764 __put_user(host_st->st_gid, &target_st->st_gid); 4765 __put_user(host_st->st_rdev, &target_st->st_rdev); 4766 /* XXX: better use of kernel struct */ 4767 __put_user(host_st->st_size, &target_st->st_size); 4768 __put_user(host_st->st_blksize, &target_st->st_blksize); 4769 __put_user(host_st->st_blocks, &target_st->st_blocks); 4770 __put_user(host_st->st_atime, &target_st->target_st_atime); 4771 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4772 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4773 unlock_user_struct(target_st, target_addr, 1); 4774 } 4775 4776 return 0; 4777 } 4778 #endif 4779 4780 #if defined(CONFIG_USE_NPTL) 4781 /* ??? Using host futex calls even when target atomic operations 4782 are not really atomic probably breaks things. However implementing 4783 futexes locally would make futexes shared between multiple processes 4784 tricky. However they're probably useless because guest atomic 4785 operations won't work either. */ 4786 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4787 target_ulong uaddr2, int val3) 4788 { 4789 struct timespec ts, *pts; 4790 int base_op; 4791 4792 /* ??? We assume FUTEX_* constants are the same on both host 4793 and target. */ 4794 #ifdef FUTEX_CMD_MASK 4795 base_op = op & FUTEX_CMD_MASK; 4796 #else 4797 base_op = op; 4798 #endif 4799 switch (base_op) { 4800 case FUTEX_WAIT: 4801 if (timeout) { 4802 pts = &ts; 4803 target_to_host_timespec(pts, timeout); 4804 } else { 4805 pts = NULL; 4806 } 4807 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4808 pts, NULL, 0)); 4809 case FUTEX_WAKE: 4810 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4811 case FUTEX_FD: 4812 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4813 case FUTEX_REQUEUE: 4814 case FUTEX_CMP_REQUEUE: 4815 case FUTEX_WAKE_OP: 4816 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4817 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4818 But the prototype takes a `struct timespec *'; insert casts 4819 to satisfy the compiler. We do not need to tswap TIMEOUT 4820 since it's not compared to guest memory. */ 4821 pts = (struct timespec *)(uintptr_t) timeout; 4822 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4823 g2h(uaddr2), 4824 (base_op == FUTEX_CMP_REQUEUE 4825 ? 
tswap32(val3) 4826 : val3))); 4827 default: 4828 return -TARGET_ENOSYS; 4829 } 4830 } 4831 #endif 4832 4833 /* Map host to target signal numbers for the wait family of syscalls. 4834 Assume all other status bits are the same. */ 4835 static int host_to_target_waitstatus(int status) 4836 { 4837 if (WIFSIGNALED(status)) { 4838 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4839 } 4840 if (WIFSTOPPED(status)) { 4841 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4842 | (status & 0xff); 4843 } 4844 return status; 4845 } 4846 4847 int get_osversion(void) 4848 { 4849 static int osversion; 4850 struct new_utsname buf; 4851 const char *s; 4852 int i, n, tmp; 4853 if (osversion) 4854 return osversion; 4855 if (qemu_uname_release && *qemu_uname_release) { 4856 s = qemu_uname_release; 4857 } else { 4858 if (sys_uname(&buf)) 4859 return 0; 4860 s = buf.release; 4861 } 4862 tmp = 0; 4863 for (i = 0; i < 3; i++) { 4864 n = 0; 4865 while (*s >= '0' && *s <= '9') { 4866 n *= 10; 4867 n += *s - '0'; 4868 s++; 4869 } 4870 tmp = (tmp << 8) + n; 4871 if (*s == '.') 4872 s++; 4873 } 4874 osversion = tmp; 4875 return osversion; 4876 } 4877 4878 4879 static int open_self_maps(void *cpu_env, int fd) 4880 { 4881 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4882 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4883 #endif 4884 FILE *fp; 4885 char *line = NULL; 4886 size_t len = 0; 4887 ssize_t read; 4888 4889 fp = fopen("/proc/self/maps", "r"); 4890 if (fp == NULL) { 4891 return -EACCES; 4892 } 4893 4894 while ((read = getline(&line, &len, fp)) != -1) { 4895 int fields, dev_maj, dev_min, inode; 4896 uint64_t min, max, offset; 4897 char flag_r, flag_w, flag_x, flag_p; 4898 char path[512] = ""; 4899 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 4900 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 4901 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 4902 4903 if ((fields < 10) || (fields > 11)) { 4904 continue; 4905 } 4906 if (!strncmp(path, "[stack]", 7)) { 4907 continue; 4908 } 4909 if (h2g_valid(min) && h2g_valid(max)) { 4910 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 4911 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n", 4912 h2g(min), h2g(max), flag_r, flag_w, 4913 flag_x, flag_p, offset, dev_maj, dev_min, inode, 4914 path[0] ? " " : "", path); 4915 } 4916 } 4917 4918 free(line); 4919 fclose(fp); 4920 4921 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4922 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 4923 (unsigned long long)ts->info->stack_limit, 4924 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1)) 4925 & TARGET_PAGE_MASK, 4926 (unsigned long long)0); 4927 #endif 4928 4929 return 0; 4930 } 4931 4932 static int open_self_stat(void *cpu_env, int fd) 4933 { 4934 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4935 abi_ulong start_stack = ts->info->start_stack; 4936 int i; 4937 4938 for (i = 0; i < 44; i++) { 4939 char buf[128]; 4940 int len; 4941 uint64_t val = 0; 4942 4943 if (i == 0) { 4944 /* pid */ 4945 val = getpid(); 4946 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4947 } else if (i == 1) { 4948 /* app name */ 4949 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 4950 } else if (i == 27) { 4951 /* stack bottom */ 4952 val = start_stack; 4953 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4954 } else { 4955 /* for the rest, there is MasterCard */ 4956 snprintf(buf, sizeof(buf), "0%c", i == 43 ? 
'\n' : ' '); 4957 } 4958 4959 len = strlen(buf); 4960 if (write(fd, buf, len) != len) { 4961 return -1; 4962 } 4963 } 4964 4965 return 0; 4966 } 4967 4968 static int open_self_auxv(void *cpu_env, int fd) 4969 { 4970 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4971 abi_ulong auxv = ts->info->saved_auxv; 4972 abi_ulong len = ts->info->auxv_len; 4973 char *ptr; 4974 4975 /* 4976 * Auxiliary vector is stored in target process stack. 4977 * Read in the whole auxv vector and copy it to the file. 4978 */ 4979 ptr = lock_user(VERIFY_READ, auxv, len, 0); 4980 if (ptr != NULL) { 4981 while (len > 0) { 4982 ssize_t r; 4983 r = write(fd, ptr, len); 4984 if (r <= 0) { 4985 break; 4986 } 4987 len -= r; 4988 ptr += r; 4989 } 4990 lseek(fd, 0, SEEK_SET); 4991 unlock_user(ptr, auxv, len); 4992 } 4993 4994 return 0; 4995 } 4996 4997 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 4998 { 4999 struct fake_open { 5000 const char *filename; 5001 int (*fill)(void *cpu_env, int fd); 5002 }; 5003 const struct fake_open *fake_open; 5004 static const struct fake_open fakes[] = { 5005 { "/proc/self/maps", open_self_maps }, 5006 { "/proc/self/stat", open_self_stat }, 5007 { "/proc/self/auxv", open_self_auxv }, 5008 { NULL, NULL } 5009 }; 5010 5011 for (fake_open = fakes; fake_open->filename; fake_open++) { 5012 if (!strncmp(pathname, fake_open->filename, 5013 strlen(fake_open->filename))) { 5014 break; 5015 } 5016 } 5017 5018 if (fake_open->filename) { 5019 const char *tmpdir; 5020 char filename[PATH_MAX]; 5021 int fd, r; 5022 5023 /* create temporary file to map stat to */ 5024 tmpdir = getenv("TMPDIR"); 5025 if (!tmpdir) 5026 tmpdir = "/tmp"; 5027 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5028 fd = mkstemp(filename); 5029 if (fd < 0) { 5030 return fd; 5031 } 5032 unlink(filename); 5033 5034 if ((r = fake_open->fill(cpu_env, fd))) { 5035 close(fd); 5036 return r; 5037 } 5038 lseek(fd, 0, SEEK_SET); 5039 5040 return fd; 5041 } 5042 5043 return get_errno(open(path(pathname), flags, mode)); 5044 } 5045 5046 /* do_syscall() should always have a single exit point at the end so 5047 that actions, such as logging of syscall results, can be performed. 5048 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 5049 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5050 abi_long arg2, abi_long arg3, abi_long arg4, 5051 abi_long arg5, abi_long arg6, abi_long arg7, 5052 abi_long arg8) 5053 { 5054 abi_long ret; 5055 struct stat st; 5056 struct statfs stfs; 5057 void *p; 5058 5059 #ifdef DEBUG 5060 gemu_log("syscall %d", num); 5061 #endif 5062 if(do_strace) 5063 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5064 5065 switch(num) { 5066 case TARGET_NR_exit: 5067 #ifdef CONFIG_USE_NPTL 5068 /* In old applications this may be used to implement _exit(2). 5069 However in threaded applications it is used for thread termination, 5070 and _exit_group is used for application termination. 5071 Do thread termination if we have more than one thread. */ 5072 /* FIXME: This probably breaks if a signal arrives. We should probably 5073 be disabling signals. */ 5074 if (first_cpu->next_cpu) { 5075 TaskState *ts; 5076 CPUArchState **lastp; 5077 CPUArchState *p; 5078 5079 cpu_list_lock(); 5080 lastp = &first_cpu; 5081 p = first_cpu; 5082 while (p && p != (CPUArchState *)cpu_env) { 5083 lastp = &p->next_cpu; 5084 p = p->next_cpu; 5085 } 5086 /* If we didn't find the CPU for this thread then something is 5087 horribly wrong.
*/ 5088 if (!p) 5089 abort(); 5090 /* Remove the CPU from the list. */ 5091 *lastp = p->next_cpu; 5092 cpu_list_unlock(); 5093 ts = ((CPUArchState *)cpu_env)->opaque; 5094 if (ts->child_tidptr) { 5095 put_user_u32(0, ts->child_tidptr); 5096 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5097 NULL, NULL, 0); 5098 } 5099 thread_env = NULL; 5100 object_delete(OBJECT(ENV_GET_CPU(cpu_env))); 5101 g_free(ts); 5102 pthread_exit(NULL); 5103 } 5104 #endif 5105 #ifdef TARGET_GPROF 5106 _mcleanup(); 5107 #endif 5108 gdb_exit(cpu_env, arg1); 5109 _exit(arg1); 5110 ret = 0; /* avoid warning */ 5111 break; 5112 case TARGET_NR_read: 5113 if (arg3 == 0) 5114 ret = 0; 5115 else { 5116 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5117 goto efault; 5118 ret = get_errno(read(arg1, p, arg3)); 5119 unlock_user(p, arg2, ret); 5120 } 5121 break; 5122 case TARGET_NR_write: 5123 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5124 goto efault; 5125 ret = get_errno(write(arg1, p, arg3)); 5126 unlock_user(p, arg2, 0); 5127 break; 5128 case TARGET_NR_open: 5129 if (!(p = lock_user_string(arg1))) 5130 goto efault; 5131 ret = get_errno(do_open(cpu_env, p, 5132 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5133 arg3)); 5134 unlock_user(p, arg1, 0); 5135 break; 5136 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5137 case TARGET_NR_openat: 5138 if (!(p = lock_user_string(arg2))) 5139 goto efault; 5140 ret = get_errno(sys_openat(arg1, 5141 path(p), 5142 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5143 arg4)); 5144 unlock_user(p, arg2, 0); 5145 break; 5146 #endif 5147 case TARGET_NR_close: 5148 ret = get_errno(close(arg1)); 5149 break; 5150 case TARGET_NR_brk: 5151 ret = do_brk(arg1); 5152 break; 5153 case TARGET_NR_fork: 5154 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5155 break; 5156 #ifdef TARGET_NR_waitpid 5157 case TARGET_NR_waitpid: 5158 { 5159 int status; 5160 ret = get_errno(waitpid(arg1, &status, arg3)); 5161 if (!is_error(ret) && arg2 && ret 5162 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5163 goto efault; 5164 } 5165 break; 5166 #endif 5167 #ifdef TARGET_NR_waitid 5168 case TARGET_NR_waitid: 5169 { 5170 siginfo_t info; 5171 info.si_pid = 0; 5172 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5173 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5174 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5175 goto efault; 5176 host_to_target_siginfo(p, &info); 5177 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5178 } 5179 } 5180 break; 5181 #endif 5182 #ifdef TARGET_NR_creat /* not on alpha */ 5183 case TARGET_NR_creat: 5184 if (!(p = lock_user_string(arg1))) 5185 goto efault; 5186 ret = get_errno(creat(p, arg2)); 5187 unlock_user(p, arg1, 0); 5188 break; 5189 #endif 5190 case TARGET_NR_link: 5191 { 5192 void * p2; 5193 p = lock_user_string(arg1); 5194 p2 = lock_user_string(arg2); 5195 if (!p || !p2) 5196 ret = -TARGET_EFAULT; 5197 else 5198 ret = get_errno(link(p, p2)); 5199 unlock_user(p2, arg2, 0); 5200 unlock_user(p, arg1, 0); 5201 } 5202 break; 5203 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 5204 case TARGET_NR_linkat: 5205 { 5206 void * p2 = NULL; 5207 if (!arg2 || !arg4) 5208 goto efault; 5209 p = lock_user_string(arg2); 5210 p2 = lock_user_string(arg4); 5211 if (!p || !p2) 5212 ret = -TARGET_EFAULT; 5213 else 5214 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); 5215 unlock_user(p, arg2, 0); 5216 unlock_user(p2, arg4, 0); 5217 } 5218 break; 5219 #endif 5220 case TARGET_NR_unlink: 5221 if (!(p = 
lock_user_string(arg1))) 5222 goto efault; 5223 ret = get_errno(unlink(p)); 5224 unlock_user(p, arg1, 0); 5225 break; 5226 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 5227 case TARGET_NR_unlinkat: 5228 if (!(p = lock_user_string(arg2))) 5229 goto efault; 5230 ret = get_errno(sys_unlinkat(arg1, p, arg3)); 5231 unlock_user(p, arg2, 0); 5232 break; 5233 #endif 5234 case TARGET_NR_execve: 5235 { 5236 char **argp, **envp; 5237 int argc, envc; 5238 abi_ulong gp; 5239 abi_ulong guest_argp; 5240 abi_ulong guest_envp; 5241 abi_ulong addr; 5242 char **q; 5243 int total_size = 0; 5244 5245 argc = 0; 5246 guest_argp = arg2; 5247 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5248 if (get_user_ual(addr, gp)) 5249 goto efault; 5250 if (!addr) 5251 break; 5252 argc++; 5253 } 5254 envc = 0; 5255 guest_envp = arg3; 5256 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5257 if (get_user_ual(addr, gp)) 5258 goto efault; 5259 if (!addr) 5260 break; 5261 envc++; 5262 } 5263 5264 argp = alloca((argc + 1) * sizeof(void *)); 5265 envp = alloca((envc + 1) * sizeof(void *)); 5266 5267 for (gp = guest_argp, q = argp; gp; 5268 gp += sizeof(abi_ulong), q++) { 5269 if (get_user_ual(addr, gp)) 5270 goto execve_efault; 5271 if (!addr) 5272 break; 5273 if (!(*q = lock_user_string(addr))) 5274 goto execve_efault; 5275 total_size += strlen(*q) + 1; 5276 } 5277 *q = NULL; 5278 5279 for (gp = guest_envp, q = envp; gp; 5280 gp += sizeof(abi_ulong), q++) { 5281 if (get_user_ual(addr, gp)) 5282 goto execve_efault; 5283 if (!addr) 5284 break; 5285 if (!(*q = lock_user_string(addr))) 5286 goto execve_efault; 5287 total_size += strlen(*q) + 1; 5288 } 5289 *q = NULL; 5290 5291 /* This case will not be caught by the host's execve() if its 5292 page size is bigger than the target's. 
*/ 5293 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5294 ret = -TARGET_E2BIG; 5295 goto execve_end; 5296 } 5297 if (!(p = lock_user_string(arg1))) 5298 goto execve_efault; 5299 ret = get_errno(execve(p, argp, envp)); 5300 unlock_user(p, arg1, 0); 5301 5302 goto execve_end; 5303 5304 execve_efault: 5305 ret = -TARGET_EFAULT; 5306 5307 execve_end: 5308 for (gp = guest_argp, q = argp; *q; 5309 gp += sizeof(abi_ulong), q++) { 5310 if (get_user_ual(addr, gp) 5311 || !addr) 5312 break; 5313 unlock_user(*q, addr, 0); 5314 } 5315 for (gp = guest_envp, q = envp; *q; 5316 gp += sizeof(abi_ulong), q++) { 5317 if (get_user_ual(addr, gp) 5318 || !addr) 5319 break; 5320 unlock_user(*q, addr, 0); 5321 } 5322 } 5323 break; 5324 case TARGET_NR_chdir: 5325 if (!(p = lock_user_string(arg1))) 5326 goto efault; 5327 ret = get_errno(chdir(p)); 5328 unlock_user(p, arg1, 0); 5329 break; 5330 #ifdef TARGET_NR_time 5331 case TARGET_NR_time: 5332 { 5333 time_t host_time; 5334 ret = get_errno(time(&host_time)); 5335 if (!is_error(ret) 5336 && arg1 5337 && put_user_sal(host_time, arg1)) 5338 goto efault; 5339 } 5340 break; 5341 #endif 5342 case TARGET_NR_mknod: 5343 if (!(p = lock_user_string(arg1))) 5344 goto efault; 5345 ret = get_errno(mknod(p, arg2, arg3)); 5346 unlock_user(p, arg1, 0); 5347 break; 5348 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 5349 case TARGET_NR_mknodat: 5350 if (!(p = lock_user_string(arg2))) 5351 goto efault; 5352 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); 5353 unlock_user(p, arg2, 0); 5354 break; 5355 #endif 5356 case TARGET_NR_chmod: 5357 if (!(p = lock_user_string(arg1))) 5358 goto efault; 5359 ret = get_errno(chmod(p, arg2)); 5360 unlock_user(p, arg1, 0); 5361 break; 5362 #ifdef TARGET_NR_break 5363 case TARGET_NR_break: 5364 goto unimplemented; 5365 #endif 5366 #ifdef TARGET_NR_oldstat 5367 case TARGET_NR_oldstat: 5368 goto unimplemented; 5369 #endif 5370 case TARGET_NR_lseek: 5371 ret = get_errno(lseek(arg1, arg2, arg3)); 5372 break; 5373 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5374 /* Alpha specific */ 5375 case TARGET_NR_getxpid: 5376 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5377 ret = get_errno(getpid()); 5378 break; 5379 #endif 5380 #ifdef TARGET_NR_getpid 5381 case TARGET_NR_getpid: 5382 ret = get_errno(getpid()); 5383 break; 5384 #endif 5385 case TARGET_NR_mount: 5386 { 5387 /* need to look at the data field */ 5388 void *p2, *p3; 5389 p = lock_user_string(arg1); 5390 p2 = lock_user_string(arg2); 5391 p3 = lock_user_string(arg3); 5392 if (!p || !p2 || !p3) 5393 ret = -TARGET_EFAULT; 5394 else { 5395 /* FIXME - arg5 should be locked, but it isn't clear how to 5396 * do that since it's not guaranteed to be a NULL-terminated 5397 * string. 5398 */ 5399 if ( ! 
arg5 ) 5400 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5401 else 5402 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5403 } 5404 unlock_user(p, arg1, 0); 5405 unlock_user(p2, arg2, 0); 5406 unlock_user(p3, arg3, 0); 5407 break; 5408 } 5409 #ifdef TARGET_NR_umount 5410 case TARGET_NR_umount: 5411 if (!(p = lock_user_string(arg1))) 5412 goto efault; 5413 ret = get_errno(umount(p)); 5414 unlock_user(p, arg1, 0); 5415 break; 5416 #endif 5417 #ifdef TARGET_NR_stime /* not on alpha */ 5418 case TARGET_NR_stime: 5419 { 5420 time_t host_time; 5421 if (get_user_sal(host_time, arg1)) 5422 goto efault; 5423 ret = get_errno(stime(&host_time)); 5424 } 5425 break; 5426 #endif 5427 case TARGET_NR_ptrace: 5428 goto unimplemented; 5429 #ifdef TARGET_NR_alarm /* not on alpha */ 5430 case TARGET_NR_alarm: 5431 ret = alarm(arg1); 5432 break; 5433 #endif 5434 #ifdef TARGET_NR_oldfstat 5435 case TARGET_NR_oldfstat: 5436 goto unimplemented; 5437 #endif 5438 #ifdef TARGET_NR_pause /* not on alpha */ 5439 case TARGET_NR_pause: 5440 ret = get_errno(pause()); 5441 break; 5442 #endif 5443 #ifdef TARGET_NR_utime 5444 case TARGET_NR_utime: 5445 { 5446 struct utimbuf tbuf, *host_tbuf; 5447 struct target_utimbuf *target_tbuf; 5448 if (arg2) { 5449 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5450 goto efault; 5451 tbuf.actime = tswapal(target_tbuf->actime); 5452 tbuf.modtime = tswapal(target_tbuf->modtime); 5453 unlock_user_struct(target_tbuf, arg2, 0); 5454 host_tbuf = &tbuf; 5455 } else { 5456 host_tbuf = NULL; 5457 } 5458 if (!(p = lock_user_string(arg1))) 5459 goto efault; 5460 ret = get_errno(utime(p, host_tbuf)); 5461 unlock_user(p, arg1, 0); 5462 } 5463 break; 5464 #endif 5465 case TARGET_NR_utimes: 5466 { 5467 struct timeval *tvp, tv[2]; 5468 if (arg2) { 5469 if (copy_from_user_timeval(&tv[0], arg2) 5470 || copy_from_user_timeval(&tv[1], 5471 arg2 + sizeof(struct target_timeval))) 5472 goto efault; 5473 tvp = tv; 5474 } else { 5475 tvp = NULL; 5476 } 5477 if (!(p = lock_user_string(arg1))) 5478 goto efault; 5479 ret = get_errno(utimes(p, tvp)); 5480 unlock_user(p, arg1, 0); 5481 } 5482 break; 5483 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 5484 case TARGET_NR_futimesat: 5485 { 5486 struct timeval *tvp, tv[2]; 5487 if (arg3) { 5488 if (copy_from_user_timeval(&tv[0], arg3) 5489 || copy_from_user_timeval(&tv[1], 5490 arg3 + sizeof(struct target_timeval))) 5491 goto efault; 5492 tvp = tv; 5493 } else { 5494 tvp = NULL; 5495 } 5496 if (!(p = lock_user_string(arg2))) 5497 goto efault; 5498 ret = get_errno(sys_futimesat(arg1, path(p), tvp)); 5499 unlock_user(p, arg2, 0); 5500 } 5501 break; 5502 #endif 5503 #ifdef TARGET_NR_stty 5504 case TARGET_NR_stty: 5505 goto unimplemented; 5506 #endif 5507 #ifdef TARGET_NR_gtty 5508 case TARGET_NR_gtty: 5509 goto unimplemented; 5510 #endif 5511 case TARGET_NR_access: 5512 if (!(p = lock_user_string(arg1))) 5513 goto efault; 5514 ret = get_errno(access(path(p), arg2)); 5515 unlock_user(p, arg1, 0); 5516 break; 5517 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5518 case TARGET_NR_faccessat: 5519 if (!(p = lock_user_string(arg2))) 5520 goto efault; 5521 ret = get_errno(sys_faccessat(arg1, p, arg3)); 5522 unlock_user(p, arg2, 0); 5523 break; 5524 #endif 5525 #ifdef TARGET_NR_nice /* not on alpha */ 5526 case TARGET_NR_nice: 5527 ret = get_errno(nice(arg1)); 5528 break; 5529 #endif 5530 #ifdef TARGET_NR_ftime 5531 case TARGET_NR_ftime: 5532 goto unimplemented; 5533 #endif 5534 case TARGET_NR_sync: 5535 
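/* The sync() library call returns no status, so the syscall always reports success. */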
sync(); 5536 ret = 0; 5537 break; 5538 case TARGET_NR_kill: 5539 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5540 break; 5541 case TARGET_NR_rename: 5542 { 5543 void *p2; 5544 p = lock_user_string(arg1); 5545 p2 = lock_user_string(arg2); 5546 if (!p || !p2) 5547 ret = -TARGET_EFAULT; 5548 else 5549 ret = get_errno(rename(p, p2)); 5550 unlock_user(p2, arg2, 0); 5551 unlock_user(p, arg1, 0); 5552 } 5553 break; 5554 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 5555 case TARGET_NR_renameat: 5556 { 5557 void *p2; 5558 p = lock_user_string(arg2); 5559 p2 = lock_user_string(arg4); 5560 if (!p || !p2) 5561 ret = -TARGET_EFAULT; 5562 else 5563 ret = get_errno(sys_renameat(arg1, p, arg3, p2)); 5564 unlock_user(p2, arg4, 0); 5565 unlock_user(p, arg2, 0); 5566 } 5567 break; 5568 #endif 5569 case TARGET_NR_mkdir: 5570 if (!(p = lock_user_string(arg1))) 5571 goto efault; 5572 ret = get_errno(mkdir(p, arg2)); 5573 unlock_user(p, arg1, 0); 5574 break; 5575 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 5576 case TARGET_NR_mkdirat: 5577 if (!(p = lock_user_string(arg2))) 5578 goto efault; 5579 ret = get_errno(sys_mkdirat(arg1, p, arg3)); 5580 unlock_user(p, arg2, 0); 5581 break; 5582 #endif 5583 case TARGET_NR_rmdir: 5584 if (!(p = lock_user_string(arg1))) 5585 goto efault; 5586 ret = get_errno(rmdir(p)); 5587 unlock_user(p, arg1, 0); 5588 break; 5589 case TARGET_NR_dup: 5590 ret = get_errno(dup(arg1)); 5591 break; 5592 case TARGET_NR_pipe: 5593 ret = do_pipe(cpu_env, arg1, 0, 0); 5594 break; 5595 #ifdef TARGET_NR_pipe2 5596 case TARGET_NR_pipe2: 5597 ret = do_pipe(cpu_env, arg1, 5598 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5599 break; 5600 #endif 5601 case TARGET_NR_times: 5602 { 5603 struct target_tms *tmsp; 5604 struct tms tms; 5605 ret = get_errno(times(&tms)); 5606 if (arg1) { 5607 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5608 if (!tmsp) 5609 goto efault; 5610 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5611 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5612 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5613 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5614 } 5615 if (!is_error(ret)) 5616 ret = host_to_target_clock_t(ret); 5617 } 5618 break; 5619 #ifdef TARGET_NR_prof 5620 case TARGET_NR_prof: 5621 goto unimplemented; 5622 #endif 5623 #ifdef TARGET_NR_signal 5624 case TARGET_NR_signal: 5625 goto unimplemented; 5626 #endif 5627 case TARGET_NR_acct: 5628 if (arg1 == 0) { 5629 ret = get_errno(acct(NULL)); 5630 } else { 5631 if (!(p = lock_user_string(arg1))) 5632 goto efault; 5633 ret = get_errno(acct(path(p))); 5634 unlock_user(p, arg1, 0); 5635 } 5636 break; 5637 #ifdef TARGET_NR_umount2 /* not on alpha */ 5638 case TARGET_NR_umount2: 5639 if (!(p = lock_user_string(arg1))) 5640 goto efault; 5641 ret = get_errno(umount2(p, arg2)); 5642 unlock_user(p, arg1, 0); 5643 break; 5644 #endif 5645 #ifdef TARGET_NR_lock 5646 case TARGET_NR_lock: 5647 goto unimplemented; 5648 #endif 5649 case TARGET_NR_ioctl: 5650 ret = do_ioctl(arg1, arg2, arg3); 5651 break; 5652 case TARGET_NR_fcntl: 5653 ret = do_fcntl(arg1, arg2, arg3); 5654 break; 5655 #ifdef TARGET_NR_mpx 5656 case TARGET_NR_mpx: 5657 goto unimplemented; 5658 #endif 5659 case TARGET_NR_setpgid: 5660 ret = get_errno(setpgid(arg1, arg2)); 5661 break; 5662 #ifdef TARGET_NR_ulimit 5663 case TARGET_NR_ulimit: 5664 goto unimplemented; 5665 #endif 5666 #ifdef TARGET_NR_oldolduname 5667 case 
TARGET_NR_oldolduname: 5668 goto unimplemented; 5669 #endif 5670 case TARGET_NR_umask: 5671 ret = get_errno(umask(arg1)); 5672 break; 5673 case TARGET_NR_chroot: 5674 if (!(p = lock_user_string(arg1))) 5675 goto efault; 5676 ret = get_errno(chroot(p)); 5677 unlock_user(p, arg1, 0); 5678 break; 5679 case TARGET_NR_ustat: 5680 goto unimplemented; 5681 case TARGET_NR_dup2: 5682 ret = get_errno(dup2(arg1, arg2)); 5683 break; 5684 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5685 case TARGET_NR_dup3: 5686 ret = get_errno(dup3(arg1, arg2, arg3)); 5687 break; 5688 #endif 5689 #ifdef TARGET_NR_getppid /* not on alpha */ 5690 case TARGET_NR_getppid: 5691 ret = get_errno(getppid()); 5692 break; 5693 #endif 5694 case TARGET_NR_getpgrp: 5695 ret = get_errno(getpgrp()); 5696 break; 5697 case TARGET_NR_setsid: 5698 ret = get_errno(setsid()); 5699 break; 5700 #ifdef TARGET_NR_sigaction 5701 case TARGET_NR_sigaction: 5702 { 5703 #if defined(TARGET_ALPHA) 5704 struct target_sigaction act, oact, *pact = 0; 5705 struct target_old_sigaction *old_act; 5706 if (arg2) { 5707 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5708 goto efault; 5709 act._sa_handler = old_act->_sa_handler; 5710 target_siginitset(&act.sa_mask, old_act->sa_mask); 5711 act.sa_flags = old_act->sa_flags; 5712 act.sa_restorer = 0; 5713 unlock_user_struct(old_act, arg2, 0); 5714 pact = &act; 5715 } 5716 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5717 if (!is_error(ret) && arg3) { 5718 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5719 goto efault; 5720 old_act->_sa_handler = oact._sa_handler; 5721 old_act->sa_mask = oact.sa_mask.sig[0]; 5722 old_act->sa_flags = oact.sa_flags; 5723 unlock_user_struct(old_act, arg3, 1); 5724 } 5725 #elif defined(TARGET_MIPS) 5726 struct target_sigaction act, oact, *pact, *old_act; 5727 5728 if (arg2) { 5729 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5730 goto efault; 5731 act._sa_handler = old_act->_sa_handler; 5732 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5733 act.sa_flags = old_act->sa_flags; 5734 unlock_user_struct(old_act, arg2, 0); 5735 pact = &act; 5736 } else { 5737 pact = NULL; 5738 } 5739 5740 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5741 5742 if (!is_error(ret) && arg3) { 5743 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5744 goto efault; 5745 old_act->_sa_handler = oact._sa_handler; 5746 old_act->sa_flags = oact.sa_flags; 5747 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5748 old_act->sa_mask.sig[1] = 0; 5749 old_act->sa_mask.sig[2] = 0; 5750 old_act->sa_mask.sig[3] = 0; 5751 unlock_user_struct(old_act, arg3, 1); 5752 } 5753 #else 5754 struct target_old_sigaction *old_act; 5755 struct target_sigaction act, oact, *pact; 5756 if (arg2) { 5757 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5758 goto efault; 5759 act._sa_handler = old_act->_sa_handler; 5760 target_siginitset(&act.sa_mask, old_act->sa_mask); 5761 act.sa_flags = old_act->sa_flags; 5762 act.sa_restorer = old_act->sa_restorer; 5763 unlock_user_struct(old_act, arg2, 0); 5764 pact = &act; 5765 } else { 5766 pact = NULL; 5767 } 5768 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5769 if (!is_error(ret) && arg3) { 5770 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5771 goto efault; 5772 old_act->_sa_handler = oact._sa_handler; 5773 old_act->sa_mask = oact.sa_mask.sig[0]; 5774 old_act->sa_flags = oact.sa_flags; 5775 old_act->sa_restorer = oact.sa_restorer; 5776 unlock_user_struct(old_act, arg3, 1); 5777 } 5778 #endif 5779 } 5780 break; 5781 #endif 
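/* rt_sigaction: like sigaction above, but with a full sigset_t.  On Alpha
   the guest passes a struct target_rt_sigaction (with the restorer in arg5);
   other targets hand us a struct target_sigaction directly. */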
5782 case TARGET_NR_rt_sigaction: 5783 { 5784 #if defined(TARGET_ALPHA) 5785 struct target_sigaction act, oact, *pact = 0; 5786 struct target_rt_sigaction *rt_act; 5787 /* ??? arg4 == sizeof(sigset_t). */ 5788 if (arg2) { 5789 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5790 goto efault; 5791 act._sa_handler = rt_act->_sa_handler; 5792 act.sa_mask = rt_act->sa_mask; 5793 act.sa_flags = rt_act->sa_flags; 5794 act.sa_restorer = arg5; 5795 unlock_user_struct(rt_act, arg2, 0); 5796 pact = &act; 5797 } 5798 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5799 if (!is_error(ret) && arg3) { 5800 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5801 goto efault; 5802 rt_act->_sa_handler = oact._sa_handler; 5803 rt_act->sa_mask = oact.sa_mask; 5804 rt_act->sa_flags = oact.sa_flags; 5805 unlock_user_struct(rt_act, arg3, 1); 5806 } 5807 #else 5808 struct target_sigaction *act; 5809 struct target_sigaction *oact; 5810 5811 if (arg2) { 5812 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5813 goto efault; 5814 } else 5815 act = NULL; 5816 if (arg3) { 5817 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5818 ret = -TARGET_EFAULT; 5819 goto rt_sigaction_fail; 5820 } 5821 } else 5822 oact = NULL; 5823 ret = get_errno(do_sigaction(arg1, act, oact)); 5824 rt_sigaction_fail: 5825 if (act) 5826 unlock_user_struct(act, arg2, 0); 5827 if (oact) 5828 unlock_user_struct(oact, arg3, 1); 5829 #endif 5830 } 5831 break; 5832 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5833 case TARGET_NR_sgetmask: 5834 { 5835 sigset_t cur_set; 5836 abi_ulong target_set; 5837 sigprocmask(0, NULL, &cur_set); 5838 host_to_target_old_sigset(&target_set, &cur_set); 5839 ret = target_set; 5840 } 5841 break; 5842 #endif 5843 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5844 case TARGET_NR_ssetmask: 5845 { 5846 sigset_t set, oset, cur_set; 5847 abi_ulong target_set = arg1; 5848 sigprocmask(0, NULL, &cur_set); 5849 target_to_host_old_sigset(&set, &target_set); 5850 sigorset(&set, &set, &cur_set); 5851 sigprocmask(SIG_SETMASK, &set, &oset); 5852 host_to_target_old_sigset(&target_set, &oset); 5853 ret = target_set; 5854 } 5855 break; 5856 #endif 5857 #ifdef TARGET_NR_sigprocmask 5858 case TARGET_NR_sigprocmask: 5859 { 5860 #if defined(TARGET_ALPHA) 5861 sigset_t set, oldset; 5862 abi_ulong mask; 5863 int how; 5864 5865 switch (arg1) { 5866 case TARGET_SIG_BLOCK: 5867 how = SIG_BLOCK; 5868 break; 5869 case TARGET_SIG_UNBLOCK: 5870 how = SIG_UNBLOCK; 5871 break; 5872 case TARGET_SIG_SETMASK: 5873 how = SIG_SETMASK; 5874 break; 5875 default: 5876 ret = -TARGET_EINVAL; 5877 goto fail; 5878 } 5879 mask = arg2; 5880 target_to_host_old_sigset(&set, &mask); 5881 5882 ret = get_errno(sigprocmask(how, &set, &oldset)); 5883 if (!is_error(ret)) { 5884 host_to_target_old_sigset(&mask, &oldset); 5885 ret = mask; 5886 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 5887 } 5888 #else 5889 sigset_t set, oldset, *set_ptr; 5890 int how; 5891 5892 if (arg2) { 5893 switch (arg1) { 5894 case TARGET_SIG_BLOCK: 5895 how = SIG_BLOCK; 5896 break; 5897 case TARGET_SIG_UNBLOCK: 5898 how = SIG_UNBLOCK; 5899 break; 5900 case TARGET_SIG_SETMASK: 5901 how = SIG_SETMASK; 5902 break; 5903 default: 5904 ret = -TARGET_EINVAL; 5905 goto fail; 5906 } 5907 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5908 goto efault; 5909 target_to_host_old_sigset(&set, p); 5910 unlock_user(p, arg2, 0); 5911 set_ptr = &set; 5912 } else { 5913 how = 0; 5914 set_ptr = NULL; 5915 } 5916 ret = get_errno(sigprocmask(how, set_ptr, 
&oldset)); 5917 if (!is_error(ret) && arg3) { 5918 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 5919 goto efault; 5920 host_to_target_old_sigset(p, &oldset); 5921 unlock_user(p, arg3, sizeof(target_sigset_t)); 5922 } 5923 #endif 5924 } 5925 break; 5926 #endif 5927 case TARGET_NR_rt_sigprocmask: 5928 { 5929 int how = arg1; 5930 sigset_t set, oldset, *set_ptr; 5931 5932 if (arg2) { 5933 switch(how) { 5934 case TARGET_SIG_BLOCK: 5935 how = SIG_BLOCK; 5936 break; 5937 case TARGET_SIG_UNBLOCK: 5938 how = SIG_UNBLOCK; 5939 break; 5940 case TARGET_SIG_SETMASK: 5941 how = SIG_SETMASK; 5942 break; 5943 default: 5944 ret = -TARGET_EINVAL; 5945 goto fail; 5946 } 5947 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5948 goto efault; 5949 target_to_host_sigset(&set, p); 5950 unlock_user(p, arg2, 0); 5951 set_ptr = &set; 5952 } else { 5953 how = 0; 5954 set_ptr = NULL; 5955 } 5956 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 5957 if (!is_error(ret) && arg3) { 5958 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 5959 goto efault; 5960 host_to_target_sigset(p, &oldset); 5961 unlock_user(p, arg3, sizeof(target_sigset_t)); 5962 } 5963 } 5964 break; 5965 #ifdef TARGET_NR_sigpending 5966 case TARGET_NR_sigpending: 5967 { 5968 sigset_t set; 5969 ret = get_errno(sigpending(&set)); 5970 if (!is_error(ret)) { 5971 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 5972 goto efault; 5973 host_to_target_old_sigset(p, &set); 5974 unlock_user(p, arg1, sizeof(target_sigset_t)); 5975 } 5976 } 5977 break; 5978 #endif 5979 case TARGET_NR_rt_sigpending: 5980 { 5981 sigset_t set; 5982 ret = get_errno(sigpending(&set)); 5983 if (!is_error(ret)) { 5984 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 5985 goto efault; 5986 host_to_target_sigset(p, &set); 5987 unlock_user(p, arg1, sizeof(target_sigset_t)); 5988 } 5989 } 5990 break; 5991 #ifdef TARGET_NR_sigsuspend 5992 case TARGET_NR_sigsuspend: 5993 { 5994 sigset_t set; 5995 #if defined(TARGET_ALPHA) 5996 abi_ulong mask = arg1; 5997 target_to_host_old_sigset(&set, &mask); 5998 #else 5999 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6000 goto efault; 6001 target_to_host_old_sigset(&set, p); 6002 unlock_user(p, arg1, 0); 6003 #endif 6004 ret = get_errno(sigsuspend(&set)); 6005 } 6006 break; 6007 #endif 6008 case TARGET_NR_rt_sigsuspend: 6009 { 6010 sigset_t set; 6011 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6012 goto efault; 6013 target_to_host_sigset(&set, p); 6014 unlock_user(p, arg1, 0); 6015 ret = get_errno(sigsuspend(&set)); 6016 } 6017 break; 6018 case TARGET_NR_rt_sigtimedwait: 6019 { 6020 sigset_t set; 6021 struct timespec uts, *puts; 6022 siginfo_t uinfo; 6023 6024 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6025 goto efault; 6026 target_to_host_sigset(&set, p); 6027 unlock_user(p, arg1, 0); 6028 if (arg3) { 6029 puts = &uts; 6030 target_to_host_timespec(puts, arg3); 6031 } else { 6032 puts = NULL; 6033 } 6034 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6035 if (!is_error(ret) && arg2) { 6036 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 6037 goto efault; 6038 host_to_target_siginfo(p, &uinfo); 6039 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6040 } 6041 } 6042 break; 6043 case TARGET_NR_rt_sigqueueinfo: 6044 { 6045 siginfo_t uinfo; 6046 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6047 goto efault; 6048 
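    /* A sketch of what the conversion below is assumed to involve
     * (illustrative only, field names from the usual siginfo layout):
     *
     *     uinfo.si_signo = tswap32(target_info->si_signo);
     *     uinfo.si_errno = tswap32(target_info->si_errno);
     *     uinfo.si_code  = tswap32(target_info->si_code);
     *
     * plus the union payload, before the result is handed to
     * sys_rt_sigqueueinfo() on the host side. */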
target_to_host_siginfo(&uinfo, p); 6049 unlock_user(p, arg3, 0); 6050 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6051 } 6052 break; 6053 #ifdef TARGET_NR_sigreturn 6054 case TARGET_NR_sigreturn: 6055 /* NOTE: ret is eax, so no transcoding needs to be done */ 6056 ret = do_sigreturn(cpu_env); 6057 break; 6058 #endif 6059 case TARGET_NR_rt_sigreturn: 6060 /* NOTE: ret is eax, so no transcoding needs to be done */ 6061 ret = do_rt_sigreturn(cpu_env); 6062 break; 6063 case TARGET_NR_sethostname: 6064 if (!(p = lock_user_string(arg1))) 6065 goto efault; 6066 ret = get_errno(sethostname(p, arg2)); 6067 unlock_user(p, arg1, 0); 6068 break; 6069 case TARGET_NR_setrlimit: 6070 { 6071 int resource = target_to_host_resource(arg1); 6072 struct target_rlimit *target_rlim; 6073 struct rlimit rlim; 6074 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6075 goto efault; 6076 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6077 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6078 unlock_user_struct(target_rlim, arg2, 0); 6079 ret = get_errno(setrlimit(resource, &rlim)); 6080 } 6081 break; 6082 case TARGET_NR_getrlimit: 6083 { 6084 int resource = target_to_host_resource(arg1); 6085 struct target_rlimit *target_rlim; 6086 struct rlimit rlim; 6087 6088 ret = get_errno(getrlimit(resource, &rlim)); 6089 if (!is_error(ret)) { 6090 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6091 goto efault; 6092 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6093 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6094 unlock_user_struct(target_rlim, arg2, 1); 6095 } 6096 } 6097 break; 6098 case TARGET_NR_getrusage: 6099 { 6100 struct rusage rusage; 6101 ret = get_errno(getrusage(arg1, &rusage)); 6102 if (!is_error(ret)) { 6103 host_to_target_rusage(arg2, &rusage); 6104 } 6105 } 6106 break; 6107 case TARGET_NR_gettimeofday: 6108 { 6109 struct timeval tv; 6110 ret = get_errno(gettimeofday(&tv, NULL)); 6111 if (!is_error(ret)) { 6112 if (copy_to_user_timeval(arg1, &tv)) 6113 goto efault; 6114 } 6115 } 6116 break; 6117 case TARGET_NR_settimeofday: 6118 { 6119 struct timeval tv; 6120 if (copy_from_user_timeval(&tv, arg1)) 6121 goto efault; 6122 ret = get_errno(settimeofday(&tv, NULL)); 6123 } 6124 break; 6125 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390) 6126 case TARGET_NR_select: 6127 { 6128 struct target_sel_arg_struct *sel; 6129 abi_ulong inp, outp, exp, tvp; 6130 long nsel; 6131 6132 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6133 goto efault; 6134 nsel = tswapal(sel->n); 6135 inp = tswapal(sel->inp); 6136 outp = tswapal(sel->outp); 6137 exp = tswapal(sel->exp); 6138 tvp = tswapal(sel->tvp); 6139 unlock_user_struct(sel, arg1, 0); 6140 ret = do_select(nsel, inp, outp, exp, tvp); 6141 } 6142 break; 6143 #endif 6144 #ifdef TARGET_NR_pselect6 6145 case TARGET_NR_pselect6: 6146 { 6147 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6148 fd_set rfds, wfds, efds; 6149 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6150 struct timespec ts, *ts_ptr; 6151 6152 /* 6153 * The 6th arg is actually two args smashed together, 6154 * so we cannot use the C library.
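 *
 * A sketch of the layout the guest is assumed to pass in arg6 (two
 * abi_ulong words, matching the arg7[0]/arg7[1] reads further down):
 *
 *     struct {
 *         abi_ulong sigset_addr;   // guest pointer to a target_sigset_t
 *         abi_ulong sigset_size;   // must equal sizeof(target_sigset_t)
 *     };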
6155 */ 6156 sigset_t set; 6157 struct { 6158 sigset_t *set; 6159 size_t size; 6160 } sig, *sig_ptr; 6161 6162 abi_ulong arg_sigset, arg_sigsize, *arg7; 6163 target_sigset_t *target_sigset; 6164 6165 n = arg1; 6166 rfd_addr = arg2; 6167 wfd_addr = arg3; 6168 efd_addr = arg4; 6169 ts_addr = arg5; 6170 6171 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6172 if (ret) { 6173 goto fail; 6174 } 6175 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6176 if (ret) { 6177 goto fail; 6178 } 6179 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6180 if (ret) { 6181 goto fail; 6182 } 6183 6184 /* 6185 * This takes a timespec, and not a timeval, so we cannot 6186 * use the do_select() helper ... 6187 */ 6188 if (ts_addr) { 6189 if (target_to_host_timespec(&ts, ts_addr)) { 6190 goto efault; 6191 } 6192 ts_ptr = &ts; 6193 } else { 6194 ts_ptr = NULL; 6195 } 6196 6197 /* Extract the two packed args for the sigset */ 6198 if (arg6) { 6199 sig_ptr = &sig; 6200 sig.size = _NSIG / 8; 6201 6202 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6203 if (!arg7) { 6204 goto efault; 6205 } 6206 arg_sigset = tswapal(arg7[0]); 6207 arg_sigsize = tswapal(arg7[1]); 6208 unlock_user(arg7, arg6, 0); 6209 6210 if (arg_sigset) { 6211 sig.set = &set; 6212 if (arg_sigsize != sizeof(*target_sigset)) { 6213 /* Like the kernel, we enforce correct size sigsets */ 6214 ret = -TARGET_EINVAL; 6215 goto fail; 6216 } 6217 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6218 sizeof(*target_sigset), 1); 6219 if (!target_sigset) { 6220 goto efault; 6221 } 6222 target_to_host_sigset(&set, target_sigset); 6223 unlock_user(target_sigset, arg_sigset, 0); 6224 } else { 6225 sig.set = NULL; 6226 } 6227 } else { 6228 sig_ptr = NULL; 6229 } 6230 6231 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6232 ts_ptr, sig_ptr)); 6233 6234 if (!is_error(ret)) { 6235 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6236 goto efault; 6237 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6238 goto efault; 6239 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6240 goto efault; 6241 6242 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6243 goto efault; 6244 } 6245 } 6246 break; 6247 #endif 6248 case TARGET_NR_symlink: 6249 { 6250 void *p2; 6251 p = lock_user_string(arg1); 6252 p2 = lock_user_string(arg2); 6253 if (!p || !p2) 6254 ret = -TARGET_EFAULT; 6255 else 6256 ret = get_errno(symlink(p, p2)); 6257 unlock_user(p2, arg2, 0); 6258 unlock_user(p, arg1, 0); 6259 } 6260 break; 6261 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 6262 case TARGET_NR_symlinkat: 6263 { 6264 void *p2; 6265 p = lock_user_string(arg1); 6266 p2 = lock_user_string(arg3); 6267 if (!p || !p2) 6268 ret = -TARGET_EFAULT; 6269 else 6270 ret = get_errno(sys_symlinkat(p, arg2, p2)); 6271 unlock_user(p2, arg3, 0); 6272 unlock_user(p, arg1, 0); 6273 } 6274 break; 6275 #endif 6276 #ifdef TARGET_NR_oldlstat 6277 case TARGET_NR_oldlstat: 6278 goto unimplemented; 6279 #endif 6280 case TARGET_NR_readlink: 6281 { 6282 void *p2, *temp; 6283 p = lock_user_string(arg1); 6284 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6285 if (!p || !p2) 6286 ret = -TARGET_EFAULT; 6287 else { 6288 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) { 6289 char real[PATH_MAX]; 6290 temp = realpath(exec_path,real); 6291 ret = (temp==NULL) ? 
get_errno(-1) : strlen(real) ; 6292 snprintf((char *)p2, arg3, "%s", real); 6293 } 6294 else 6295 ret = get_errno(readlink(path(p), p2, arg3)); 6296 } 6297 unlock_user(p2, arg2, ret); 6298 unlock_user(p, arg1, 0); 6299 } 6300 break; 6301 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 6302 case TARGET_NR_readlinkat: 6303 { 6304 void *p2; 6305 p = lock_user_string(arg2); 6306 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6307 if (!p || !p2) 6308 ret = -TARGET_EFAULT; 6309 else 6310 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); 6311 unlock_user(p2, arg3, ret); 6312 unlock_user(p, arg2, 0); 6313 } 6314 break; 6315 #endif 6316 #ifdef TARGET_NR_uselib 6317 case TARGET_NR_uselib: 6318 goto unimplemented; 6319 #endif 6320 #ifdef TARGET_NR_swapon 6321 case TARGET_NR_swapon: 6322 if (!(p = lock_user_string(arg1))) 6323 goto efault; 6324 ret = get_errno(swapon(p, arg2)); 6325 unlock_user(p, arg1, 0); 6326 break; 6327 #endif 6328 case TARGET_NR_reboot: 6329 if (!(p = lock_user_string(arg4))) 6330 goto efault; 6331 ret = reboot(arg1, arg2, arg3, p); 6332 unlock_user(p, arg4, 0); 6333 break; 6334 #ifdef TARGET_NR_readdir 6335 case TARGET_NR_readdir: 6336 goto unimplemented; 6337 #endif 6338 #ifdef TARGET_NR_mmap 6339 case TARGET_NR_mmap: 6340 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \ 6341 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6342 || defined(TARGET_S390X) 6343 { 6344 abi_ulong *v; 6345 abi_ulong v1, v2, v3, v4, v5, v6; 6346 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6347 goto efault; 6348 v1 = tswapal(v[0]); 6349 v2 = tswapal(v[1]); 6350 v3 = tswapal(v[2]); 6351 v4 = tswapal(v[3]); 6352 v5 = tswapal(v[4]); 6353 v6 = tswapal(v[5]); 6354 unlock_user(v, arg1, 0); 6355 ret = get_errno(target_mmap(v1, v2, v3, 6356 target_to_host_bitmask(v4, mmap_flags_tbl), 6357 v5, v6)); 6358 } 6359 #else 6360 ret = get_errno(target_mmap(arg1, arg2, arg3, 6361 target_to_host_bitmask(arg4, mmap_flags_tbl), 6362 arg5, 6363 arg6)); 6364 #endif 6365 break; 6366 #endif 6367 #ifdef TARGET_NR_mmap2 6368 case TARGET_NR_mmap2: 6369 #ifndef MMAP_SHIFT 6370 #define MMAP_SHIFT 12 6371 #endif 6372 ret = get_errno(target_mmap(arg1, arg2, arg3, 6373 target_to_host_bitmask(arg4, mmap_flags_tbl), 6374 arg5, 6375 arg6 << MMAP_SHIFT)); 6376 break; 6377 #endif 6378 case TARGET_NR_munmap: 6379 ret = get_errno(target_munmap(arg1, arg2)); 6380 break; 6381 case TARGET_NR_mprotect: 6382 { 6383 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 6384 /* Special hack to detect libc making the stack executable. */ 6385 if ((arg3 & PROT_GROWSDOWN) 6386 && arg1 >= ts->info->stack_limit 6387 && arg1 <= ts->info->start_stack) { 6388 arg3 &= ~PROT_GROWSDOWN; 6389 arg2 = arg2 + arg1 - ts->info->stack_limit; 6390 arg1 = ts->info->stack_limit; 6391 } 6392 } 6393 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6394 break; 6395 #ifdef TARGET_NR_mremap 6396 case TARGET_NR_mremap: 6397 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6398 break; 6399 #endif 6400 /* ??? msync/mlock/munlock are broken for softmmu. 
*/ 6401 #ifdef TARGET_NR_msync 6402 case TARGET_NR_msync: 6403 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6404 break; 6405 #endif 6406 #ifdef TARGET_NR_mlock 6407 case TARGET_NR_mlock: 6408 ret = get_errno(mlock(g2h(arg1), arg2)); 6409 break; 6410 #endif 6411 #ifdef TARGET_NR_munlock 6412 case TARGET_NR_munlock: 6413 ret = get_errno(munlock(g2h(arg1), arg2)); 6414 break; 6415 #endif 6416 #ifdef TARGET_NR_mlockall 6417 case TARGET_NR_mlockall: 6418 ret = get_errno(mlockall(arg1)); 6419 break; 6420 #endif 6421 #ifdef TARGET_NR_munlockall 6422 case TARGET_NR_munlockall: 6423 ret = get_errno(munlockall()); 6424 break; 6425 #endif 6426 case TARGET_NR_truncate: 6427 if (!(p = lock_user_string(arg1))) 6428 goto efault; 6429 ret = get_errno(truncate(p, arg2)); 6430 unlock_user(p, arg1, 0); 6431 break; 6432 case TARGET_NR_ftruncate: 6433 ret = get_errno(ftruncate(arg1, arg2)); 6434 break; 6435 case TARGET_NR_fchmod: 6436 ret = get_errno(fchmod(arg1, arg2)); 6437 break; 6438 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 6439 case TARGET_NR_fchmodat: 6440 if (!(p = lock_user_string(arg2))) 6441 goto efault; 6442 ret = get_errno(sys_fchmodat(arg1, p, arg3)); 6443 unlock_user(p, arg2, 0); 6444 break; 6445 #endif 6446 case TARGET_NR_getpriority: 6447 /* Note that negative values are valid for getpriority, so we must 6448 differentiate based on errno settings. */ 6449 errno = 0; 6450 ret = getpriority(arg1, arg2); 6451 if (ret == -1 && errno != 0) { 6452 ret = -host_to_target_errno(errno); 6453 break; 6454 } 6455 #ifdef TARGET_ALPHA 6456 /* Return value is the unbiased priority. Signal no error. */ 6457 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6458 #else 6459 /* Return value is a biased priority to avoid negative numbers. */ 6460 ret = 20 - ret; 6461 #endif 6462 break; 6463 case TARGET_NR_setpriority: 6464 ret = get_errno(setpriority(arg1, arg2, arg3)); 6465 break; 6466 #ifdef TARGET_NR_profil 6467 case TARGET_NR_profil: 6468 goto unimplemented; 6469 #endif 6470 case TARGET_NR_statfs: 6471 if (!(p = lock_user_string(arg1))) 6472 goto efault; 6473 ret = get_errno(statfs(path(p), &stfs)); 6474 unlock_user(p, arg1, 0); 6475 convert_statfs: 6476 if (!is_error(ret)) { 6477 struct target_statfs *target_stfs; 6478 6479 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6480 goto efault; 6481 __put_user(stfs.f_type, &target_stfs->f_type); 6482 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6483 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6484 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6485 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6486 __put_user(stfs.f_files, &target_stfs->f_files); 6487 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6488 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6489 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6490 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6491 unlock_user_struct(target_stfs, arg2, 1); 6492 } 6493 break; 6494 case TARGET_NR_fstatfs: 6495 ret = get_errno(fstatfs(arg1, &stfs)); 6496 goto convert_statfs; 6497 #ifdef TARGET_NR_statfs64 6498 case TARGET_NR_statfs64: 6499 if (!(p = lock_user_string(arg1))) 6500 goto efault; 6501 ret = get_errno(statfs(path(p), &stfs)); 6502 unlock_user(p, arg1, 0); 6503 convert_statfs64: 6504 if (!is_error(ret)) { 6505 struct target_statfs64 *target_stfs; 6506 6507 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6508 goto efault; 6509 __put_user(stfs.f_type, &target_stfs->f_type); 6510 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 
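    /* (Each __put_user() in this block is assumed to store the host value
     * into the corresponding guest field in target byte order, truncating
     * where the target field is narrower than the host one.) */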
6511 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6512 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6513 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6514 __put_user(stfs.f_files, &target_stfs->f_files); 6515 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6516 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6517 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6518 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6519 unlock_user_struct(target_stfs, arg3, 1); 6520 } 6521 break; 6522 case TARGET_NR_fstatfs64: 6523 ret = get_errno(fstatfs(arg1, &stfs)); 6524 goto convert_statfs64; 6525 #endif 6526 #ifdef TARGET_NR_ioperm 6527 case TARGET_NR_ioperm: 6528 goto unimplemented; 6529 #endif 6530 #ifdef TARGET_NR_socketcall 6531 case TARGET_NR_socketcall: 6532 ret = do_socketcall(arg1, arg2); 6533 break; 6534 #endif 6535 #ifdef TARGET_NR_accept 6536 case TARGET_NR_accept: 6537 ret = do_accept(arg1, arg2, arg3); 6538 break; 6539 #endif 6540 #ifdef TARGET_NR_bind 6541 case TARGET_NR_bind: 6542 ret = do_bind(arg1, arg2, arg3); 6543 break; 6544 #endif 6545 #ifdef TARGET_NR_connect 6546 case TARGET_NR_connect: 6547 ret = do_connect(arg1, arg2, arg3); 6548 break; 6549 #endif 6550 #ifdef TARGET_NR_getpeername 6551 case TARGET_NR_getpeername: 6552 ret = do_getpeername(arg1, arg2, arg3); 6553 break; 6554 #endif 6555 #ifdef TARGET_NR_getsockname 6556 case TARGET_NR_getsockname: 6557 ret = do_getsockname(arg1, arg2, arg3); 6558 break; 6559 #endif 6560 #ifdef TARGET_NR_getsockopt 6561 case TARGET_NR_getsockopt: 6562 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6563 break; 6564 #endif 6565 #ifdef TARGET_NR_listen 6566 case TARGET_NR_listen: 6567 ret = get_errno(listen(arg1, arg2)); 6568 break; 6569 #endif 6570 #ifdef TARGET_NR_recv 6571 case TARGET_NR_recv: 6572 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6573 break; 6574 #endif 6575 #ifdef TARGET_NR_recvfrom 6576 case TARGET_NR_recvfrom: 6577 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6578 break; 6579 #endif 6580 #ifdef TARGET_NR_recvmsg 6581 case TARGET_NR_recvmsg: 6582 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6583 break; 6584 #endif 6585 #ifdef TARGET_NR_send 6586 case TARGET_NR_send: 6587 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6588 break; 6589 #endif 6590 #ifdef TARGET_NR_sendmsg 6591 case TARGET_NR_sendmsg: 6592 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6593 break; 6594 #endif 6595 #ifdef TARGET_NR_sendto 6596 case TARGET_NR_sendto: 6597 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6598 break; 6599 #endif 6600 #ifdef TARGET_NR_shutdown 6601 case TARGET_NR_shutdown: 6602 ret = get_errno(shutdown(arg1, arg2)); 6603 break; 6604 #endif 6605 #ifdef TARGET_NR_socket 6606 case TARGET_NR_socket: 6607 ret = do_socket(arg1, arg2, arg3); 6608 break; 6609 #endif 6610 #ifdef TARGET_NR_socketpair 6611 case TARGET_NR_socketpair: 6612 ret = do_socketpair(arg1, arg2, arg3, arg4); 6613 break; 6614 #endif 6615 #ifdef TARGET_NR_setsockopt 6616 case TARGET_NR_setsockopt: 6617 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6618 break; 6619 #endif 6620 6621 case TARGET_NR_syslog: 6622 if (!(p = lock_user_string(arg2))) 6623 goto efault; 6624 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6625 unlock_user(p, arg2, 0); 6626 break; 6627 6628 case TARGET_NR_setitimer: 6629 { 6630 struct itimerval value, ovalue, *pvalue; 6631 6632 if (arg2) { 6633 pvalue = &value; 6634 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6635 || 
copy_from_user_timeval(&pvalue->it_value, 6636 arg2 + sizeof(struct target_timeval))) 6637 goto efault; 6638 } else { 6639 pvalue = NULL; 6640 } 6641 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6642 if (!is_error(ret) && arg3) { 6643 if (copy_to_user_timeval(arg3, 6644 &ovalue.it_interval) 6645 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6646 &ovalue.it_value)) 6647 goto efault; 6648 } 6649 } 6650 break; 6651 case TARGET_NR_getitimer: 6652 { 6653 struct itimerval value; 6654 6655 ret = get_errno(getitimer(arg1, &value)); 6656 if (!is_error(ret) && arg2) { 6657 if (copy_to_user_timeval(arg2, 6658 &value.it_interval) 6659 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6660 &value.it_value)) 6661 goto efault; 6662 } 6663 } 6664 break; 6665 case TARGET_NR_stat: 6666 if (!(p = lock_user_string(arg1))) 6667 goto efault; 6668 ret = get_errno(stat(path(p), &st)); 6669 unlock_user(p, arg1, 0); 6670 goto do_stat; 6671 case TARGET_NR_lstat: 6672 if (!(p = lock_user_string(arg1))) 6673 goto efault; 6674 ret = get_errno(lstat(path(p), &st)); 6675 unlock_user(p, arg1, 0); 6676 goto do_stat; 6677 case TARGET_NR_fstat: 6678 { 6679 ret = get_errno(fstat(arg1, &st)); 6680 do_stat: 6681 if (!is_error(ret)) { 6682 struct target_stat *target_st; 6683 6684 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6685 goto efault; 6686 memset(target_st, 0, sizeof(*target_st)); 6687 __put_user(st.st_dev, &target_st->st_dev); 6688 __put_user(st.st_ino, &target_st->st_ino); 6689 __put_user(st.st_mode, &target_st->st_mode); 6690 __put_user(st.st_uid, &target_st->st_uid); 6691 __put_user(st.st_gid, &target_st->st_gid); 6692 __put_user(st.st_nlink, &target_st->st_nlink); 6693 __put_user(st.st_rdev, &target_st->st_rdev); 6694 __put_user(st.st_size, &target_st->st_size); 6695 __put_user(st.st_blksize, &target_st->st_blksize); 6696 __put_user(st.st_blocks, &target_st->st_blocks); 6697 __put_user(st.st_atime, &target_st->target_st_atime); 6698 __put_user(st.st_mtime, &target_st->target_st_mtime); 6699 __put_user(st.st_ctime, &target_st->target_st_ctime); 6700 unlock_user_struct(target_st, arg2, 1); 6701 } 6702 } 6703 break; 6704 #ifdef TARGET_NR_olduname 6705 case TARGET_NR_olduname: 6706 goto unimplemented; 6707 #endif 6708 #ifdef TARGET_NR_iopl 6709 case TARGET_NR_iopl: 6710 goto unimplemented; 6711 #endif 6712 case TARGET_NR_vhangup: 6713 ret = get_errno(vhangup()); 6714 break; 6715 #ifdef TARGET_NR_idle 6716 case TARGET_NR_idle: 6717 goto unimplemented; 6718 #endif 6719 #ifdef TARGET_NR_syscall 6720 case TARGET_NR_syscall: 6721 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6722 arg6, arg7, arg8, 0); 6723 break; 6724 #endif 6725 case TARGET_NR_wait4: 6726 { 6727 int status; 6728 abi_long status_ptr = arg2; 6729 struct rusage rusage, *rusage_ptr; 6730 abi_ulong target_rusage = arg4; 6731 if (target_rusage) 6732 rusage_ptr = &rusage; 6733 else 6734 rusage_ptr = NULL; 6735 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6736 if (!is_error(ret)) { 6737 if (status_ptr && ret) { 6738 status = host_to_target_waitstatus(status); 6739 if (put_user_s32(status, status_ptr)) 6740 goto efault; 6741 } 6742 if (target_rusage) 6743 host_to_target_rusage(target_rusage, &rusage); 6744 } 6745 } 6746 break; 6747 #ifdef TARGET_NR_swapoff 6748 case TARGET_NR_swapoff: 6749 if (!(p = lock_user_string(arg1))) 6750 goto efault; 6751 ret = get_errno(swapoff(p)); 6752 unlock_user(p, arg1, 0); 6753 break; 6754 #endif 6755 case TARGET_NR_sysinfo: 6756 { 6757 struct target_sysinfo 
*target_value; 6758 struct sysinfo value; 6759 ret = get_errno(sysinfo(&value)); 6760 if (!is_error(ret) && arg1) 6761 { 6762 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6763 goto efault; 6764 __put_user(value.uptime, &target_value->uptime); 6765 __put_user(value.loads[0], &target_value->loads[0]); 6766 __put_user(value.loads[1], &target_value->loads[1]); 6767 __put_user(value.loads[2], &target_value->loads[2]); 6768 __put_user(value.totalram, &target_value->totalram); 6769 __put_user(value.freeram, &target_value->freeram); 6770 __put_user(value.sharedram, &target_value->sharedram); 6771 __put_user(value.bufferram, &target_value->bufferram); 6772 __put_user(value.totalswap, &target_value->totalswap); 6773 __put_user(value.freeswap, &target_value->freeswap); 6774 __put_user(value.procs, &target_value->procs); 6775 __put_user(value.totalhigh, &target_value->totalhigh); 6776 __put_user(value.freehigh, &target_value->freehigh); 6777 __put_user(value.mem_unit, &target_value->mem_unit); 6778 unlock_user_struct(target_value, arg1, 1); 6779 } 6780 } 6781 break; 6782 #ifdef TARGET_NR_ipc 6783 case TARGET_NR_ipc: 6784 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6785 break; 6786 #endif 6787 #ifdef TARGET_NR_semget 6788 case TARGET_NR_semget: 6789 ret = get_errno(semget(arg1, arg2, arg3)); 6790 break; 6791 #endif 6792 #ifdef TARGET_NR_semop 6793 case TARGET_NR_semop: 6794 ret = get_errno(do_semop(arg1, arg2, arg3)); 6795 break; 6796 #endif 6797 #ifdef TARGET_NR_semctl 6798 case TARGET_NR_semctl: 6799 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 6800 break; 6801 #endif 6802 #ifdef TARGET_NR_msgctl 6803 case TARGET_NR_msgctl: 6804 ret = do_msgctl(arg1, arg2, arg3); 6805 break; 6806 #endif 6807 #ifdef TARGET_NR_msgget 6808 case TARGET_NR_msgget: 6809 ret = get_errno(msgget(arg1, arg2)); 6810 break; 6811 #endif 6812 #ifdef TARGET_NR_msgrcv 6813 case TARGET_NR_msgrcv: 6814 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 6815 break; 6816 #endif 6817 #ifdef TARGET_NR_msgsnd 6818 case TARGET_NR_msgsnd: 6819 ret = do_msgsnd(arg1, arg2, arg3, arg4); 6820 break; 6821 #endif 6822 #ifdef TARGET_NR_shmget 6823 case TARGET_NR_shmget: 6824 ret = get_errno(shmget(arg1, arg2, arg3)); 6825 break; 6826 #endif 6827 #ifdef TARGET_NR_shmctl 6828 case TARGET_NR_shmctl: 6829 ret = do_shmctl(arg1, arg2, arg3); 6830 break; 6831 #endif 6832 #ifdef TARGET_NR_shmat 6833 case TARGET_NR_shmat: 6834 ret = do_shmat(arg1, arg2, arg3); 6835 break; 6836 #endif 6837 #ifdef TARGET_NR_shmdt 6838 case TARGET_NR_shmdt: 6839 ret = do_shmdt(arg1); 6840 break; 6841 #endif 6842 case TARGET_NR_fsync: 6843 ret = get_errno(fsync(arg1)); 6844 break; 6845 case TARGET_NR_clone: 6846 #if defined(TARGET_SH4) || defined(TARGET_ALPHA) 6847 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 6848 #elif defined(TARGET_CRIS) 6849 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); 6850 #elif defined(TARGET_S390X) 6851 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 6852 #else 6853 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 6854 #endif 6855 break; 6856 #ifdef __NR_exit_group 6857 /* new thread calls */ 6858 case TARGET_NR_exit_group: 6859 #ifdef TARGET_GPROF 6860 _mcleanup(); 6861 #endif 6862 gdb_exit(cpu_env, arg1); 6863 ret = get_errno(exit_group(arg1)); 6864 break; 6865 #endif 6866 case TARGET_NR_setdomainname: 6867 if (!(p = lock_user_string(arg1))) 6868 goto efault; 6869 ret = get_errno(setdomainname(p, arg2)); 6870 unlock_user(p, arg1, 0); 
6871 break; 6872 case TARGET_NR_uname: 6873 /* no need to transcode because we use the linux syscall */ 6874 { 6875 struct new_utsname * buf; 6876 6877 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 6878 goto efault; 6879 ret = get_errno(sys_uname(buf)); 6880 if (!is_error(ret)) { 6881 /* Overwrite the native machine name with whatever is being 6882 emulated. */ 6883 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 6884 /* Allow the user to override the reported release. */ 6885 if (qemu_uname_release && *qemu_uname_release) 6886 strcpy (buf->release, qemu_uname_release); 6887 } 6888 unlock_user_struct(buf, arg1, 1); 6889 } 6890 break; 6891 #ifdef TARGET_I386 6892 case TARGET_NR_modify_ldt: 6893 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 6894 break; 6895 #if !defined(TARGET_X86_64) 6896 case TARGET_NR_vm86old: 6897 goto unimplemented; 6898 case TARGET_NR_vm86: 6899 ret = do_vm86(cpu_env, arg1, arg2); 6900 break; 6901 #endif 6902 #endif 6903 case TARGET_NR_adjtimex: 6904 goto unimplemented; 6905 #ifdef TARGET_NR_create_module 6906 case TARGET_NR_create_module: 6907 #endif 6908 case TARGET_NR_init_module: 6909 case TARGET_NR_delete_module: 6910 #ifdef TARGET_NR_get_kernel_syms 6911 case TARGET_NR_get_kernel_syms: 6912 #endif 6913 goto unimplemented; 6914 case TARGET_NR_quotactl: 6915 goto unimplemented; 6916 case TARGET_NR_getpgid: 6917 ret = get_errno(getpgid(arg1)); 6918 break; 6919 case TARGET_NR_fchdir: 6920 ret = get_errno(fchdir(arg1)); 6921 break; 6922 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 6923 case TARGET_NR_bdflush: 6924 goto unimplemented; 6925 #endif 6926 #ifdef TARGET_NR_sysfs 6927 case TARGET_NR_sysfs: 6928 goto unimplemented; 6929 #endif 6930 case TARGET_NR_personality: 6931 ret = get_errno(personality(arg1)); 6932 break; 6933 #ifdef TARGET_NR_afs_syscall 6934 case TARGET_NR_afs_syscall: 6935 goto unimplemented; 6936 #endif 6937 #ifdef TARGET_NR__llseek /* Not on alpha */ 6938 case TARGET_NR__llseek: 6939 { 6940 int64_t res; 6941 #if !defined(__NR_llseek) 6942 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 6943 if (res == -1) { 6944 ret = get_errno(res); 6945 } else { 6946 ret = 0; 6947 } 6948 #else 6949 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 6950 #endif 6951 if ((ret == 0) && put_user_s64(res, arg4)) { 6952 goto efault; 6953 } 6954 } 6955 break; 6956 #endif 6957 case TARGET_NR_getdents: 6958 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 6959 { 6960 struct target_dirent *target_dirp; 6961 struct linux_dirent *dirp; 6962 abi_long count = arg3; 6963 6964 dirp = malloc(count); 6965 if (!dirp) { 6966 ret = -TARGET_ENOMEM; 6967 goto fail; 6968 } 6969 6970 ret = get_errno(sys_getdents(arg1, dirp, count)); 6971 if (!is_error(ret)) { 6972 struct linux_dirent *de; 6973 struct target_dirent *tde; 6974 int len = ret; 6975 int reclen, treclen; 6976 int count1, tnamelen; 6977 6978 count1 = 0; 6979 de = dirp; 6980 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6981 goto efault; 6982 tde = target_dirp; 6983 while (len > 0) { 6984 reclen = de->d_reclen; 6985 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long))); 6986 tde->d_reclen = tswap16(treclen); 6987 tde->d_ino = tswapal(de->d_ino); 6988 tde->d_off = tswapal(de->d_off); 6989 tnamelen = treclen - (2 * sizeof(abi_long) + 2); 6990 if (tnamelen > 256) 6991 tnamelen = 256; 6992 /* XXX: may not be correct */ 6993 pstrcpy(tde->d_name, tnamelen, de->d_name); 6994 de = (struct linux_dirent *)((char *)de + reclen); 6995 len -= reclen; 6996 tde = (struct target_dirent *)((char
*)tde + treclen); 6997 count1 += treclen; 6998 } 6999 ret = count1; 7000 unlock_user(target_dirp, arg2, ret); 7001 } 7002 free(dirp); 7003 } 7004 #else 7005 { 7006 struct linux_dirent *dirp; 7007 abi_long count = arg3; 7008 7009 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7010 goto efault; 7011 ret = get_errno(sys_getdents(arg1, dirp, count)); 7012 if (!is_error(ret)) { 7013 struct linux_dirent *de; 7014 int len = ret; 7015 int reclen; 7016 de = dirp; 7017 while (len > 0) { 7018 reclen = de->d_reclen; 7019 if (reclen > len) 7020 break; 7021 de->d_reclen = tswap16(reclen); 7022 tswapls(&de->d_ino); 7023 tswapls(&de->d_off); 7024 de = (struct linux_dirent *)((char *)de + reclen); 7025 len -= reclen; 7026 } 7027 } 7028 unlock_user(dirp, arg2, ret); 7029 } 7030 #endif 7031 break; 7032 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7033 case TARGET_NR_getdents64: 7034 { 7035 struct linux_dirent64 *dirp; 7036 abi_long count = arg3; 7037 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7038 goto efault; 7039 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7040 if (!is_error(ret)) { 7041 struct linux_dirent64 *de; 7042 int len = ret; 7043 int reclen; 7044 de = dirp; 7045 while (len > 0) { 7046 reclen = de->d_reclen; 7047 if (reclen > len) 7048 break; 7049 de->d_reclen = tswap16(reclen); 7050 tswap64s((uint64_t *)&de->d_ino); 7051 tswap64s((uint64_t *)&de->d_off); 7052 de = (struct linux_dirent64 *)((char *)de + reclen); 7053 len -= reclen; 7054 } 7055 } 7056 unlock_user(dirp, arg2, ret); 7057 } 7058 break; 7059 #endif /* TARGET_NR_getdents64 */ 7060 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X) 7061 #ifdef TARGET_S390X 7062 case TARGET_NR_select: 7063 #else 7064 case TARGET_NR__newselect: 7065 #endif 7066 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7067 break; 7068 #endif 7069 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7070 # ifdef TARGET_NR_poll 7071 case TARGET_NR_poll: 7072 # endif 7073 # ifdef TARGET_NR_ppoll 7074 case TARGET_NR_ppoll: 7075 # endif 7076 { 7077 struct target_pollfd *target_pfd; 7078 unsigned int nfds = arg2; 7079 int timeout = arg3; 7080 struct pollfd *pfd; 7081 unsigned int i; 7082 7083 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7084 if (!target_pfd) 7085 goto efault; 7086 7087 pfd = alloca(sizeof(struct pollfd) * nfds); 7088 for(i = 0; i < nfds; i++) { 7089 pfd[i].fd = tswap32(target_pfd[i].fd); 7090 pfd[i].events = tswap16(target_pfd[i].events); 7091 } 7092 7093 # ifdef TARGET_NR_ppoll 7094 if (num == TARGET_NR_ppoll) { 7095 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7096 target_sigset_t *target_set; 7097 sigset_t _set, *set = &_set; 7098 7099 if (arg3) { 7100 if (target_to_host_timespec(timeout_ts, arg3)) { 7101 unlock_user(target_pfd, arg1, 0); 7102 goto efault; 7103 } 7104 } else { 7105 timeout_ts = NULL; 7106 } 7107 7108 if (arg4) { 7109 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7110 if (!target_set) { 7111 unlock_user(target_pfd, arg1, 0); 7112 goto efault; 7113 } 7114 target_to_host_sigset(set, target_set); 7115 } else { 7116 set = NULL; 7117 } 7118 7119 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7120 7121 if (!is_error(ret) && arg3) { 7122 host_to_target_timespec(arg3, timeout_ts); 7123 } 7124 if (arg4) { 7125 unlock_user(target_set, arg4, 0); 7126 } 7127 } else 7128 # endif 7129 ret = get_errno(poll(pfd, nfds, timeout)); 7130 7131 if (!is_error(ret)) { 7132 for(i = 0; i < nfds; i++) { 7133 
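    /* Only revents is copied back into the guest pollfd array; fd and
       events are left exactly as the guest wrote them, matching what the
       kernel's poll()/ppoll() do. */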
target_pfd[i].revents = tswap16(pfd[i].revents); 7134 } 7135 } 7136 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7137 } 7138 break; 7139 #endif 7140 case TARGET_NR_flock: 7141 /* NOTE: the flock constant seems to be the same for every 7142 Linux platform */ 7143 ret = get_errno(flock(arg1, arg2)); 7144 break; 7145 case TARGET_NR_readv: 7146 { 7147 int count = arg3; 7148 struct iovec *vec; 7149 7150 vec = alloca(count * sizeof(struct iovec)); 7151 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0) 7152 goto efault; 7153 ret = get_errno(readv(arg1, vec, count)); 7154 unlock_iovec(vec, arg2, count, 1); 7155 } 7156 break; 7157 case TARGET_NR_writev: 7158 { 7159 int count = arg3; 7160 struct iovec *vec; 7161 7162 vec = alloca(count * sizeof(struct iovec)); 7163 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) 7164 goto efault; 7165 ret = get_errno(writev(arg1, vec, count)); 7166 unlock_iovec(vec, arg2, count, 0); 7167 } 7168 break; 7169 case TARGET_NR_getsid: 7170 ret = get_errno(getsid(arg1)); 7171 break; 7172 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7173 case TARGET_NR_fdatasync: 7174 ret = get_errno(fdatasync(arg1)); 7175 break; 7176 #endif 7177 case TARGET_NR__sysctl: 7178 /* We don't implement this, but ENOTDIR is always a safe 7179 return value. */ 7180 ret = -TARGET_ENOTDIR; 7181 break; 7182 case TARGET_NR_sched_getaffinity: 7183 { 7184 unsigned int mask_size; 7185 unsigned long *mask; 7186 7187 /* 7188 * sched_getaffinity needs multiples of ulong, so need to take 7189 * care of mismatches between target ulong and host ulong sizes. 7190 */ 7191 if (arg2 & (sizeof(abi_ulong) - 1)) { 7192 ret = -TARGET_EINVAL; 7193 break; 7194 } 7195 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7196 7197 mask = alloca(mask_size); 7198 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7199 7200 if (!is_error(ret)) { 7201 if (copy_to_user(arg3, mask, ret)) { 7202 goto efault; 7203 } 7204 } 7205 } 7206 break; 7207 case TARGET_NR_sched_setaffinity: 7208 { 7209 unsigned int mask_size; 7210 unsigned long *mask; 7211 7212 /* 7213 * sched_setaffinity needs multiples of ulong, so need to take 7214 * care of mismatches between target ulong and host ulong sizes. 
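 *
 * Worked example, assuming a 32-bit target on a 64-bit host: a guest
 * passing arg2 == 4 clears the alignment check (4 is a multiple of
 * sizeof(abi_ulong)) and the rounding below yields
 *
 *     mask_size = (4 + 7) & ~7 = 8
 *
 * i.e. one host unsigned long, of which only the first 4 bytes are
 * filled from guest memory.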
7215 */ 7216 if (arg2 & (sizeof(abi_ulong) - 1)) { 7217 ret = -TARGET_EINVAL; 7218 break; 7219 } 7220 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7221 7222 mask = alloca(mask_size); 7223 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7224 goto efault; 7225 } 7226 memcpy(mask, p, arg2); 7227 unlock_user_struct(p, arg2, 0); 7228 7229 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7230 } 7231 break; 7232 case TARGET_NR_sched_setparam: 7233 { 7234 struct sched_param *target_schp; 7235 struct sched_param schp; 7236 7237 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7238 goto efault; 7239 schp.sched_priority = tswap32(target_schp->sched_priority); 7240 unlock_user_struct(target_schp, arg2, 0); 7241 ret = get_errno(sched_setparam(arg1, &schp)); 7242 } 7243 break; 7244 case TARGET_NR_sched_getparam: 7245 { 7246 struct sched_param *target_schp; 7247 struct sched_param schp; 7248 ret = get_errno(sched_getparam(arg1, &schp)); 7249 if (!is_error(ret)) { 7250 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7251 goto efault; 7252 target_schp->sched_priority = tswap32(schp.sched_priority); 7253 unlock_user_struct(target_schp, arg2, 1); 7254 } 7255 } 7256 break; 7257 case TARGET_NR_sched_setscheduler: 7258 { 7259 struct sched_param *target_schp; 7260 struct sched_param schp; 7261 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7262 goto efault; 7263 schp.sched_priority = tswap32(target_schp->sched_priority); 7264 unlock_user_struct(target_schp, arg3, 0); 7265 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7266 } 7267 break; 7268 case TARGET_NR_sched_getscheduler: 7269 ret = get_errno(sched_getscheduler(arg1)); 7270 break; 7271 case TARGET_NR_sched_yield: 7272 ret = get_errno(sched_yield()); 7273 break; 7274 case TARGET_NR_sched_get_priority_max: 7275 ret = get_errno(sched_get_priority_max(arg1)); 7276 break; 7277 case TARGET_NR_sched_get_priority_min: 7278 ret = get_errno(sched_get_priority_min(arg1)); 7279 break; 7280 case TARGET_NR_sched_rr_get_interval: 7281 { 7282 struct timespec ts; 7283 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7284 if (!is_error(ret)) { 7285 host_to_target_timespec(arg2, &ts); 7286 } 7287 } 7288 break; 7289 case TARGET_NR_nanosleep: 7290 { 7291 struct timespec req, rem; 7292 target_to_host_timespec(&req, arg1); 7293 ret = get_errno(nanosleep(&req, &rem)); 7294 if (is_error(ret) && arg2) { 7295 host_to_target_timespec(arg2, &rem); 7296 } 7297 } 7298 break; 7299 #ifdef TARGET_NR_query_module 7300 case TARGET_NR_query_module: 7301 goto unimplemented; 7302 #endif 7303 #ifdef TARGET_NR_nfsservctl 7304 case TARGET_NR_nfsservctl: 7305 goto unimplemented; 7306 #endif 7307 case TARGET_NR_prctl: 7308 switch (arg1) { 7309 case PR_GET_PDEATHSIG: 7310 { 7311 int deathsig; 7312 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7313 if (!is_error(ret) && arg2 7314 && put_user_ual(deathsig, arg2)) { 7315 goto efault; 7316 } 7317 break; 7318 } 7319 #ifdef PR_GET_NAME 7320 case PR_GET_NAME: 7321 { 7322 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7323 if (!name) { 7324 goto efault; 7325 } 7326 ret = get_errno(prctl(arg1, (unsigned long)name, 7327 arg3, arg4, arg5)); 7328 unlock_user(name, arg2, 16); 7329 break; 7330 } 7331 case PR_SET_NAME: 7332 { 7333 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7334 if (!name) { 7335 goto efault; 7336 } 7337 ret = get_errno(prctl(arg1, (unsigned long)name, 7338 arg3, arg4, arg5)); 7339 unlock_user(name, arg2, 0); 7340 break; 7341 } 7342 #endif 7343 
default: 7344 /* Most prctl options have no pointer arguments */ 7345 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7346 break; 7347 } 7348 break; 7349 #ifdef TARGET_NR_arch_prctl 7350 case TARGET_NR_arch_prctl: 7351 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7352 ret = do_arch_prctl(cpu_env, arg1, arg2); 7353 break; 7354 #else 7355 goto unimplemented; 7356 #endif 7357 #endif 7358 #ifdef TARGET_NR_pread 7359 case TARGET_NR_pread: 7360 if (regpairs_aligned(cpu_env)) 7361 arg4 = arg5; 7362 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7363 goto efault; 7364 ret = get_errno(pread(arg1, p, arg3, arg4)); 7365 unlock_user(p, arg2, ret); 7366 break; 7367 case TARGET_NR_pwrite: 7368 if (regpairs_aligned(cpu_env)) 7369 arg4 = arg5; 7370 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7371 goto efault; 7372 ret = get_errno(pwrite(arg1, p, arg3, arg4)); 7373 unlock_user(p, arg2, 0); 7374 break; 7375 #endif 7376 #ifdef TARGET_NR_pread64 7377 case TARGET_NR_pread64: 7378 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7379 goto efault; 7380 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7381 unlock_user(p, arg2, ret); 7382 break; 7383 case TARGET_NR_pwrite64: 7384 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7385 goto efault; 7386 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7387 unlock_user(p, arg2, 0); 7388 break; 7389 #endif 7390 case TARGET_NR_getcwd: 7391 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7392 goto efault; 7393 ret = get_errno(sys_getcwd1(p, arg2)); 7394 unlock_user(p, arg1, ret); 7395 break; 7396 case TARGET_NR_capget: 7397 goto unimplemented; 7398 case TARGET_NR_capset: 7399 goto unimplemented; 7400 case TARGET_NR_sigaltstack: 7401 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7402 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7403 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7404 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7405 break; 7406 #else 7407 goto unimplemented; 7408 #endif 7409 case TARGET_NR_sendfile: 7410 goto unimplemented; 7411 #ifdef TARGET_NR_getpmsg 7412 case TARGET_NR_getpmsg: 7413 goto unimplemented; 7414 #endif 7415 #ifdef TARGET_NR_putpmsg 7416 case TARGET_NR_putpmsg: 7417 goto unimplemented; 7418 #endif 7419 #ifdef TARGET_NR_vfork 7420 case TARGET_NR_vfork: 7421 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7422 0, 0, 0, 0)); 7423 break; 7424 #endif 7425 #ifdef TARGET_NR_ugetrlimit 7426 case TARGET_NR_ugetrlimit: 7427 { 7428 struct rlimit rlim; 7429 int resource = target_to_host_resource(arg1); 7430 ret = get_errno(getrlimit(resource, &rlim)); 7431 if (!is_error(ret)) { 7432 struct target_rlimit *target_rlim; 7433 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7434 goto efault; 7435 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7436 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7437 unlock_user_struct(target_rlim, arg2, 1); 7438 } 7439 break; 7440 } 7441 #endif 7442 #ifdef TARGET_NR_truncate64 7443 case TARGET_NR_truncate64: 7444 if (!(p = lock_user_string(arg1))) 7445 goto efault; 7446 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7447 unlock_user(p, arg1, 0); 7448 break; 7449 #endif 7450 #ifdef TARGET_NR_ftruncate64 7451 case TARGET_NR_ftruncate64: 7452 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7453 break; 7454 #endif 7455 #ifdef TARGET_NR_stat64 7456 case 
TARGET_NR_stat64: 7457 if (!(p = lock_user_string(arg1))) 7458 goto efault; 7459 ret = get_errno(stat(path(p), &st)); 7460 unlock_user(p, arg1, 0); 7461 if (!is_error(ret)) 7462 ret = host_to_target_stat64(cpu_env, arg2, &st); 7463 break; 7464 #endif 7465 #ifdef TARGET_NR_lstat64 7466 case TARGET_NR_lstat64: 7467 if (!(p = lock_user_string(arg1))) 7468 goto efault; 7469 ret = get_errno(lstat(path(p), &st)); 7470 unlock_user(p, arg1, 0); 7471 if (!is_error(ret)) 7472 ret = host_to_target_stat64(cpu_env, arg2, &st); 7473 break; 7474 #endif 7475 #ifdef TARGET_NR_fstat64 7476 case TARGET_NR_fstat64: 7477 ret = get_errno(fstat(arg1, &st)); 7478 if (!is_error(ret)) 7479 ret = host_to_target_stat64(cpu_env, arg2, &st); 7480 break; 7481 #endif 7482 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 7483 (defined(__NR_fstatat64) || defined(__NR_newfstatat)) 7484 #ifdef TARGET_NR_fstatat64 7485 case TARGET_NR_fstatat64: 7486 #endif 7487 #ifdef TARGET_NR_newfstatat 7488 case TARGET_NR_newfstatat: 7489 #endif 7490 if (!(p = lock_user_string(arg2))) 7491 goto efault; 7492 #ifdef __NR_fstatat64 7493 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); 7494 #else 7495 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); 7496 #endif 7497 if (!is_error(ret)) 7498 ret = host_to_target_stat64(cpu_env, arg3, &st); 7499 break; 7500 #endif 7501 case TARGET_NR_lchown: 7502 if (!(p = lock_user_string(arg1))) 7503 goto efault; 7504 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7505 unlock_user(p, arg1, 0); 7506 break; 7507 #ifdef TARGET_NR_getuid 7508 case TARGET_NR_getuid: 7509 ret = get_errno(high2lowuid(getuid())); 7510 break; 7511 #endif 7512 #ifdef TARGET_NR_getgid 7513 case TARGET_NR_getgid: 7514 ret = get_errno(high2lowgid(getgid())); 7515 break; 7516 #endif 7517 #ifdef TARGET_NR_geteuid 7518 case TARGET_NR_geteuid: 7519 ret = get_errno(high2lowuid(geteuid())); 7520 break; 7521 #endif 7522 #ifdef TARGET_NR_getegid 7523 case TARGET_NR_getegid: 7524 ret = get_errno(high2lowgid(getegid())); 7525 break; 7526 #endif 7527 case TARGET_NR_setreuid: 7528 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7529 break; 7530 case TARGET_NR_setregid: 7531 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7532 break; 7533 case TARGET_NR_getgroups: 7534 { 7535 int gidsetsize = arg1; 7536 target_id *target_grouplist; 7537 gid_t *grouplist; 7538 int i; 7539 7540 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7541 ret = get_errno(getgroups(gidsetsize, grouplist)); 7542 if (gidsetsize == 0) 7543 break; 7544 if (!is_error(ret)) { 7545 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0); 7546 if (!target_grouplist) 7547 goto efault; 7548 for(i = 0;i < ret; i++) 7549 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7550 unlock_user(target_grouplist, arg2, gidsetsize * 2); 7551 } 7552 } 7553 break; 7554 case TARGET_NR_setgroups: 7555 { 7556 int gidsetsize = arg1; 7557 target_id *target_grouplist; 7558 gid_t *grouplist; 7559 int i; 7560 7561 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7562 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1); 7563 if (!target_grouplist) { 7564 ret = -TARGET_EFAULT; 7565 goto fail; 7566 } 7567 for(i = 0;i < gidsetsize; i++) 7568 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7569 unlock_user(target_grouplist, arg2, 0); 7570 ret = get_errno(setgroups(gidsetsize, grouplist)); 7571 } 7572 break; 7573 case TARGET_NR_fchown: 7574 ret = get_errno(fchown(arg1, 
low2highuid(arg2), low2highgid(arg3))); 7575 break; 7576 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 7577 case TARGET_NR_fchownat: 7578 if (!(p = lock_user_string(arg2))) 7579 goto efault; 7580 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); 7581 unlock_user(p, arg2, 0); 7582 break; 7583 #endif 7584 #ifdef TARGET_NR_setresuid 7585 case TARGET_NR_setresuid: 7586 ret = get_errno(setresuid(low2highuid(arg1), 7587 low2highuid(arg2), 7588 low2highuid(arg3))); 7589 break; 7590 #endif 7591 #ifdef TARGET_NR_getresuid 7592 case TARGET_NR_getresuid: 7593 { 7594 uid_t ruid, euid, suid; 7595 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7596 if (!is_error(ret)) { 7597 if (put_user_u16(high2lowuid(ruid), arg1) 7598 || put_user_u16(high2lowuid(euid), arg2) 7599 || put_user_u16(high2lowuid(suid), arg3)) 7600 goto efault; 7601 } 7602 } 7603 break; 7604 #endif 7605 #ifdef TARGET_NR_getresgid 7606 case TARGET_NR_setresgid: 7607 ret = get_errno(setresgid(low2highgid(arg1), 7608 low2highgid(arg2), 7609 low2highgid(arg3))); 7610 break; 7611 #endif 7612 #ifdef TARGET_NR_getresgid 7613 case TARGET_NR_getresgid: 7614 { 7615 gid_t rgid, egid, sgid; 7616 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7617 if (!is_error(ret)) { 7618 if (put_user_u16(high2lowgid(rgid), arg1) 7619 || put_user_u16(high2lowgid(egid), arg2) 7620 || put_user_u16(high2lowgid(sgid), arg3)) 7621 goto efault; 7622 } 7623 } 7624 break; 7625 #endif 7626 case TARGET_NR_chown: 7627 if (!(p = lock_user_string(arg1))) 7628 goto efault; 7629 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7630 unlock_user(p, arg1, 0); 7631 break; 7632 case TARGET_NR_setuid: 7633 ret = get_errno(setuid(low2highuid(arg1))); 7634 break; 7635 case TARGET_NR_setgid: 7636 ret = get_errno(setgid(low2highgid(arg1))); 7637 break; 7638 case TARGET_NR_setfsuid: 7639 ret = get_errno(setfsuid(arg1)); 7640 break; 7641 case TARGET_NR_setfsgid: 7642 ret = get_errno(setfsgid(arg1)); 7643 break; 7644 7645 #ifdef TARGET_NR_lchown32 7646 case TARGET_NR_lchown32: 7647 if (!(p = lock_user_string(arg1))) 7648 goto efault; 7649 ret = get_errno(lchown(p, arg2, arg3)); 7650 unlock_user(p, arg1, 0); 7651 break; 7652 #endif 7653 #ifdef TARGET_NR_getuid32 7654 case TARGET_NR_getuid32: 7655 ret = get_errno(getuid()); 7656 break; 7657 #endif 7658 7659 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 7660 /* Alpha specific */ 7661 case TARGET_NR_getxuid: 7662 { 7663 uid_t euid; 7664 euid=geteuid(); 7665 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 7666 } 7667 ret = get_errno(getuid()); 7668 break; 7669 #endif 7670 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 7671 /* Alpha specific */ 7672 case TARGET_NR_getxgid: 7673 { 7674 uid_t egid; 7675 egid=getegid(); 7676 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 7677 } 7678 ret = get_errno(getgid()); 7679 break; 7680 #endif 7681 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 7682 /* Alpha specific */ 7683 case TARGET_NR_osf_getsysinfo: 7684 ret = -TARGET_EOPNOTSUPP; 7685 switch (arg1) { 7686 case TARGET_GSI_IEEE_FP_CONTROL: 7687 { 7688 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 7689 7690 /* Copied from linux ieee_fpcr_to_swcr. 
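 * That kernel helper repacks the hardware FPCR bits into the software
 * completion control word (SWCR) layout user space expects back from
 * GSI_IEEE_FP_CONTROL; the FPCR stores trap *disable* bits where the SWCR
 * stores trap enables, hence the ~fpcr shifts below.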
*/ 7691 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 7692 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 7693 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 7694 | SWCR_TRAP_ENABLE_DZE 7695 | SWCR_TRAP_ENABLE_OVF); 7696 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 7697 | SWCR_TRAP_ENABLE_INE); 7698 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 7699 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 7700 7701 if (put_user_u64 (swcr, arg2)) 7702 goto efault; 7703 ret = 0; 7704 } 7705 break; 7706 7707 /* case GSI_IEEE_STATE_AT_SIGNAL: 7708 -- Not implemented in linux kernel. 7709 case GSI_UACPROC: 7710 -- Retrieves current unaligned access state; not much used. 7711 case GSI_PROC_TYPE: 7712 -- Retrieves implver information; surely not used. 7713 case GSI_GET_HWRPB: 7714 -- Grabs a copy of the HWRPB; surely not used. 7715 */ 7716 } 7717 break; 7718 #endif 7719 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 7720 /* Alpha specific */ 7721 case TARGET_NR_osf_setsysinfo: 7722 ret = -TARGET_EOPNOTSUPP; 7723 switch (arg1) { 7724 case TARGET_SSI_IEEE_FP_CONTROL: 7725 { 7726 uint64_t swcr, fpcr, orig_fpcr; 7727 7728 if (get_user_u64 (swcr, arg2)) { 7729 goto efault; 7730 } 7731 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 7732 fpcr = orig_fpcr & FPCR_DYN_MASK; 7733 7734 /* Copied from linux ieee_swcr_to_fpcr. */ 7735 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 7736 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 7737 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 7738 | SWCR_TRAP_ENABLE_DZE 7739 | SWCR_TRAP_ENABLE_OVF)) << 48; 7740 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 7741 | SWCR_TRAP_ENABLE_INE)) << 57; 7742 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 7743 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 7744 7745 cpu_alpha_store_fpcr(cpu_env, fpcr); 7746 ret = 0; 7747 } 7748 break; 7749 7750 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 7751 { 7752 uint64_t exc, fpcr, orig_fpcr; 7753 int si_code; 7754 7755 if (get_user_u64(exc, arg2)) { 7756 goto efault; 7757 } 7758 7759 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 7760 7761 /* We only add to the exception status here. */ 7762 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 7763 7764 cpu_alpha_store_fpcr(cpu_env, fpcr); 7765 ret = 0; 7766 7767 /* Old exceptions are not signaled. */ 7768 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 7769 7770 /* If any exceptions set by this call, 7771 and are unmasked, send a signal. */ 7772 si_code = 0; 7773 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 7774 si_code = TARGET_FPE_FLTRES; 7775 } 7776 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 7777 si_code = TARGET_FPE_FLTUND; 7778 } 7779 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 7780 si_code = TARGET_FPE_FLTOVF; 7781 } 7782 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 7783 si_code = TARGET_FPE_FLTDIV; 7784 } 7785 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 7786 si_code = TARGET_FPE_FLTINV; 7787 } 7788 if (si_code != 0) { 7789 target_siginfo_t info; 7790 info.si_signo = SIGFPE; 7791 info.si_errno = 0; 7792 info.si_code = si_code; 7793 info._sifields._sigfault._addr 7794 = ((CPUArchState *)cpu_env)->pc; 7795 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 7796 } 7797 } 7798 break; 7799 7800 /* case SSI_NVPAIRS: 7801 -- Used with SSIN_UACPROC to enable unaligned accesses. 7802 case SSI_IEEE_STATE_AT_SIGNAL: 7803 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 7804 -- Not implemented in linux kernel 7805 */ 7806 } 7807 break; 7808 #endif 7809 #ifdef TARGET_NR_osf_sigprocmask 7810 /* Alpha specific. 
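 * Unlike the generic sigprocmask case above, the OSF/1 variant takes the
 * new mask by value in arg2 and returns the old mask directly as the
 * syscall result, roughly: old_mask = sigprocmask(how, new_mask); which
 * is why the converted old set ends up in ret below.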
*/ 7811 case TARGET_NR_osf_sigprocmask: 7812 { 7813 abi_ulong mask; 7814 int how; 7815 sigset_t set, oldset; 7816 7817 switch(arg1) { 7818 case TARGET_SIG_BLOCK: 7819 how = SIG_BLOCK; 7820 break; 7821 case TARGET_SIG_UNBLOCK: 7822 how = SIG_UNBLOCK; 7823 break; 7824 case TARGET_SIG_SETMASK: 7825 how = SIG_SETMASK; 7826 break; 7827 default: 7828 ret = -TARGET_EINVAL; 7829 goto fail; 7830 } 7831 mask = arg2; 7832 target_to_host_old_sigset(&set, &mask); 7833 sigprocmask(how, &set, &oldset); 7834 host_to_target_old_sigset(&mask, &oldset); 7835 ret = mask; 7836 } 7837 break; 7838 #endif 7839 7840 #ifdef TARGET_NR_getgid32 7841 case TARGET_NR_getgid32: 7842 ret = get_errno(getgid()); 7843 break; 7844 #endif 7845 #ifdef TARGET_NR_geteuid32 7846 case TARGET_NR_geteuid32: 7847 ret = get_errno(geteuid()); 7848 break; 7849 #endif 7850 #ifdef TARGET_NR_getegid32 7851 case TARGET_NR_getegid32: 7852 ret = get_errno(getegid()); 7853 break; 7854 #endif 7855 #ifdef TARGET_NR_setreuid32 7856 case TARGET_NR_setreuid32: 7857 ret = get_errno(setreuid(arg1, arg2)); 7858 break; 7859 #endif 7860 #ifdef TARGET_NR_setregid32 7861 case TARGET_NR_setregid32: 7862 ret = get_errno(setregid(arg1, arg2)); 7863 break; 7864 #endif 7865 #ifdef TARGET_NR_getgroups32 7866 case TARGET_NR_getgroups32: 7867 { 7868 int gidsetsize = arg1; 7869 uint32_t *target_grouplist; 7870 gid_t *grouplist; 7871 int i; 7872 7873 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7874 ret = get_errno(getgroups(gidsetsize, grouplist)); 7875 if (gidsetsize == 0) 7876 break; 7877 if (!is_error(ret)) { 7878 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 7879 if (!target_grouplist) { 7880 ret = -TARGET_EFAULT; 7881 goto fail; 7882 } 7883 for(i = 0;i < ret; i++) 7884 target_grouplist[i] = tswap32(grouplist[i]); 7885 unlock_user(target_grouplist, arg2, gidsetsize * 4); 7886 } 7887 } 7888 break; 7889 #endif 7890 #ifdef TARGET_NR_setgroups32 7891 case TARGET_NR_setgroups32: 7892 { 7893 int gidsetsize = arg1; 7894 uint32_t *target_grouplist; 7895 gid_t *grouplist; 7896 int i; 7897 7898 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7899 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 7900 if (!target_grouplist) { 7901 ret = -TARGET_EFAULT; 7902 goto fail; 7903 } 7904 for(i = 0;i < gidsetsize; i++) 7905 grouplist[i] = tswap32(target_grouplist[i]); 7906 unlock_user(target_grouplist, arg2, 0); 7907 ret = get_errno(setgroups(gidsetsize, grouplist)); 7908 } 7909 break; 7910 #endif 7911 #ifdef TARGET_NR_fchown32 7912 case TARGET_NR_fchown32: 7913 ret = get_errno(fchown(arg1, arg2, arg3)); 7914 break; 7915 #endif 7916 #ifdef TARGET_NR_setresuid32 7917 case TARGET_NR_setresuid32: 7918 ret = get_errno(setresuid(arg1, arg2, arg3)); 7919 break; 7920 #endif 7921 #ifdef TARGET_NR_getresuid32 7922 case TARGET_NR_getresuid32: 7923 { 7924 uid_t ruid, euid, suid; 7925 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7926 if (!is_error(ret)) { 7927 if (put_user_u32(ruid, arg1) 7928 || put_user_u32(euid, arg2) 7929 || put_user_u32(suid, arg3)) 7930 goto efault; 7931 } 7932 } 7933 break; 7934 #endif 7935 #ifdef TARGET_NR_setresgid32 7936 case TARGET_NR_setresgid32: 7937 ret = get_errno(setresgid(arg1, arg2, arg3)); 7938 break; 7939 #endif 7940 #ifdef TARGET_NR_getresgid32 7941 case TARGET_NR_getresgid32: 7942 { 7943 gid_t rgid, egid, sgid; 7944 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7945 if (!is_error(ret)) { 7946 if (put_user_u32(rgid, arg1) 7947 || put_user_u32(egid, arg2) 7948 || put_user_u32(sgid, arg3)) 7949 goto efault; 
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif

    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
        mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
        /* fall through to the common fadvise handling below */
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
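    /* fcntl64 only exists on 32-bit ABIs: it takes a 64-bit struct flock64,
     * which has to be byte-swapped between the target and host layouts.
     * ARM EABI targets use a differently padded layout (target_eabi_flock64),
     * so they get their own conversion path below. */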
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
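    /* Extended attribute syscalls: the name and value buffers live in target
     * memory, so each variant locks them with lock_user()/lock_user_string()
     * before calling the corresponding host *xattr() function. */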
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        break;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        break;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif

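    /* POSIX clock syscalls: struct timespec values are converted between the
     * target and host layouts with target_to_host_timespec() and
     * host_to_target_timespec(). */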
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif

#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif

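    /* POSIX message queue syscalls: struct mq_attr and struct timespec
     * parameters are converted between target and host representations
     * before calling the host mq_*() functions. */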
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1);
            if (arg4 != 0)
                copy_from_user_mq_attr(&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            }
            unlock_user(p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
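    /* epoll syscalls: struct epoll_event is byte-swapped between target and
     * host; the epoll_data_t union is copied as an opaque 64-bit value. */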
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
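    /* prlimit64: struct rlimit64 limits are byte-swapped between the target
     * and host layouts in both directions. */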
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}