/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"

#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])


#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}


#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif
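/*
 * Illustration only (not part of the original source): a declaration such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count)
 *
 * expands to roughly
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * which is why the __NR_sys_* aliases above exist: the generated wrapper
 * pastes the "sys_" name into the __NR_ token, and the alias points it back
 * at the real host syscall number.  The wrappers return the raw host result;
 * callers pick up errno via get_errno().
 */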
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
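/*
 * Added note (not in the original): each row of the open-flags table below is
 * a bitmask_transtbl entry of the form
 *
 *     { target_mask, target_bits, host_mask, host_bits }
 *
 * i.e. "if (value & target_mask) == target_bits, set host_bits in the result"
 * when translating guest open flags to host flags, and the mirror image in
 * the other direction.  This matches how the generic bitmask translation
 * helpers (target_to_host_bitmask()/host_to_target_bitmask()) consume such
 * tables; the O_ACCMODE rows, for example, remap the guest's O_WRONLY/O_RDWR
 * encoding onto the host's.
 */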
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}

#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */

#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

/* ARM EABI and MIPS expect 64-bit types to be aligned on even/odd pairs of
 * registers. */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
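/*
 * Added note (not in the original): when regpairs_aligned() returns 1, a
 * guest syscall such as truncate64(path, length) delivers the 64-bit length
 * in an even/odd register pair, e.g. arg3/arg4 with arg2 left unused, rather
 * than packed into arg2/arg3.  The syscall dispatch code later in this file
 * uses this helper to decide which pair of guest registers to recombine into
 * a single 64-bit host value.
 */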
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
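/*
 * Added note (not in the original): on an Alpha host (HOST_HZ == 1024)
 * emulating a guest with TARGET_HZ == 100, a host value of 1024 ticks, i.e.
 * one second, is reported to the guest as (1024 * 100) / 1024 = 100 ticks,
 * so times() results keep describing the same interval in the guest's own
 * clock-tick unit.
 */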
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
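/*
 * Added note (not in the original): the two rlimit helpers above deliberately
 * saturate instead of truncating.  A guest TARGET_RLIM_INFINITY becomes the
 * host RLIM_INFINITY, and any limit that cannot be represented unchanged in
 * the destination type is reported as infinity rather than as a silently
 * wrapped smaller limit.
 */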
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
                                (cmsg->cmsg_type == SCM_RIGHTS)) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
                                (cmsg->cmsg_type == SO_TIMESTAMP) &&
                                (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                                    (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
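/*
 * Added note (not in the original): the two control-message converters above
 * cannot simply memcpy() the whole buffer, because CMSG_ALIGN, and therefore
 * CMSG_LEN/CMSG_SPACE, can differ between the host and the guest ABI.  For
 * example, a guest cmsghdr carrying three SCM_RIGHTS descriptors arrives with
 * cmsg_len == TARGET_CMSG_LEN(3 * sizeof(int)); the host copy is rebuilt with
 * cmsg_len == CMSG_LEN(3 * sizeof(int)), only the payload length is carried
 * over, and each descriptor is byte-swapped individually.
 */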
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        errno = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            errno = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                errno = EFAULT;
                goto fail;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    free(vec);
 fail2:
    unlock_user(target_vec, target_addr, 0);
    return NULL;
}

static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
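/*
 * Added note (not in the original): do_bind() allocates addrlen + 1 bytes,
 * presumably so that target_to_host_sockaddr() has room to grow the length
 * by one byte when it NUL-terminates an AF_UNIX sun_path that the guest
 * passed without the trailing '\0' (see the sun_path fixup earlier in this
 * file).
 */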
*/ 1910 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1911 socklen_t addrlen) 1912 { 1913 void *addr; 1914 abi_long ret; 1915 1916 if ((int)addrlen < 0) { 1917 return -TARGET_EINVAL; 1918 } 1919 1920 addr = alloca(addrlen); 1921 1922 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1923 if (ret) 1924 return ret; 1925 1926 return get_errno(connect(sockfd, addr, addrlen)); 1927 } 1928 1929 /* do_sendrecvmsg() Must return target values and target errnos. */ 1930 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1931 int flags, int send) 1932 { 1933 abi_long ret, len; 1934 struct target_msghdr *msgp; 1935 struct msghdr msg; 1936 int count; 1937 struct iovec *vec; 1938 abi_ulong target_vec; 1939 1940 /* FIXME */ 1941 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1942 msgp, 1943 target_msg, 1944 send ? 1 : 0)) 1945 return -TARGET_EFAULT; 1946 if (msgp->msg_name) { 1947 msg.msg_namelen = tswap32(msgp->msg_namelen); 1948 msg.msg_name = alloca(msg.msg_namelen); 1949 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1950 msg.msg_namelen); 1951 if (ret) { 1952 goto out2; 1953 } 1954 } else { 1955 msg.msg_name = NULL; 1956 msg.msg_namelen = 0; 1957 } 1958 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1959 msg.msg_control = alloca(msg.msg_controllen); 1960 msg.msg_flags = tswap32(msgp->msg_flags); 1961 1962 count = tswapal(msgp->msg_iovlen); 1963 target_vec = tswapal(msgp->msg_iov); 1964 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 1965 target_vec, count, send); 1966 if (vec == NULL) { 1967 ret = -host_to_target_errno(errno); 1968 goto out2; 1969 } 1970 msg.msg_iovlen = count; 1971 msg.msg_iov = vec; 1972 1973 if (send) { 1974 ret = target_to_host_cmsg(&msg, msgp); 1975 if (ret == 0) 1976 ret = get_errno(sendmsg(fd, &msg, flags)); 1977 } else { 1978 ret = get_errno(recvmsg(fd, &msg, flags)); 1979 if (!is_error(ret)) { 1980 len = ret; 1981 ret = host_to_target_cmsg(msgp, &msg); 1982 if (!is_error(ret)) { 1983 msgp->msg_namelen = tswap32(msg.msg_namelen); 1984 if (msg.msg_name != NULL) { 1985 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 1986 msg.msg_name, msg.msg_namelen); 1987 if (ret) { 1988 goto out; 1989 } 1990 } 1991 1992 ret = len; 1993 } 1994 } 1995 } 1996 1997 out: 1998 unlock_iovec(vec, target_vec, count, !send); 1999 out2: 2000 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 2001 return ret; 2002 } 2003 2004 /* do_accept() Must return target values and target errnos. */ 2005 static abi_long do_accept(int fd, abi_ulong target_addr, 2006 abi_ulong target_addrlen_addr) 2007 { 2008 socklen_t addrlen; 2009 void *addr; 2010 abi_long ret; 2011 2012 if (target_addr == 0) 2013 return get_errno(accept(fd, NULL, NULL)); 2014 2015 /* linux returns EINVAL if addrlen pointer is invalid */ 2016 if (get_user_u32(addrlen, target_addrlen_addr)) 2017 return -TARGET_EINVAL; 2018 2019 if ((int)addrlen < 0) { 2020 return -TARGET_EINVAL; 2021 } 2022 2023 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2024 return -TARGET_EINVAL; 2025 2026 addr = alloca(addrlen); 2027 2028 ret = get_errno(accept(fd, addr, &addrlen)); 2029 if (!is_error(ret)) { 2030 host_to_target_sockaddr(target_addr, addr, addrlen); 2031 if (put_user_u32(addrlen, target_addrlen_addr)) 2032 ret = -TARGET_EFAULT; 2033 } 2034 return ret; 2035 } 2036 2037 /* do_getpeername() Must return target values and target errnos. 
*/ 2038 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2039 abi_ulong target_addrlen_addr) 2040 { 2041 socklen_t addrlen; 2042 void *addr; 2043 abi_long ret; 2044 2045 if (get_user_u32(addrlen, target_addrlen_addr)) 2046 return -TARGET_EFAULT; 2047 2048 if ((int)addrlen < 0) { 2049 return -TARGET_EINVAL; 2050 } 2051 2052 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2053 return -TARGET_EFAULT; 2054 2055 addr = alloca(addrlen); 2056 2057 ret = get_errno(getpeername(fd, addr, &addrlen)); 2058 if (!is_error(ret)) { 2059 host_to_target_sockaddr(target_addr, addr, addrlen); 2060 if (put_user_u32(addrlen, target_addrlen_addr)) 2061 ret = -TARGET_EFAULT; 2062 } 2063 return ret; 2064 } 2065 2066 /* do_getsockname() Must return target values and target errnos. */ 2067 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2068 abi_ulong target_addrlen_addr) 2069 { 2070 socklen_t addrlen; 2071 void *addr; 2072 abi_long ret; 2073 2074 if (get_user_u32(addrlen, target_addrlen_addr)) 2075 return -TARGET_EFAULT; 2076 2077 if ((int)addrlen < 0) { 2078 return -TARGET_EINVAL; 2079 } 2080 2081 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2082 return -TARGET_EFAULT; 2083 2084 addr = alloca(addrlen); 2085 2086 ret = get_errno(getsockname(fd, addr, &addrlen)); 2087 if (!is_error(ret)) { 2088 host_to_target_sockaddr(target_addr, addr, addrlen); 2089 if (put_user_u32(addrlen, target_addrlen_addr)) 2090 ret = -TARGET_EFAULT; 2091 } 2092 return ret; 2093 } 2094 2095 /* do_socketpair() Must return target values and target errnos. */ 2096 static abi_long do_socketpair(int domain, int type, int protocol, 2097 abi_ulong target_tab_addr) 2098 { 2099 int tab[2]; 2100 abi_long ret; 2101 2102 ret = get_errno(socketpair(domain, type, protocol, tab)); 2103 if (!is_error(ret)) { 2104 if (put_user_s32(tab[0], target_tab_addr) 2105 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2106 ret = -TARGET_EFAULT; 2107 } 2108 return ret; 2109 } 2110 2111 /* do_sendto() Must return target values and target errnos. */ 2112 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2113 abi_ulong target_addr, socklen_t addrlen) 2114 { 2115 void *addr; 2116 void *host_msg; 2117 abi_long ret; 2118 2119 if ((int)addrlen < 0) { 2120 return -TARGET_EINVAL; 2121 } 2122 2123 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2124 if (!host_msg) 2125 return -TARGET_EFAULT; 2126 if (target_addr) { 2127 addr = alloca(addrlen); 2128 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2129 if (ret) { 2130 unlock_user(host_msg, msg, 0); 2131 return ret; 2132 } 2133 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2134 } else { 2135 ret = get_errno(send(fd, host_msg, len, flags)); 2136 } 2137 unlock_user(host_msg, msg, 0); 2138 return ret; 2139 } 2140 2141 /* do_recvfrom() Must return target values and target errnos. 
*/ 2142 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2143 abi_ulong target_addr, 2144 abi_ulong target_addrlen) 2145 { 2146 socklen_t addrlen; 2147 void *addr; 2148 void *host_msg; 2149 abi_long ret; 2150 2151 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2152 if (!host_msg) 2153 return -TARGET_EFAULT; 2154 if (target_addr) { 2155 if (get_user_u32(addrlen, target_addrlen)) { 2156 ret = -TARGET_EFAULT; 2157 goto fail; 2158 } 2159 if ((int)addrlen < 0) { 2160 ret = -TARGET_EINVAL; 2161 goto fail; 2162 } 2163 addr = alloca(addrlen); 2164 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2165 } else { 2166 addr = NULL; /* To keep compiler quiet. */ 2167 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2168 } 2169 if (!is_error(ret)) { 2170 if (target_addr) { 2171 host_to_target_sockaddr(target_addr, addr, addrlen); 2172 if (put_user_u32(addrlen, target_addrlen)) { 2173 ret = -TARGET_EFAULT; 2174 goto fail; 2175 } 2176 } 2177 unlock_user(host_msg, msg, len); 2178 } else { 2179 fail: 2180 unlock_user(host_msg, msg, 0); 2181 } 2182 return ret; 2183 } 2184 2185 #ifdef TARGET_NR_socketcall 2186 /* do_socketcall() Must return target values and target errnos. */ 2187 static abi_long do_socketcall(int num, abi_ulong vptr) 2188 { 2189 abi_long ret; 2190 const int n = sizeof(abi_ulong); 2191 2192 switch(num) { 2193 case SOCKOP_socket: 2194 { 2195 abi_ulong domain, type, protocol; 2196 2197 if (get_user_ual(domain, vptr) 2198 || get_user_ual(type, vptr + n) 2199 || get_user_ual(protocol, vptr + 2 * n)) 2200 return -TARGET_EFAULT; 2201 2202 ret = do_socket(domain, type, protocol); 2203 } 2204 break; 2205 case SOCKOP_bind: 2206 { 2207 abi_ulong sockfd; 2208 abi_ulong target_addr; 2209 socklen_t addrlen; 2210 2211 if (get_user_ual(sockfd, vptr) 2212 || get_user_ual(target_addr, vptr + n) 2213 || get_user_ual(addrlen, vptr + 2 * n)) 2214 return -TARGET_EFAULT; 2215 2216 ret = do_bind(sockfd, target_addr, addrlen); 2217 } 2218 break; 2219 case SOCKOP_connect: 2220 { 2221 abi_ulong sockfd; 2222 abi_ulong target_addr; 2223 socklen_t addrlen; 2224 2225 if (get_user_ual(sockfd, vptr) 2226 || get_user_ual(target_addr, vptr + n) 2227 || get_user_ual(addrlen, vptr + 2 * n)) 2228 return -TARGET_EFAULT; 2229 2230 ret = do_connect(sockfd, target_addr, addrlen); 2231 } 2232 break; 2233 case SOCKOP_listen: 2234 { 2235 abi_ulong sockfd, backlog; 2236 2237 if (get_user_ual(sockfd, vptr) 2238 || get_user_ual(backlog, vptr + n)) 2239 return -TARGET_EFAULT; 2240 2241 ret = get_errno(listen(sockfd, backlog)); 2242 } 2243 break; 2244 case SOCKOP_accept: 2245 { 2246 abi_ulong sockfd; 2247 abi_ulong target_addr, target_addrlen; 2248 2249 if (get_user_ual(sockfd, vptr) 2250 || get_user_ual(target_addr, vptr + n) 2251 || get_user_ual(target_addrlen, vptr + 2 * n)) 2252 return -TARGET_EFAULT; 2253 2254 ret = do_accept(sockfd, target_addr, target_addrlen); 2255 } 2256 break; 2257 case SOCKOP_getsockname: 2258 { 2259 abi_ulong sockfd; 2260 abi_ulong target_addr, target_addrlen; 2261 2262 if (get_user_ual(sockfd, vptr) 2263 || get_user_ual(target_addr, vptr + n) 2264 || get_user_ual(target_addrlen, vptr + 2 * n)) 2265 return -TARGET_EFAULT; 2266 2267 ret = do_getsockname(sockfd, target_addr, target_addrlen); 2268 } 2269 break; 2270 case SOCKOP_getpeername: 2271 { 2272 abi_ulong sockfd; 2273 abi_ulong target_addr, target_addrlen; 2274 2275 if (get_user_ual(sockfd, vptr) 2276 || get_user_ual(target_addr, vptr + n) 2277 || get_user_ual(target_addrlen, vptr + 2 * n)) 2278 return 
-TARGET_EFAULT; 2279 2280 ret = do_getpeername(sockfd, target_addr, target_addrlen); 2281 } 2282 break; 2283 case SOCKOP_socketpair: 2284 { 2285 abi_ulong domain, type, protocol; 2286 abi_ulong tab; 2287 2288 if (get_user_ual(domain, vptr) 2289 || get_user_ual(type, vptr + n) 2290 || get_user_ual(protocol, vptr + 2 * n) 2291 || get_user_ual(tab, vptr + 3 * n)) 2292 return -TARGET_EFAULT; 2293 2294 ret = do_socketpair(domain, type, protocol, tab); 2295 } 2296 break; 2297 case SOCKOP_send: 2298 { 2299 abi_ulong sockfd; 2300 abi_ulong msg; 2301 size_t len; 2302 abi_ulong flags; 2303 2304 if (get_user_ual(sockfd, vptr) 2305 || get_user_ual(msg, vptr + n) 2306 || get_user_ual(len, vptr + 2 * n) 2307 || get_user_ual(flags, vptr + 3 * n)) 2308 return -TARGET_EFAULT; 2309 2310 ret = do_sendto(sockfd, msg, len, flags, 0, 0); 2311 } 2312 break; 2313 case SOCKOP_recv: 2314 { 2315 abi_ulong sockfd; 2316 abi_ulong msg; 2317 size_t len; 2318 abi_ulong flags; 2319 2320 if (get_user_ual(sockfd, vptr) 2321 || get_user_ual(msg, vptr + n) 2322 || get_user_ual(len, vptr + 2 * n) 2323 || get_user_ual(flags, vptr + 3 * n)) 2324 return -TARGET_EFAULT; 2325 2326 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0); 2327 } 2328 break; 2329 case SOCKOP_sendto: 2330 { 2331 abi_ulong sockfd; 2332 abi_ulong msg; 2333 size_t len; 2334 abi_ulong flags; 2335 abi_ulong addr; 2336 socklen_t addrlen; 2337 2338 if (get_user_ual(sockfd, vptr) 2339 || get_user_ual(msg, vptr + n) 2340 || get_user_ual(len, vptr + 2 * n) 2341 || get_user_ual(flags, vptr + 3 * n) 2342 || get_user_ual(addr, vptr + 4 * n) 2343 || get_user_ual(addrlen, vptr + 5 * n)) 2344 return -TARGET_EFAULT; 2345 2346 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen); 2347 } 2348 break; 2349 case SOCKOP_recvfrom: 2350 { 2351 abi_ulong sockfd; 2352 abi_ulong msg; 2353 size_t len; 2354 abi_ulong flags; 2355 abi_ulong addr; 2356 socklen_t addrlen; 2357 2358 if (get_user_ual(sockfd, vptr) 2359 || get_user_ual(msg, vptr + n) 2360 || get_user_ual(len, vptr + 2 * n) 2361 || get_user_ual(flags, vptr + 3 * n) 2362 || get_user_ual(addr, vptr + 4 * n) 2363 || get_user_ual(addrlen, vptr + 5 * n)) 2364 return -TARGET_EFAULT; 2365 2366 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen); 2367 } 2368 break; 2369 case SOCKOP_shutdown: 2370 { 2371 abi_ulong sockfd, how; 2372 2373 if (get_user_ual(sockfd, vptr) 2374 || get_user_ual(how, vptr + n)) 2375 return -TARGET_EFAULT; 2376 2377 ret = get_errno(shutdown(sockfd, how)); 2378 } 2379 break; 2380 case SOCKOP_sendmsg: 2381 case SOCKOP_recvmsg: 2382 { 2383 abi_ulong fd; 2384 abi_ulong target_msg; 2385 abi_ulong flags; 2386 2387 if (get_user_ual(fd, vptr) 2388 || get_user_ual(target_msg, vptr + n) 2389 || get_user_ual(flags, vptr + 2 * n)) 2390 return -TARGET_EFAULT; 2391 2392 ret = do_sendrecvmsg(fd, target_msg, flags, 2393 (num == SOCKOP_sendmsg)); 2394 } 2395 break; 2396 case SOCKOP_setsockopt: 2397 { 2398 abi_ulong sockfd; 2399 abi_ulong level; 2400 abi_ulong optname; 2401 abi_ulong optval; 2402 socklen_t optlen; 2403 2404 if (get_user_ual(sockfd, vptr) 2405 || get_user_ual(level, vptr + n) 2406 || get_user_ual(optname, vptr + 2 * n) 2407 || get_user_ual(optval, vptr + 3 * n) 2408 || get_user_ual(optlen, vptr + 4 * n)) 2409 return -TARGET_EFAULT; 2410 2411 ret = do_setsockopt(sockfd, level, optname, optval, optlen); 2412 } 2413 break; 2414 case SOCKOP_getsockopt: 2415 { 2416 abi_ulong sockfd; 2417 abi_ulong level; 2418 abi_ulong optname; 2419 abi_ulong optval; 2420 socklen_t optlen; 2421 2422 if (get_user_ual(sockfd, 
vptr) 2423 || get_user_ual(level, vptr + n) 2424 || get_user_ual(optname, vptr + 2 * n) 2425 || get_user_ual(optval, vptr + 3 * n) 2426 || get_user_ual(optlen, vptr + 4 * n)) 2427 return -TARGET_EFAULT; 2428 2429 ret = do_getsockopt(sockfd, level, optname, optval, optlen); 2430 } 2431 break; 2432 default: 2433 gemu_log("Unsupported socketcall: %d\n", num); 2434 ret = -TARGET_ENOSYS; 2435 break; 2436 } 2437 return ret; 2438 } 2439 #endif 2440 2441 #define N_SHM_REGIONS 32 2442 2443 static struct shm_region { 2444 abi_ulong start; 2445 abi_ulong size; 2446 } shm_regions[N_SHM_REGIONS]; 2447 2448 struct target_ipc_perm 2449 { 2450 abi_long __key; 2451 abi_ulong uid; 2452 abi_ulong gid; 2453 abi_ulong cuid; 2454 abi_ulong cgid; 2455 unsigned short int mode; 2456 unsigned short int __pad1; 2457 unsigned short int __seq; 2458 unsigned short int __pad2; 2459 abi_ulong __unused1; 2460 abi_ulong __unused2; 2461 }; 2462 2463 struct target_semid_ds 2464 { 2465 struct target_ipc_perm sem_perm; 2466 abi_ulong sem_otime; 2467 abi_ulong __unused1; 2468 abi_ulong sem_ctime; 2469 abi_ulong __unused2; 2470 abi_ulong sem_nsems; 2471 abi_ulong __unused3; 2472 abi_ulong __unused4; 2473 }; 2474 2475 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2476 abi_ulong target_addr) 2477 { 2478 struct target_ipc_perm *target_ip; 2479 struct target_semid_ds *target_sd; 2480 2481 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2482 return -TARGET_EFAULT; 2483 target_ip = &(target_sd->sem_perm); 2484 host_ip->__key = tswapal(target_ip->__key); 2485 host_ip->uid = tswapal(target_ip->uid); 2486 host_ip->gid = tswapal(target_ip->gid); 2487 host_ip->cuid = tswapal(target_ip->cuid); 2488 host_ip->cgid = tswapal(target_ip->cgid); 2489 host_ip->mode = tswap16(target_ip->mode); 2490 unlock_user_struct(target_sd, target_addr, 0); 2491 return 0; 2492 } 2493 2494 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2495 struct ipc_perm *host_ip) 2496 { 2497 struct target_ipc_perm *target_ip; 2498 struct target_semid_ds *target_sd; 2499 2500 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2501 return -TARGET_EFAULT; 2502 target_ip = &(target_sd->sem_perm); 2503 target_ip->__key = tswapal(host_ip->__key); 2504 target_ip->uid = tswapal(host_ip->uid); 2505 target_ip->gid = tswapal(host_ip->gid); 2506 target_ip->cuid = tswapal(host_ip->cuid); 2507 target_ip->cgid = tswapal(host_ip->cgid); 2508 target_ip->mode = tswap16(host_ip->mode); 2509 unlock_user_struct(target_sd, target_addr, 1); 2510 return 0; 2511 } 2512 2513 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2514 abi_ulong target_addr) 2515 { 2516 struct target_semid_ds *target_sd; 2517 2518 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2519 return -TARGET_EFAULT; 2520 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2521 return -TARGET_EFAULT; 2522 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2523 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2524 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2525 unlock_user_struct(target_sd, target_addr, 0); 2526 return 0; 2527 } 2528 2529 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2530 struct semid_ds *host_sd) 2531 { 2532 struct target_semid_ds *target_sd; 2533 2534 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2535 return -TARGET_EFAULT; 2536 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2537 return -TARGET_EFAULT; 2538 
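/* The ipc_perm part of the structure has already been written to the guest by
 * host_to_target_ipc_perm() above; the remaining semid_ds fields are copied
 * and byte-swapped individually below. */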
target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2539 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2540 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2541 unlock_user_struct(target_sd, target_addr, 1); 2542 return 0; 2543 } 2544 2545 struct target_seminfo { 2546 int semmap; 2547 int semmni; 2548 int semmns; 2549 int semmnu; 2550 int semmsl; 2551 int semopm; 2552 int semume; 2553 int semusz; 2554 int semvmx; 2555 int semaem; 2556 }; 2557 2558 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2559 struct seminfo *host_seminfo) 2560 { 2561 struct target_seminfo *target_seminfo; 2562 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2563 return -TARGET_EFAULT; 2564 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2565 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2566 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2567 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2568 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2569 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2570 __put_user(host_seminfo->semume, &target_seminfo->semume); 2571 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2572 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2573 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2574 unlock_user_struct(target_seminfo, target_addr, 1); 2575 return 0; 2576 } 2577 2578 union semun { 2579 int val; 2580 struct semid_ds *buf; 2581 unsigned short *array; 2582 struct seminfo *__buf; 2583 }; 2584 2585 union target_semun { 2586 int val; 2587 abi_ulong buf; 2588 abi_ulong array; 2589 abi_ulong __buf; 2590 }; 2591 2592 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2593 abi_ulong target_addr) 2594 { 2595 int nsems; 2596 unsigned short *array; 2597 union semun semun; 2598 struct semid_ds semid_ds; 2599 int i, ret; 2600 2601 semun.buf = &semid_ds; 2602 2603 ret = semctl(semid, 0, IPC_STAT, semun); 2604 if (ret == -1) 2605 return get_errno(ret); 2606 2607 nsems = semid_ds.sem_nsems; 2608 2609 *host_array = malloc(nsems*sizeof(unsigned short)); 2610 array = lock_user(VERIFY_READ, target_addr, 2611 nsems*sizeof(unsigned short), 1); 2612 if (!array) 2613 return -TARGET_EFAULT; 2614 2615 for(i=0; i<nsems; i++) { 2616 __get_user((*host_array)[i], &array[i]); 2617 } 2618 unlock_user(array, target_addr, 0); 2619 2620 return 0; 2621 } 2622 2623 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2624 unsigned short **host_array) 2625 { 2626 int nsems; 2627 unsigned short *array; 2628 union semun semun; 2629 struct semid_ds semid_ds; 2630 int i, ret; 2631 2632 semun.buf = &semid_ds; 2633 2634 ret = semctl(semid, 0, IPC_STAT, semun); 2635 if (ret == -1) 2636 return get_errno(ret); 2637 2638 nsems = semid_ds.sem_nsems; 2639 2640 array = lock_user(VERIFY_WRITE, target_addr, 2641 nsems*sizeof(unsigned short), 0); 2642 if (!array) 2643 return -TARGET_EFAULT; 2644 2645 for(i=0; i<nsems; i++) { 2646 __put_user((*host_array)[i], &array[i]); 2647 } 2648 free(*host_array); 2649 unlock_user(array, target_addr, 1); 2650 2651 return 0; 2652 } 2653 2654 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2655 union target_semun target_su) 2656 { 2657 union semun arg; 2658 struct semid_ds dsarg; 2659 unsigned short *array = NULL; 2660 struct seminfo seminfo; 2661 abi_long ret = -TARGET_EINVAL; 2662 abi_long err; 2663 cmd &= 0xff; 2664 2665 switch( cmd ) { 2666 case GETVAL: 2667 case 
SETVAL: 2668 arg.val = tswap32(target_su.val); 2669 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2670 target_su.val = tswap32(arg.val); 2671 break; 2672 case GETALL: 2673 case SETALL: 2674 err = target_to_host_semarray(semid, &array, target_su.array); 2675 if (err) 2676 return err; 2677 arg.array = array; 2678 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2679 err = host_to_target_semarray(semid, target_su.array, &array); 2680 if (err) 2681 return err; 2682 break; 2683 case IPC_STAT: 2684 case IPC_SET: 2685 case SEM_STAT: 2686 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2687 if (err) 2688 return err; 2689 arg.buf = &dsarg; 2690 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2691 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2692 if (err) 2693 return err; 2694 break; 2695 case IPC_INFO: 2696 case SEM_INFO: 2697 arg.__buf = &seminfo; 2698 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2699 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2700 if (err) 2701 return err; 2702 break; 2703 case IPC_RMID: 2704 case GETPID: 2705 case GETNCNT: 2706 case GETZCNT: 2707 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2708 break; 2709 } 2710 2711 return ret; 2712 } 2713 2714 struct target_sembuf { 2715 unsigned short sem_num; 2716 short sem_op; 2717 short sem_flg; 2718 }; 2719 2720 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2721 abi_ulong target_addr, 2722 unsigned nsops) 2723 { 2724 struct target_sembuf *target_sembuf; 2725 int i; 2726 2727 target_sembuf = lock_user(VERIFY_READ, target_addr, 2728 nsops*sizeof(struct target_sembuf), 1); 2729 if (!target_sembuf) 2730 return -TARGET_EFAULT; 2731 2732 for(i=0; i<nsops; i++) { 2733 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2734 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2735 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2736 } 2737 2738 unlock_user(target_sembuf, target_addr, 0); 2739 2740 return 0; 2741 } 2742 2743 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2744 { 2745 struct sembuf sops[nsops]; 2746 2747 if (target_to_host_sembuf(sops, ptr, nsops)) 2748 return -TARGET_EFAULT; 2749 2750 return semop(semid, sops, nsops); 2751 } 2752 2753 struct target_msqid_ds 2754 { 2755 struct target_ipc_perm msg_perm; 2756 abi_ulong msg_stime; 2757 #if TARGET_ABI_BITS == 32 2758 abi_ulong __unused1; 2759 #endif 2760 abi_ulong msg_rtime; 2761 #if TARGET_ABI_BITS == 32 2762 abi_ulong __unused2; 2763 #endif 2764 abi_ulong msg_ctime; 2765 #if TARGET_ABI_BITS == 32 2766 abi_ulong __unused3; 2767 #endif 2768 abi_ulong __msg_cbytes; 2769 abi_ulong msg_qnum; 2770 abi_ulong msg_qbytes; 2771 abi_ulong msg_lspid; 2772 abi_ulong msg_lrpid; 2773 abi_ulong __unused4; 2774 abi_ulong __unused5; 2775 }; 2776 2777 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2778 abi_ulong target_addr) 2779 { 2780 struct target_msqid_ds *target_md; 2781 2782 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2783 return -TARGET_EFAULT; 2784 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2785 return -TARGET_EFAULT; 2786 host_md->msg_stime = tswapal(target_md->msg_stime); 2787 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2788 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2789 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2790 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2791 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2792 host_md->msg_lspid = 
tswapal(target_md->msg_lspid); 2793 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2794 unlock_user_struct(target_md, target_addr, 0); 2795 return 0; 2796 } 2797 2798 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2799 struct msqid_ds *host_md) 2800 { 2801 struct target_msqid_ds *target_md; 2802 2803 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2804 return -TARGET_EFAULT; 2805 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2806 return -TARGET_EFAULT; 2807 target_md->msg_stime = tswapal(host_md->msg_stime); 2808 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2809 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2810 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2811 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2812 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2813 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2814 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2815 unlock_user_struct(target_md, target_addr, 1); 2816 return 0; 2817 } 2818 2819 struct target_msginfo { 2820 int msgpool; 2821 int msgmap; 2822 int msgmax; 2823 int msgmnb; 2824 int msgmni; 2825 int msgssz; 2826 int msgtql; 2827 unsigned short int msgseg; 2828 }; 2829 2830 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2831 struct msginfo *host_msginfo) 2832 { 2833 struct target_msginfo *target_msginfo; 2834 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2835 return -TARGET_EFAULT; 2836 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2837 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2838 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2839 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2840 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2841 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2842 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2843 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2844 unlock_user_struct(target_msginfo, target_addr, 1); 2845 return 0; 2846 } 2847 2848 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2849 { 2850 struct msqid_ds dsarg; 2851 struct msginfo msginfo; 2852 abi_long ret = -TARGET_EINVAL; 2853 2854 cmd &= 0xff; 2855 2856 switch (cmd) { 2857 case IPC_STAT: 2858 case IPC_SET: 2859 case MSG_STAT: 2860 if (target_to_host_msqid_ds(&dsarg,ptr)) 2861 return -TARGET_EFAULT; 2862 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2863 if (host_to_target_msqid_ds(ptr,&dsarg)) 2864 return -TARGET_EFAULT; 2865 break; 2866 case IPC_RMID: 2867 ret = get_errno(msgctl(msgid, cmd, NULL)); 2868 break; 2869 case IPC_INFO: 2870 case MSG_INFO: 2871 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2872 if (host_to_target_msginfo(ptr, &msginfo)) 2873 return -TARGET_EFAULT; 2874 break; 2875 } 2876 2877 return ret; 2878 } 2879 2880 struct target_msgbuf { 2881 abi_long mtype; 2882 char mtext[1]; 2883 }; 2884 2885 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2886 unsigned int msgsz, int msgflg) 2887 { 2888 struct target_msgbuf *target_mb; 2889 struct msgbuf *host_mb; 2890 abi_long ret = 0; 2891 2892 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2893 return -TARGET_EFAULT; 2894 host_mb = malloc(msgsz+sizeof(long)); 2895 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2896 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2897 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2898 free(host_mb); 2899 
unlock_user_struct(target_mb, msgp, 0); 2900 2901 return ret; 2902 } 2903 2904 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2905 unsigned int msgsz, abi_long msgtyp, 2906 int msgflg) 2907 { 2908 struct target_msgbuf *target_mb; 2909 char *target_mtext; 2910 struct msgbuf *host_mb; 2911 abi_long ret = 0; 2912 2913 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2914 return -TARGET_EFAULT; 2915 2916 host_mb = g_malloc(msgsz+sizeof(long)); 2917 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2918 2919 if (ret > 0) { 2920 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2921 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2922 if (!target_mtext) { 2923 ret = -TARGET_EFAULT; 2924 goto end; 2925 } 2926 memcpy(target_mb->mtext, host_mb->mtext, ret); 2927 unlock_user(target_mtext, target_mtext_addr, ret); 2928 } 2929 2930 target_mb->mtype = tswapal(host_mb->mtype); 2931 2932 end: 2933 if (target_mb) 2934 unlock_user_struct(target_mb, msgp, 1); 2935 g_free(host_mb); 2936 return ret; 2937 } 2938 2939 struct target_shmid_ds 2940 { 2941 struct target_ipc_perm shm_perm; 2942 abi_ulong shm_segsz; 2943 abi_ulong shm_atime; 2944 #if TARGET_ABI_BITS == 32 2945 abi_ulong __unused1; 2946 #endif 2947 abi_ulong shm_dtime; 2948 #if TARGET_ABI_BITS == 32 2949 abi_ulong __unused2; 2950 #endif 2951 abi_ulong shm_ctime; 2952 #if TARGET_ABI_BITS == 32 2953 abi_ulong __unused3; 2954 #endif 2955 int shm_cpid; 2956 int shm_lpid; 2957 abi_ulong shm_nattch; 2958 unsigned long int __unused4; 2959 unsigned long int __unused5; 2960 }; 2961 2962 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2963 abi_ulong target_addr) 2964 { 2965 struct target_shmid_ds *target_sd; 2966 2967 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2968 return -TARGET_EFAULT; 2969 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2970 return -TARGET_EFAULT; 2971 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2972 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2973 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2974 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2975 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2976 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2977 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2978 unlock_user_struct(target_sd, target_addr, 0); 2979 return 0; 2980 } 2981 2982 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2983 struct shmid_ds *host_sd) 2984 { 2985 struct target_shmid_ds *target_sd; 2986 2987 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2988 return -TARGET_EFAULT; 2989 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2990 return -TARGET_EFAULT; 2991 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2992 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2993 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2994 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2995 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2996 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2997 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2998 unlock_user_struct(target_sd, target_addr, 1); 2999 return 0; 3000 } 3001 3002 struct target_shminfo { 3003 abi_ulong shmmax; 3004 abi_ulong shmmin; 3005 abi_ulong shmmni; 3006 abi_ulong shmseg; 3007 abi_ulong shmall; 3008 }; 3009 3010 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 3011 struct shminfo 
*host_shminfo) 3012 { 3013 struct target_shminfo *target_shminfo; 3014 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 3015 return -TARGET_EFAULT; 3016 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 3017 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 3018 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 3019 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 3020 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 3021 unlock_user_struct(target_shminfo, target_addr, 1); 3022 return 0; 3023 } 3024 3025 struct target_shm_info { 3026 int used_ids; 3027 abi_ulong shm_tot; 3028 abi_ulong shm_rss; 3029 abi_ulong shm_swp; 3030 abi_ulong swap_attempts; 3031 abi_ulong swap_successes; 3032 }; 3033 3034 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 3035 struct shm_info *host_shm_info) 3036 { 3037 struct target_shm_info *target_shm_info; 3038 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 3039 return -TARGET_EFAULT; 3040 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 3041 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 3042 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 3043 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 3044 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 3045 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 3046 unlock_user_struct(target_shm_info, target_addr, 1); 3047 return 0; 3048 } 3049 3050 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 3051 { 3052 struct shmid_ds dsarg; 3053 struct shminfo shminfo; 3054 struct shm_info shm_info; 3055 abi_long ret = -TARGET_EINVAL; 3056 3057 cmd &= 0xff; 3058 3059 switch(cmd) { 3060 case IPC_STAT: 3061 case IPC_SET: 3062 case SHM_STAT: 3063 if (target_to_host_shmid_ds(&dsarg, buf)) 3064 return -TARGET_EFAULT; 3065 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 3066 if (host_to_target_shmid_ds(buf, &dsarg)) 3067 return -TARGET_EFAULT; 3068 break; 3069 case IPC_INFO: 3070 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 3071 if (host_to_target_shminfo(buf, &shminfo)) 3072 return -TARGET_EFAULT; 3073 break; 3074 case SHM_INFO: 3075 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 3076 if (host_to_target_shm_info(buf, &shm_info)) 3077 return -TARGET_EFAULT; 3078 break; 3079 case IPC_RMID: 3080 case SHM_LOCK: 3081 case SHM_UNLOCK: 3082 ret = get_errno(shmctl(shmid, cmd, NULL)); 3083 break; 3084 } 3085 3086 return ret; 3087 } 3088 3089 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 3090 { 3091 abi_long raddr; 3092 void *host_raddr; 3093 struct shmid_ds shm_info; 3094 int i,ret; 3095 3096 /* find out the length of the shared memory segment */ 3097 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 3098 if (is_error(ret)) { 3099 /* can't get length, bail out */ 3100 return ret; 3101 } 3102 3103 mmap_lock(); 3104 3105 if (shmaddr) 3106 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 3107 else { 3108 abi_ulong mmap_start; 3109 3110 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 3111 3112 if (mmap_start == -1) { 3113 errno = ENOMEM; 3114 host_raddr = (void *)-1; 3115 } else 3116 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 3117 } 3118 3119 if (host_raddr == (void *)-1) { 3120 mmap_unlock(); 3121 return get_errno((long)host_raddr); 3122 } 3123 raddr=h2g((unsigned long)host_raddr); 3124 3125 
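/* Mirror the new attachment in the guest page flags: the segment becomes
 * readable (and writable unless SHM_RDONLY was requested), and its start and
 * size are recorded in shm_regions[] so that a later do_shmdt() can find the
 * region and clear the flags again. */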
page_set_flags(raddr, raddr + shm_info.shm_segsz, 3126 PAGE_VALID | PAGE_READ | 3127 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3128 3129 for (i = 0; i < N_SHM_REGIONS; i++) { 3130 if (shm_regions[i].start == 0) { 3131 shm_regions[i].start = raddr; 3132 shm_regions[i].size = shm_info.shm_segsz; 3133 break; 3134 } 3135 } 3136 3137 mmap_unlock(); 3138 return raddr; 3139 3140 } 3141 3142 static inline abi_long do_shmdt(abi_ulong shmaddr) 3143 { 3144 int i; 3145 3146 for (i = 0; i < N_SHM_REGIONS; ++i) { 3147 if (shm_regions[i].start == shmaddr) { 3148 shm_regions[i].start = 0; 3149 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3150 break; 3151 } 3152 } 3153 3154 return get_errno(shmdt(g2h(shmaddr))); 3155 } 3156 3157 #ifdef TARGET_NR_ipc 3158 /* ??? This only works with linear mappings. */ 3159 /* do_ipc() must return target values and target errnos. */ 3160 static abi_long do_ipc(unsigned int call, int first, 3161 int second, int third, 3162 abi_long ptr, abi_long fifth) 3163 { 3164 int version; 3165 abi_long ret = 0; 3166 3167 version = call >> 16; 3168 call &= 0xffff; 3169 3170 switch (call) { 3171 case IPCOP_semop: 3172 ret = do_semop(first, ptr, second); 3173 break; 3174 3175 case IPCOP_semget: 3176 ret = get_errno(semget(first, second, third)); 3177 break; 3178 3179 case IPCOP_semctl: 3180 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3181 break; 3182 3183 case IPCOP_msgget: 3184 ret = get_errno(msgget(first, second)); 3185 break; 3186 3187 case IPCOP_msgsnd: 3188 ret = do_msgsnd(first, ptr, second, third); 3189 break; 3190 3191 case IPCOP_msgctl: 3192 ret = do_msgctl(first, second, ptr); 3193 break; 3194 3195 case IPCOP_msgrcv: 3196 switch (version) { 3197 case 0: 3198 { 3199 struct target_ipc_kludge { 3200 abi_long msgp; 3201 abi_long msgtyp; 3202 } *tmp; 3203 3204 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3205 ret = -TARGET_EFAULT; 3206 break; 3207 } 3208 3209 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3210 3211 unlock_user_struct(tmp, ptr, 0); 3212 break; 3213 } 3214 default: 3215 ret = do_msgrcv(first, ptr, second, fifth, third); 3216 } 3217 break; 3218 3219 case IPCOP_shmat: 3220 switch (version) { 3221 default: 3222 { 3223 abi_ulong raddr; 3224 raddr = do_shmat(first, ptr, second); 3225 if (is_error(raddr)) 3226 return get_errno(raddr); 3227 if (put_user_ual(raddr, third)) 3228 return -TARGET_EFAULT; 3229 break; 3230 } 3231 case 1: 3232 ret = -TARGET_EINVAL; 3233 break; 3234 } 3235 break; 3236 case IPCOP_shmdt: 3237 ret = do_shmdt(ptr); 3238 break; 3239 3240 case IPCOP_shmget: 3241 /* IPC_* flag values are the same on all linux platforms */ 3242 ret = get_errno(shmget(first, second, third)); 3243 break; 3244 3245 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3246 case IPCOP_shmctl: 3247 ret = do_shmctl(first, second, third); 3248 break; 3249 default: 3250 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3251 ret = -TARGET_ENOSYS; 3252 break; 3253 } 3254 return ret; 3255 } 3256 #endif 3257 3258 /* kernel structure types definitions */ 3259 3260 #define STRUCT(name, ...) STRUCT_ ## name, 3261 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3262 enum { 3263 #include "syscall_types.h" 3264 }; 3265 #undef STRUCT 3266 #undef STRUCT_SPECIAL 3267 3268 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3269 #define STRUCT_SPECIAL(name) 3270 #include "syscall_types.h" 3271 #undef STRUCT 3272 #undef STRUCT_SPECIAL 3273 3274 typedef struct IOCTLEntry IOCTLEntry; 3275 3276 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3277 int fd, abi_long cmd, abi_long arg); 3278 3279 struct IOCTLEntry { 3280 unsigned int target_cmd; 3281 unsigned int host_cmd; 3282 const char *name; 3283 int access; 3284 do_ioctl_fn *do_ioctl; 3285 const argtype arg_type[5]; 3286 }; 3287 3288 #define IOC_R 0x0001 3289 #define IOC_W 0x0002 3290 #define IOC_RW (IOC_R | IOC_W) 3291 3292 #define MAX_STRUCT_SIZE 4096 3293 3294 #ifdef CONFIG_FIEMAP 3295 /* So fiemap access checks don't overflow on 32 bit systems. 3296 * This is very slightly smaller than the limit imposed by 3297 * the underlying kernel. 3298 */ 3299 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3300 / sizeof(struct fiemap_extent)) 3301 3302 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3303 int fd, abi_long cmd, abi_long arg) 3304 { 3305 /* The parameter for this ioctl is a struct fiemap followed 3306 * by an array of struct fiemap_extent whose size is set 3307 * in fiemap->fm_extent_count. The array is filled in by the 3308 * ioctl. 3309 */ 3310 int target_size_in, target_size_out; 3311 struct fiemap *fm; 3312 const argtype *arg_type = ie->arg_type; 3313 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3314 void *argptr, *p; 3315 abi_long ret; 3316 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3317 uint32_t outbufsz; 3318 int free_fm = 0; 3319 3320 assert(arg_type[0] == TYPE_PTR); 3321 assert(ie->access == IOC_RW); 3322 arg_type++; 3323 target_size_in = thunk_type_size(arg_type, 0); 3324 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3325 if (!argptr) { 3326 return -TARGET_EFAULT; 3327 } 3328 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3329 unlock_user(argptr, arg, 0); 3330 fm = (struct fiemap *)buf_temp; 3331 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3332 return -TARGET_EINVAL; 3333 } 3334 3335 outbufsz = sizeof (*fm) + 3336 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3337 3338 if (outbufsz > MAX_STRUCT_SIZE) { 3339 /* We can't fit all the extents into the fixed size buffer. 3340 * Allocate one that is large enough and use it instead. 
3341 */ 3342 fm = malloc(outbufsz); 3343 if (!fm) { 3344 return -TARGET_ENOMEM; 3345 } 3346 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3347 free_fm = 1; 3348 } 3349 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3350 if (!is_error(ret)) { 3351 target_size_out = target_size_in; 3352 /* An extent_count of 0 means we were only counting the extents 3353 * so there are no structs to copy 3354 */ 3355 if (fm->fm_extent_count != 0) { 3356 target_size_out += fm->fm_mapped_extents * extent_size; 3357 } 3358 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3359 if (!argptr) { 3360 ret = -TARGET_EFAULT; 3361 } else { 3362 /* Convert the struct fiemap */ 3363 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3364 if (fm->fm_extent_count != 0) { 3365 p = argptr + target_size_in; 3366 /* ...and then all the struct fiemap_extents */ 3367 for (i = 0; i < fm->fm_mapped_extents; i++) { 3368 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3369 THUNK_TARGET); 3370 p += extent_size; 3371 } 3372 } 3373 unlock_user(argptr, arg, target_size_out); 3374 } 3375 } 3376 if (free_fm) { 3377 free(fm); 3378 } 3379 return ret; 3380 } 3381 #endif 3382 3383 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3384 int fd, abi_long cmd, abi_long arg) 3385 { 3386 const argtype *arg_type = ie->arg_type; 3387 int target_size; 3388 void *argptr; 3389 int ret; 3390 struct ifconf *host_ifconf; 3391 uint32_t outbufsz; 3392 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3393 int target_ifreq_size; 3394 int nb_ifreq; 3395 int free_buf = 0; 3396 int i; 3397 int target_ifc_len; 3398 abi_long target_ifc_buf; 3399 int host_ifc_len; 3400 char *host_ifc_buf; 3401 3402 assert(arg_type[0] == TYPE_PTR); 3403 assert(ie->access == IOC_RW); 3404 3405 arg_type++; 3406 target_size = thunk_type_size(arg_type, 0); 3407 3408 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3409 if (!argptr) 3410 return -TARGET_EFAULT; 3411 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3412 unlock_user(argptr, arg, 0); 3413 3414 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3415 target_ifc_len = host_ifconf->ifc_len; 3416 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3417 3418 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3419 nb_ifreq = target_ifc_len / target_ifreq_size; 3420 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3421 3422 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3423 if (outbufsz > MAX_STRUCT_SIZE) { 3424 /* We can't fit all the extents into the fixed size buffer. 3425 * Allocate one that is large enough and use it instead. 
3426 */ 3427 host_ifconf = malloc(outbufsz); 3428 if (!host_ifconf) { 3429 return -TARGET_ENOMEM; 3430 } 3431 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3432 free_buf = 1; 3433 } 3434 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3435 3436 host_ifconf->ifc_len = host_ifc_len; 3437 host_ifconf->ifc_buf = host_ifc_buf; 3438 3439 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3440 if (!is_error(ret)) { 3441 /* convert host ifc_len to target ifc_len */ 3442 3443 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3444 target_ifc_len = nb_ifreq * target_ifreq_size; 3445 host_ifconf->ifc_len = target_ifc_len; 3446 3447 /* restore target ifc_buf */ 3448 3449 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3450 3451 /* copy struct ifconf to target user */ 3452 3453 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3454 if (!argptr) 3455 return -TARGET_EFAULT; 3456 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3457 unlock_user(argptr, arg, target_size); 3458 3459 /* copy ifreq[] to target user */ 3460 3461 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3462 for (i = 0; i < nb_ifreq ; i++) { 3463 thunk_convert(argptr + i * target_ifreq_size, 3464 host_ifc_buf + i * sizeof(struct ifreq), 3465 ifreq_arg_type, THUNK_TARGET); 3466 } 3467 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3468 } 3469 3470 if (free_buf) { 3471 free(host_ifconf); 3472 } 3473 3474 return ret; 3475 } 3476 3477 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3478 abi_long cmd, abi_long arg) 3479 { 3480 void *argptr; 3481 struct dm_ioctl *host_dm; 3482 abi_long guest_data; 3483 uint32_t guest_data_size; 3484 int target_size; 3485 const argtype *arg_type = ie->arg_type; 3486 abi_long ret; 3487 void *big_buf = NULL; 3488 char *host_data; 3489 3490 arg_type++; 3491 target_size = thunk_type_size(arg_type, 0); 3492 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3493 if (!argptr) { 3494 ret = -TARGET_EFAULT; 3495 goto out; 3496 } 3497 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3498 unlock_user(argptr, arg, 0); 3499 3500 /* buf_temp is too small, so fetch things into a bigger buffer */ 3501 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3502 memcpy(big_buf, buf_temp, target_size); 3503 buf_temp = big_buf; 3504 host_dm = big_buf; 3505 3506 guest_data = arg + host_dm->data_start; 3507 if ((guest_data - arg) < 0) { 3508 ret = -EINVAL; 3509 goto out; 3510 } 3511 guest_data_size = host_dm->data_size - host_dm->data_start; 3512 host_data = (char*)host_dm + host_dm->data_start; 3513 3514 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3515 switch (ie->host_cmd) { 3516 case DM_REMOVE_ALL: 3517 case DM_LIST_DEVICES: 3518 case DM_DEV_CREATE: 3519 case DM_DEV_REMOVE: 3520 case DM_DEV_SUSPEND: 3521 case DM_DEV_STATUS: 3522 case DM_DEV_WAIT: 3523 case DM_TABLE_STATUS: 3524 case DM_TABLE_CLEAR: 3525 case DM_TABLE_DEPS: 3526 case DM_LIST_VERSIONS: 3527 /* no input data */ 3528 break; 3529 case DM_DEV_RENAME: 3530 case DM_DEV_SET_GEOMETRY: 3531 /* data contains only strings */ 3532 memcpy(host_data, argptr, guest_data_size); 3533 break; 3534 case DM_TARGET_MSG: 3535 memcpy(host_data, argptr, guest_data_size); 3536 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3537 break; 3538 case DM_TABLE_LOAD: 3539 { 3540 void *gspec = argptr; 3541 void *cur_data = host_data; 3542 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3543 int spec_size = 
thunk_type_size(arg_type, 0); 3544 int i; 3545 3546 for (i = 0; i < host_dm->target_count; i++) { 3547 struct dm_target_spec *spec = cur_data; 3548 uint32_t next; 3549 int slen; 3550 3551 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3552 slen = strlen((char*)gspec + spec_size) + 1; 3553 next = spec->next; 3554 spec->next = sizeof(*spec) + slen; 3555 strcpy((char*)&spec[1], gspec + spec_size); 3556 gspec += next; 3557 cur_data += spec->next; 3558 } 3559 break; 3560 } 3561 default: 3562 ret = -TARGET_EINVAL; 3563 goto out; 3564 } 3565 unlock_user(argptr, guest_data, 0); 3566 3567 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3568 if (!is_error(ret)) { 3569 guest_data = arg + host_dm->data_start; 3570 guest_data_size = host_dm->data_size - host_dm->data_start; 3571 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3572 switch (ie->host_cmd) { 3573 case DM_REMOVE_ALL: 3574 case DM_DEV_CREATE: 3575 case DM_DEV_REMOVE: 3576 case DM_DEV_RENAME: 3577 case DM_DEV_SUSPEND: 3578 case DM_DEV_STATUS: 3579 case DM_TABLE_LOAD: 3580 case DM_TABLE_CLEAR: 3581 case DM_TARGET_MSG: 3582 case DM_DEV_SET_GEOMETRY: 3583 /* no return data */ 3584 break; 3585 case DM_LIST_DEVICES: 3586 { 3587 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3588 uint32_t remaining_data = guest_data_size; 3589 void *cur_data = argptr; 3590 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3591 int nl_size = 12; /* can't use thunk_size due to alignment */ 3592 3593 while (1) { 3594 uint32_t next = nl->next; 3595 if (next) { 3596 nl->next = nl_size + (strlen(nl->name) + 1); 3597 } 3598 if (remaining_data < nl->next) { 3599 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3600 break; 3601 } 3602 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3603 strcpy(cur_data + nl_size, nl->name); 3604 cur_data += nl->next; 3605 remaining_data -= nl->next; 3606 if (!next) { 3607 break; 3608 } 3609 nl = (void*)nl + next; 3610 } 3611 break; 3612 } 3613 case DM_DEV_WAIT: 3614 case DM_TABLE_STATUS: 3615 { 3616 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3617 void *cur_data = argptr; 3618 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3619 int spec_size = thunk_type_size(arg_type, 0); 3620 int i; 3621 3622 for (i = 0; i < host_dm->target_count; i++) { 3623 uint32_t next = spec->next; 3624 int slen = strlen((char*)&spec[1]) + 1; 3625 spec->next = (cur_data - argptr) + spec_size + slen; 3626 if (guest_data_size < spec->next) { 3627 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3628 break; 3629 } 3630 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3631 strcpy(cur_data + spec_size, (char*)&spec[1]); 3632 cur_data = argptr + spec->next; 3633 spec = (void*)host_dm + host_dm->data_start + next; 3634 } 3635 break; 3636 } 3637 case DM_TABLE_DEPS: 3638 { 3639 void *hdata = (void*)host_dm + host_dm->data_start; 3640 int count = *(uint32_t*)hdata; 3641 uint64_t *hdev = hdata + 8; 3642 uint64_t *gdev = argptr + 8; 3643 int i; 3644 3645 *(uint32_t*)argptr = tswap32(count); 3646 for (i = 0; i < count; i++) { 3647 *gdev = tswap64(*hdev); 3648 gdev++; 3649 hdev++; 3650 } 3651 break; 3652 } 3653 case DM_LIST_VERSIONS: 3654 { 3655 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3656 uint32_t remaining_data = guest_data_size; 3657 void *cur_data = argptr; 3658 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3659 int vers_size = thunk_type_size(arg_type, 0); 3660 3661 while (1) { 3662 uint32_t next = vers->next; 3663 if (next) { 3664 
vers->next = vers_size + (strlen(vers->name) + 1); 3665 } 3666 if (remaining_data < vers->next) { 3667 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3668 break; 3669 } 3670 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3671 strcpy(cur_data + vers_size, vers->name); 3672 cur_data += vers->next; 3673 remaining_data -= vers->next; 3674 if (!next) { 3675 break; 3676 } 3677 vers = (void*)vers + next; 3678 } 3679 break; 3680 } 3681 default: 3682 ret = -TARGET_EINVAL; 3683 goto out; 3684 } 3685 unlock_user(argptr, guest_data, guest_data_size); 3686 3687 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3688 if (!argptr) { 3689 ret = -TARGET_EFAULT; 3690 goto out; 3691 } 3692 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3693 unlock_user(argptr, arg, target_size); 3694 } 3695 out: 3696 g_free(big_buf); 3697 return ret; 3698 } 3699 3700 static IOCTLEntry ioctl_entries[] = { 3701 #define IOCTL(cmd, access, ...) \ 3702 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3703 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3704 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3705 #include "ioctls.h" 3706 { 0, 0, }, 3707 }; 3708 3709 /* ??? Implement proper locking for ioctls. */ 3710 /* do_ioctl() Must return target values and target errnos. */ 3711 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3712 { 3713 const IOCTLEntry *ie; 3714 const argtype *arg_type; 3715 abi_long ret; 3716 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3717 int target_size; 3718 void *argptr; 3719 3720 ie = ioctl_entries; 3721 for(;;) { 3722 if (ie->target_cmd == 0) { 3723 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3724 return -TARGET_ENOSYS; 3725 } 3726 if (ie->target_cmd == cmd) 3727 break; 3728 ie++; 3729 } 3730 arg_type = ie->arg_type; 3731 #if defined(DEBUG) 3732 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3733 #endif 3734 if (ie->do_ioctl) { 3735 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3736 } 3737 3738 switch(arg_type[0]) { 3739 case TYPE_NULL: 3740 /* no argument */ 3741 ret = get_errno(ioctl(fd, ie->host_cmd)); 3742 break; 3743 case TYPE_PTRVOID: 3744 case TYPE_INT: 3745 /* int argment */ 3746 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3747 break; 3748 case TYPE_PTR: 3749 arg_type++; 3750 target_size = thunk_type_size(arg_type, 0); 3751 switch(ie->access) { 3752 case IOC_R: 3753 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3754 if (!is_error(ret)) { 3755 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3756 if (!argptr) 3757 return -TARGET_EFAULT; 3758 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3759 unlock_user(argptr, arg, target_size); 3760 } 3761 break; 3762 case IOC_W: 3763 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3764 if (!argptr) 3765 return -TARGET_EFAULT; 3766 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3767 unlock_user(argptr, arg, 0); 3768 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3769 break; 3770 default: 3771 case IOC_RW: 3772 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3773 if (!argptr) 3774 return -TARGET_EFAULT; 3775 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3776 unlock_user(argptr, arg, 0); 3777 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3778 if (!is_error(ret)) { 3779 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3780 if (!argptr) 3781 return -TARGET_EFAULT; 3782 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3783 unlock_user(argptr, arg, target_size); 3784 } 3785 break; 3786 } 3787 break; 3788 default: 3789 
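/* Argument descriptors other than TYPE_NULL, TYPE_INT/TYPE_PTRVOID and
 * TYPE_PTR are not handled here; log the request and fail with ENOSYS. */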
gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3790 (long)cmd, arg_type[0]); 3791 ret = -TARGET_ENOSYS; 3792 break; 3793 } 3794 return ret; 3795 } 3796 3797 static const bitmask_transtbl iflag_tbl[] = { 3798 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3799 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3800 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3801 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3802 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3803 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3804 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3805 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3806 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3807 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3808 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3809 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3810 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3811 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3812 { 0, 0, 0, 0 } 3813 }; 3814 3815 static const bitmask_transtbl oflag_tbl[] = { 3816 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3817 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3818 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3819 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3820 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 3821 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3822 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3823 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3824 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3825 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3826 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3827 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3828 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3829 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3830 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3831 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3832 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3833 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3834 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3835 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3836 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3837 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3838 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3839 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3840 { 0, 0, 0, 0 } 3841 }; 3842 3843 static const bitmask_transtbl cflag_tbl[] = { 3844 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3845 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3846 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3847 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3848 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3849 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3850 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3851 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3852 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3853 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3854 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3855 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3856 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3857 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3858 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3859 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3860 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3861 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3862 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3863 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3864 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3865 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3866 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3867 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3868 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 
3869 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3870 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3871 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3872 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3873 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3874 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3875 { 0, 0, 0, 0 } 3876 }; 3877 3878 static const bitmask_transtbl lflag_tbl[] = { 3879 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3880 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3881 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3882 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3883 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3884 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3885 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3886 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3887 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3888 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3889 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3890 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3891 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3892 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3893 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3894 { 0, 0, 0, 0 } 3895 }; 3896 3897 static void target_to_host_termios (void *dst, const void *src) 3898 { 3899 struct host_termios *host = dst; 3900 const struct target_termios *target = src; 3901 3902 host->c_iflag = 3903 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3904 host->c_oflag = 3905 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3906 host->c_cflag = 3907 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3908 host->c_lflag = 3909 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3910 host->c_line = target->c_line; 3911 3912 memset(host->c_cc, 0, sizeof(host->c_cc)); 3913 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3914 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3915 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3916 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3917 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3918 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3919 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3920 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3921 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3922 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3923 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3924 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3925 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3926 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3927 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3928 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3929 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3930 } 3931 3932 static void host_to_target_termios (void *dst, const void *src) 3933 { 3934 struct target_termios *target = dst; 3935 const struct host_termios *host = src; 3936 3937 target->c_iflag = 3938 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3939 target->c_oflag = 3940 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3941 target->c_cflag = 3942 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3943 target->c_lflag = 3944 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3945 target->c_line = host->c_line; 3946 3947 memset(target->c_cc, 0, sizeof(target->c_cc)); 3948 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3949 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3950 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3951 
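    /* Each control character is copied by name: the TARGET_V* slot numbers
       follow the guest ABI and need not match the host's V* indices, so the
       c_cc array cannot simply be copied wholesale.  */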
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};

static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { 0, 0, 0, 0 }
};

#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?
*/ 4004 memcpy(p, ldt_table, size); 4005 unlock_user(p, ptr, size); 4006 return size; 4007 } 4008 4009 /* XXX: add locking support */ 4010 static abi_long write_ldt(CPUX86State *env, 4011 abi_ulong ptr, unsigned long bytecount, int oldmode) 4012 { 4013 struct target_modify_ldt_ldt_s ldt_info; 4014 struct target_modify_ldt_ldt_s *target_ldt_info; 4015 int seg_32bit, contents, read_exec_only, limit_in_pages; 4016 int seg_not_present, useable, lm; 4017 uint32_t *lp, entry_1, entry_2; 4018 4019 if (bytecount != sizeof(ldt_info)) 4020 return -TARGET_EINVAL; 4021 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 4022 return -TARGET_EFAULT; 4023 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4024 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4025 ldt_info.limit = tswap32(target_ldt_info->limit); 4026 ldt_info.flags = tswap32(target_ldt_info->flags); 4027 unlock_user_struct(target_ldt_info, ptr, 0); 4028 4029 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 4030 return -TARGET_EINVAL; 4031 seg_32bit = ldt_info.flags & 1; 4032 contents = (ldt_info.flags >> 1) & 3; 4033 read_exec_only = (ldt_info.flags >> 3) & 1; 4034 limit_in_pages = (ldt_info.flags >> 4) & 1; 4035 seg_not_present = (ldt_info.flags >> 5) & 1; 4036 useable = (ldt_info.flags >> 6) & 1; 4037 #ifdef TARGET_ABI32 4038 lm = 0; 4039 #else 4040 lm = (ldt_info.flags >> 7) & 1; 4041 #endif 4042 if (contents == 3) { 4043 if (oldmode) 4044 return -TARGET_EINVAL; 4045 if (seg_not_present == 0) 4046 return -TARGET_EINVAL; 4047 } 4048 /* allocate the LDT */ 4049 if (!ldt_table) { 4050 env->ldt.base = target_mmap(0, 4051 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 4052 PROT_READ|PROT_WRITE, 4053 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 4054 if (env->ldt.base == -1) 4055 return -TARGET_ENOMEM; 4056 memset(g2h(env->ldt.base), 0, 4057 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 4058 env->ldt.limit = 0xffff; 4059 ldt_table = g2h(env->ldt.base); 4060 } 4061 4062 /* NOTE: same code as Linux kernel */ 4063 /* Allow LDTs to be cleared by the user. */ 4064 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4065 if (oldmode || 4066 (contents == 0 && 4067 read_exec_only == 1 && 4068 seg_32bit == 0 && 4069 limit_in_pages == 0 && 4070 seg_not_present == 1 && 4071 useable == 0 )) { 4072 entry_1 = 0; 4073 entry_2 = 0; 4074 goto install; 4075 } 4076 } 4077 4078 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4079 (ldt_info.limit & 0x0ffff); 4080 entry_2 = (ldt_info.base_addr & 0xff000000) | 4081 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4082 (ldt_info.limit & 0xf0000) | 4083 ((read_exec_only ^ 1) << 9) | 4084 (contents << 10) | 4085 ((seg_not_present ^ 1) << 15) | 4086 (seg_32bit << 22) | 4087 (limit_in_pages << 23) | 4088 (lm << 21) | 4089 0x7000; 4090 if (!oldmode) 4091 entry_2 |= (useable << 20); 4092 4093 /* Install the new entry ... 
*/ 4094 install: 4095 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 4096 lp[0] = tswap32(entry_1); 4097 lp[1] = tswap32(entry_2); 4098 return 0; 4099 } 4100 4101 /* specific and weird i386 syscalls */ 4102 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 4103 unsigned long bytecount) 4104 { 4105 abi_long ret; 4106 4107 switch (func) { 4108 case 0: 4109 ret = read_ldt(ptr, bytecount); 4110 break; 4111 case 1: 4112 ret = write_ldt(env, ptr, bytecount, 1); 4113 break; 4114 case 0x11: 4115 ret = write_ldt(env, ptr, bytecount, 0); 4116 break; 4117 default: 4118 ret = -TARGET_ENOSYS; 4119 break; 4120 } 4121 return ret; 4122 } 4123 4124 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4125 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4126 { 4127 uint64_t *gdt_table = g2h(env->gdt.base); 4128 struct target_modify_ldt_ldt_s ldt_info; 4129 struct target_modify_ldt_ldt_s *target_ldt_info; 4130 int seg_32bit, contents, read_exec_only, limit_in_pages; 4131 int seg_not_present, useable, lm; 4132 uint32_t *lp, entry_1, entry_2; 4133 int i; 4134 4135 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4136 if (!target_ldt_info) 4137 return -TARGET_EFAULT; 4138 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4139 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4140 ldt_info.limit = tswap32(target_ldt_info->limit); 4141 ldt_info.flags = tswap32(target_ldt_info->flags); 4142 if (ldt_info.entry_number == -1) { 4143 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4144 if (gdt_table[i] == 0) { 4145 ldt_info.entry_number = i; 4146 target_ldt_info->entry_number = tswap32(i); 4147 break; 4148 } 4149 } 4150 } 4151 unlock_user_struct(target_ldt_info, ptr, 1); 4152 4153 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4154 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4155 return -TARGET_EINVAL; 4156 seg_32bit = ldt_info.flags & 1; 4157 contents = (ldt_info.flags >> 1) & 3; 4158 read_exec_only = (ldt_info.flags >> 3) & 1; 4159 limit_in_pages = (ldt_info.flags >> 4) & 1; 4160 seg_not_present = (ldt_info.flags >> 5) & 1; 4161 useable = (ldt_info.flags >> 6) & 1; 4162 #ifdef TARGET_ABI32 4163 lm = 0; 4164 #else 4165 lm = (ldt_info.flags >> 7) & 1; 4166 #endif 4167 4168 if (contents == 3) { 4169 if (seg_not_present == 0) 4170 return -TARGET_EINVAL; 4171 } 4172 4173 /* NOTE: same code as Linux kernel */ 4174 /* Allow LDTs to be cleared by the user. */ 4175 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4176 if ((contents == 0 && 4177 read_exec_only == 1 && 4178 seg_32bit == 0 && 4179 limit_in_pages == 0 && 4180 seg_not_present == 1 && 4181 useable == 0 )) { 4182 entry_1 = 0; 4183 entry_2 = 0; 4184 goto install; 4185 } 4186 } 4187 4188 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4189 (ldt_info.limit & 0x0ffff); 4190 entry_2 = (ldt_info.base_addr & 0xff000000) | 4191 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4192 (ldt_info.limit & 0xf0000) | 4193 ((read_exec_only ^ 1) << 9) | 4194 (contents << 10) | 4195 ((seg_not_present ^ 1) << 15) | 4196 (seg_32bit << 22) | 4197 (limit_in_pages << 23) | 4198 (useable << 20) | 4199 (lm << 21) | 4200 0x7000; 4201 4202 /* Install the new entry ... 
*/ 4203 install: 4204 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4205 lp[0] = tswap32(entry_1); 4206 lp[1] = tswap32(entry_2); 4207 return 0; 4208 } 4209 4210 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4211 { 4212 struct target_modify_ldt_ldt_s *target_ldt_info; 4213 uint64_t *gdt_table = g2h(env->gdt.base); 4214 uint32_t base_addr, limit, flags; 4215 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4216 int seg_not_present, useable, lm; 4217 uint32_t *lp, entry_1, entry_2; 4218 4219 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4220 if (!target_ldt_info) 4221 return -TARGET_EFAULT; 4222 idx = tswap32(target_ldt_info->entry_number); 4223 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4224 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4225 unlock_user_struct(target_ldt_info, ptr, 1); 4226 return -TARGET_EINVAL; 4227 } 4228 lp = (uint32_t *)(gdt_table + idx); 4229 entry_1 = tswap32(lp[0]); 4230 entry_2 = tswap32(lp[1]); 4231 4232 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4233 contents = (entry_2 >> 10) & 3; 4234 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4235 seg_32bit = (entry_2 >> 22) & 1; 4236 limit_in_pages = (entry_2 >> 23) & 1; 4237 useable = (entry_2 >> 20) & 1; 4238 #ifdef TARGET_ABI32 4239 lm = 0; 4240 #else 4241 lm = (entry_2 >> 21) & 1; 4242 #endif 4243 flags = (seg_32bit << 0) | (contents << 1) | 4244 (read_exec_only << 3) | (limit_in_pages << 4) | 4245 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4246 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4247 base_addr = (entry_1 >> 16) | 4248 (entry_2 & 0xff000000) | 4249 ((entry_2 & 0xff) << 16); 4250 target_ldt_info->base_addr = tswapal(base_addr); 4251 target_ldt_info->limit = tswap32(limit); 4252 target_ldt_info->flags = tswap32(flags); 4253 unlock_user_struct(target_ldt_info, ptr, 1); 4254 return 0; 4255 } 4256 #endif /* TARGET_I386 && TARGET_ABI32 */ 4257 4258 #ifndef TARGET_ABI32 4259 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4260 { 4261 abi_long ret = 0; 4262 abi_ulong val; 4263 int idx; 4264 4265 switch(code) { 4266 case TARGET_ARCH_SET_GS: 4267 case TARGET_ARCH_SET_FS: 4268 if (code == TARGET_ARCH_SET_GS) 4269 idx = R_GS; 4270 else 4271 idx = R_FS; 4272 cpu_x86_load_seg(env, idx, 0); 4273 env->segs[idx].base = addr; 4274 break; 4275 case TARGET_ARCH_GET_GS: 4276 case TARGET_ARCH_GET_FS: 4277 if (code == TARGET_ARCH_GET_GS) 4278 idx = R_GS; 4279 else 4280 idx = R_FS; 4281 val = env->segs[idx].base; 4282 if (put_user(val, addr, abi_ulong)) 4283 ret = -TARGET_EFAULT; 4284 break; 4285 default: 4286 ret = -TARGET_EINVAL; 4287 break; 4288 } 4289 return ret; 4290 } 4291 #endif 4292 4293 #endif /* defined(TARGET_I386) */ 4294 4295 #define NEW_STACK_SIZE 0x40000 4296 4297 #if defined(CONFIG_USE_NPTL) 4298 4299 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 4300 typedef struct { 4301 CPUArchState *env; 4302 pthread_mutex_t mutex; 4303 pthread_cond_t cond; 4304 pthread_t thread; 4305 uint32_t tid; 4306 abi_ulong child_tidptr; 4307 abi_ulong parent_tidptr; 4308 sigset_t sigmask; 4309 } new_thread_info; 4310 4311 static void *clone_func(void *arg) 4312 { 4313 new_thread_info *info = arg; 4314 CPUArchState *env; 4315 CPUState *cpu; 4316 TaskState *ts; 4317 4318 env = info->env; 4319 cpu = ENV_GET_CPU(env); 4320 thread_env = env; 4321 ts = (TaskState *)thread_env->opaque; 4322 info->tid = gettid(); 4323 cpu->host_tid = info->tid; 4324 task_settid(ts); 4325 if (info->child_tidptr) 4326 put_user_u32(info->tid, info->child_tidptr); 4327 if 
(info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
#else

static int clone_func(void *arg)
{
    CPUArchState *env = arg;
    cpu_loop(env);
    /* never exits */
    return 0;
}
#endif

/* do_fork() must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    int ret;
    TaskState *ts;
    CPUArchState *new_env;
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;
    sigset_t sigmask;
#else
    uint8_t *new_stack;
#endif

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;
#endif
        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        cpu_reset(ENV_GET_CPU(new_env));
#endif
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
#else
        if (flags & CLONE_NPTL_FLAGS2)
            return -EINVAL;
        /* This is probably going to die very quickly, but do it anyway.  */
        new_stack = g_malloc0 (NEW_STACK_SIZE);
#ifdef __ia64__
        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
#else
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#endif
#endif
    } else {
        /* if no CLONE_VM, we consider it a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            return -EINVAL;
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
#endif
        } else {
            fork_end(0);
        }
    }
    return ret;
}

/* warning: doesn't handle Linux-specific flags...
*/ 4487 static int target_to_host_fcntl_cmd(int cmd) 4488 { 4489 switch(cmd) { 4490 case TARGET_F_DUPFD: 4491 case TARGET_F_GETFD: 4492 case TARGET_F_SETFD: 4493 case TARGET_F_GETFL: 4494 case TARGET_F_SETFL: 4495 return cmd; 4496 case TARGET_F_GETLK: 4497 return F_GETLK; 4498 case TARGET_F_SETLK: 4499 return F_SETLK; 4500 case TARGET_F_SETLKW: 4501 return F_SETLKW; 4502 case TARGET_F_GETOWN: 4503 return F_GETOWN; 4504 case TARGET_F_SETOWN: 4505 return F_SETOWN; 4506 case TARGET_F_GETSIG: 4507 return F_GETSIG; 4508 case TARGET_F_SETSIG: 4509 return F_SETSIG; 4510 #if TARGET_ABI_BITS == 32 4511 case TARGET_F_GETLK64: 4512 return F_GETLK64; 4513 case TARGET_F_SETLK64: 4514 return F_SETLK64; 4515 case TARGET_F_SETLKW64: 4516 return F_SETLKW64; 4517 #endif 4518 case TARGET_F_SETLEASE: 4519 return F_SETLEASE; 4520 case TARGET_F_GETLEASE: 4521 return F_GETLEASE; 4522 #ifdef F_DUPFD_CLOEXEC 4523 case TARGET_F_DUPFD_CLOEXEC: 4524 return F_DUPFD_CLOEXEC; 4525 #endif 4526 case TARGET_F_NOTIFY: 4527 return F_NOTIFY; 4528 default: 4529 return -TARGET_EINVAL; 4530 } 4531 return -TARGET_EINVAL; 4532 } 4533 4534 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4535 static const bitmask_transtbl flock_tbl[] = { 4536 TRANSTBL_CONVERT(F_RDLCK), 4537 TRANSTBL_CONVERT(F_WRLCK), 4538 TRANSTBL_CONVERT(F_UNLCK), 4539 TRANSTBL_CONVERT(F_EXLCK), 4540 TRANSTBL_CONVERT(F_SHLCK), 4541 { 0, 0, 0, 0 } 4542 }; 4543 4544 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4545 { 4546 struct flock fl; 4547 struct target_flock *target_fl; 4548 struct flock64 fl64; 4549 struct target_flock64 *target_fl64; 4550 abi_long ret; 4551 int host_cmd = target_to_host_fcntl_cmd(cmd); 4552 4553 if (host_cmd == -TARGET_EINVAL) 4554 return host_cmd; 4555 4556 switch(cmd) { 4557 case TARGET_F_GETLK: 4558 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4559 return -TARGET_EFAULT; 4560 fl.l_type = 4561 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4562 fl.l_whence = tswap16(target_fl->l_whence); 4563 fl.l_start = tswapal(target_fl->l_start); 4564 fl.l_len = tswapal(target_fl->l_len); 4565 fl.l_pid = tswap32(target_fl->l_pid); 4566 unlock_user_struct(target_fl, arg, 0); 4567 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4568 if (ret == 0) { 4569 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4570 return -TARGET_EFAULT; 4571 target_fl->l_type = 4572 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4573 target_fl->l_whence = tswap16(fl.l_whence); 4574 target_fl->l_start = tswapal(fl.l_start); 4575 target_fl->l_len = tswapal(fl.l_len); 4576 target_fl->l_pid = tswap32(fl.l_pid); 4577 unlock_user_struct(target_fl, arg, 1); 4578 } 4579 break; 4580 4581 case TARGET_F_SETLK: 4582 case TARGET_F_SETLKW: 4583 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4584 return -TARGET_EFAULT; 4585 fl.l_type = 4586 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4587 fl.l_whence = tswap16(target_fl->l_whence); 4588 fl.l_start = tswapal(target_fl->l_start); 4589 fl.l_len = tswapal(target_fl->l_len); 4590 fl.l_pid = tswap32(target_fl->l_pid); 4591 unlock_user_struct(target_fl, arg, 0); 4592 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4593 break; 4594 4595 case TARGET_F_GETLK64: 4596 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4597 return -TARGET_EFAULT; 4598 fl64.l_type = 4599 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4600 fl64.l_whence = tswap16(target_fl64->l_whence); 4601 fl64.l_start = tswap64(target_fl64->l_start); 4602 fl64.l_len = 
tswap64(target_fl64->l_len); 4603 fl64.l_pid = tswap32(target_fl64->l_pid); 4604 unlock_user_struct(target_fl64, arg, 0); 4605 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4606 if (ret == 0) { 4607 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4608 return -TARGET_EFAULT; 4609 target_fl64->l_type = 4610 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4611 target_fl64->l_whence = tswap16(fl64.l_whence); 4612 target_fl64->l_start = tswap64(fl64.l_start); 4613 target_fl64->l_len = tswap64(fl64.l_len); 4614 target_fl64->l_pid = tswap32(fl64.l_pid); 4615 unlock_user_struct(target_fl64, arg, 1); 4616 } 4617 break; 4618 case TARGET_F_SETLK64: 4619 case TARGET_F_SETLKW64: 4620 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4621 return -TARGET_EFAULT; 4622 fl64.l_type = 4623 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4624 fl64.l_whence = tswap16(target_fl64->l_whence); 4625 fl64.l_start = tswap64(target_fl64->l_start); 4626 fl64.l_len = tswap64(target_fl64->l_len); 4627 fl64.l_pid = tswap32(target_fl64->l_pid); 4628 unlock_user_struct(target_fl64, arg, 0); 4629 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4630 break; 4631 4632 case TARGET_F_GETFL: 4633 ret = get_errno(fcntl(fd, host_cmd, arg)); 4634 if (ret >= 0) { 4635 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4636 } 4637 break; 4638 4639 case TARGET_F_SETFL: 4640 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4641 break; 4642 4643 case TARGET_F_SETOWN: 4644 case TARGET_F_GETOWN: 4645 case TARGET_F_SETSIG: 4646 case TARGET_F_GETSIG: 4647 case TARGET_F_SETLEASE: 4648 case TARGET_F_GETLEASE: 4649 ret = get_errno(fcntl(fd, host_cmd, arg)); 4650 break; 4651 4652 default: 4653 ret = get_errno(fcntl(fd, cmd, arg)); 4654 break; 4655 } 4656 return ret; 4657 } 4658 4659 #ifdef USE_UID16 4660 4661 static inline int high2lowuid(int uid) 4662 { 4663 if (uid > 65535) 4664 return 65534; 4665 else 4666 return uid; 4667 } 4668 4669 static inline int high2lowgid(int gid) 4670 { 4671 if (gid > 65535) 4672 return 65534; 4673 else 4674 return gid; 4675 } 4676 4677 static inline int low2highuid(int uid) 4678 { 4679 if ((int16_t)uid == -1) 4680 return -1; 4681 else 4682 return uid; 4683 } 4684 4685 static inline int low2highgid(int gid) 4686 { 4687 if ((int16_t)gid == -1) 4688 return -1; 4689 else 4690 return gid; 4691 } 4692 static inline int tswapid(int id) 4693 { 4694 return tswap16(id); 4695 } 4696 #else /* !USE_UID16 */ 4697 static inline int high2lowuid(int uid) 4698 { 4699 return uid; 4700 } 4701 static inline int high2lowgid(int gid) 4702 { 4703 return gid; 4704 } 4705 static inline int low2highuid(int uid) 4706 { 4707 return uid; 4708 } 4709 static inline int low2highgid(int gid) 4710 { 4711 return gid; 4712 } 4713 static inline int tswapid(int id) 4714 { 4715 return tswap32(id); 4716 } 4717 #endif /* USE_UID16 */ 4718 4719 void syscall_init(void) 4720 { 4721 IOCTLEntry *ie; 4722 const argtype *arg_type; 4723 int size; 4724 int i; 4725 4726 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4727 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4728 #include "syscall_types.h" 4729 #undef STRUCT 4730 #undef STRUCT_SPECIAL 4731 4732 /* Build target_to_host_errno_table[] table from 4733 * host_to_target_errno_table[]. 
*/ 4734 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4735 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4736 } 4737 4738 /* we patch the ioctl size if necessary. We rely on the fact that 4739 no ioctl has all the bits at '1' in the size field */ 4740 ie = ioctl_entries; 4741 while (ie->target_cmd != 0) { 4742 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4743 TARGET_IOC_SIZEMASK) { 4744 arg_type = ie->arg_type; 4745 if (arg_type[0] != TYPE_PTR) { 4746 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4747 ie->target_cmd); 4748 exit(1); 4749 } 4750 arg_type++; 4751 size = thunk_type_size(arg_type, 0); 4752 ie->target_cmd = (ie->target_cmd & 4753 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4754 (size << TARGET_IOC_SIZESHIFT); 4755 } 4756 4757 /* automatic consistency check if same arch */ 4758 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4759 (defined(__x86_64__) && defined(TARGET_X86_64)) 4760 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4761 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4762 ie->name, ie->target_cmd, ie->host_cmd); 4763 } 4764 #endif 4765 ie++; 4766 } 4767 } 4768 4769 #if TARGET_ABI_BITS == 32 4770 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4771 { 4772 #ifdef TARGET_WORDS_BIGENDIAN 4773 return ((uint64_t)word0 << 32) | word1; 4774 #else 4775 return ((uint64_t)word1 << 32) | word0; 4776 #endif 4777 } 4778 #else /* TARGET_ABI_BITS == 32 */ 4779 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4780 { 4781 return word0; 4782 } 4783 #endif /* TARGET_ABI_BITS != 32 */ 4784 4785 #ifdef TARGET_NR_truncate64 4786 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4787 abi_long arg2, 4788 abi_long arg3, 4789 abi_long arg4) 4790 { 4791 if (regpairs_aligned(cpu_env)) { 4792 arg2 = arg3; 4793 arg3 = arg4; 4794 } 4795 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4796 } 4797 #endif 4798 4799 #ifdef TARGET_NR_ftruncate64 4800 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4801 abi_long arg2, 4802 abi_long arg3, 4803 abi_long arg4) 4804 { 4805 if (regpairs_aligned(cpu_env)) { 4806 arg2 = arg3; 4807 arg3 = arg4; 4808 } 4809 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4810 } 4811 #endif 4812 4813 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4814 abi_ulong target_addr) 4815 { 4816 struct target_timespec *target_ts; 4817 4818 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4819 return -TARGET_EFAULT; 4820 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4821 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4822 unlock_user_struct(target_ts, target_addr, 0); 4823 return 0; 4824 } 4825 4826 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4827 struct timespec *host_ts) 4828 { 4829 struct target_timespec *target_ts; 4830 4831 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4832 return -TARGET_EFAULT; 4833 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4834 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4835 unlock_user_struct(target_ts, target_addr, 1); 4836 return 0; 4837 } 4838 4839 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4840 static inline abi_long host_to_target_stat64(void *cpu_env, 4841 abi_ulong target_addr, 4842 struct stat *host_st) 4843 { 4844 #ifdef TARGET_ARM 4845 if (((CPUARMState *)cpu_env)->eabi) { 4846 struct target_eabi_stat64 *target_st; 4847 
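        /* ARM EABI uses its own stat64 layout, so it is converted through
           target_eabi_stat64 here; every other configuration falls through
           to the generic target_stat/target_stat64 path below.  */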
4848 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4849 return -TARGET_EFAULT; 4850 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4851 __put_user(host_st->st_dev, &target_st->st_dev); 4852 __put_user(host_st->st_ino, &target_st->st_ino); 4853 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4854 __put_user(host_st->st_ino, &target_st->__st_ino); 4855 #endif 4856 __put_user(host_st->st_mode, &target_st->st_mode); 4857 __put_user(host_st->st_nlink, &target_st->st_nlink); 4858 __put_user(host_st->st_uid, &target_st->st_uid); 4859 __put_user(host_st->st_gid, &target_st->st_gid); 4860 __put_user(host_st->st_rdev, &target_st->st_rdev); 4861 __put_user(host_st->st_size, &target_st->st_size); 4862 __put_user(host_st->st_blksize, &target_st->st_blksize); 4863 __put_user(host_st->st_blocks, &target_st->st_blocks); 4864 __put_user(host_st->st_atime, &target_st->target_st_atime); 4865 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4866 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4867 unlock_user_struct(target_st, target_addr, 1); 4868 } else 4869 #endif 4870 { 4871 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA) 4872 struct target_stat *target_st; 4873 #else 4874 struct target_stat64 *target_st; 4875 #endif 4876 4877 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4878 return -TARGET_EFAULT; 4879 memset(target_st, 0, sizeof(*target_st)); 4880 __put_user(host_st->st_dev, &target_st->st_dev); 4881 __put_user(host_st->st_ino, &target_st->st_ino); 4882 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4883 __put_user(host_st->st_ino, &target_st->__st_ino); 4884 #endif 4885 __put_user(host_st->st_mode, &target_st->st_mode); 4886 __put_user(host_st->st_nlink, &target_st->st_nlink); 4887 __put_user(host_st->st_uid, &target_st->st_uid); 4888 __put_user(host_st->st_gid, &target_st->st_gid); 4889 __put_user(host_st->st_rdev, &target_st->st_rdev); 4890 /* XXX: better use of kernel struct */ 4891 __put_user(host_st->st_size, &target_st->st_size); 4892 __put_user(host_st->st_blksize, &target_st->st_blksize); 4893 __put_user(host_st->st_blocks, &target_st->st_blocks); 4894 __put_user(host_st->st_atime, &target_st->target_st_atime); 4895 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4896 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4897 unlock_user_struct(target_st, target_addr, 1); 4898 } 4899 4900 return 0; 4901 } 4902 #endif 4903 4904 #if defined(CONFIG_USE_NPTL) 4905 /* ??? Using host futex calls even when target atomic operations 4906 are not really atomic probably breaks things. However implementing 4907 futexes locally would make futexes shared between multiple processes 4908 tricky. However they're probably useless because guest atomic 4909 operations won't work either. */ 4910 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4911 target_ulong uaddr2, int val3) 4912 { 4913 struct timespec ts, *pts; 4914 int base_op; 4915 4916 /* ??? We assume FUTEX_* constants are the same on both host 4917 and target. 
*/ 4918 #ifdef FUTEX_CMD_MASK 4919 base_op = op & FUTEX_CMD_MASK; 4920 #else 4921 base_op = op; 4922 #endif 4923 switch (base_op) { 4924 case FUTEX_WAIT: 4925 if (timeout) { 4926 pts = &ts; 4927 target_to_host_timespec(pts, timeout); 4928 } else { 4929 pts = NULL; 4930 } 4931 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4932 pts, NULL, 0)); 4933 case FUTEX_WAKE: 4934 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4935 case FUTEX_FD: 4936 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4937 case FUTEX_REQUEUE: 4938 case FUTEX_CMP_REQUEUE: 4939 case FUTEX_WAKE_OP: 4940 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4941 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4942 But the prototype takes a `struct timespec *'; insert casts 4943 to satisfy the compiler. We do not need to tswap TIMEOUT 4944 since it's not compared to guest memory. */ 4945 pts = (struct timespec *)(uintptr_t) timeout; 4946 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4947 g2h(uaddr2), 4948 (base_op == FUTEX_CMP_REQUEUE 4949 ? tswap32(val3) 4950 : val3))); 4951 default: 4952 return -TARGET_ENOSYS; 4953 } 4954 } 4955 #endif 4956 4957 /* Map host to target signal numbers for the wait family of syscalls. 4958 Assume all other status bits are the same. */ 4959 int host_to_target_waitstatus(int status) 4960 { 4961 if (WIFSIGNALED(status)) { 4962 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4963 } 4964 if (WIFSTOPPED(status)) { 4965 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4966 | (status & 0xff); 4967 } 4968 return status; 4969 } 4970 4971 int get_osversion(void) 4972 { 4973 static int osversion; 4974 struct new_utsname buf; 4975 const char *s; 4976 int i, n, tmp; 4977 if (osversion) 4978 return osversion; 4979 if (qemu_uname_release && *qemu_uname_release) { 4980 s = qemu_uname_release; 4981 } else { 4982 if (sys_uname(&buf)) 4983 return 0; 4984 s = buf.release; 4985 } 4986 tmp = 0; 4987 for (i = 0; i < 3; i++) { 4988 n = 0; 4989 while (*s >= '0' && *s <= '9') { 4990 n *= 10; 4991 n += *s - '0'; 4992 s++; 4993 } 4994 tmp = (tmp << 8) + n; 4995 if (*s == '.') 4996 s++; 4997 } 4998 osversion = tmp; 4999 return osversion; 5000 } 5001 5002 5003 static int open_self_maps(void *cpu_env, int fd) 5004 { 5005 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5006 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5007 #endif 5008 FILE *fp; 5009 char *line = NULL; 5010 size_t len = 0; 5011 ssize_t read; 5012 5013 fp = fopen("/proc/self/maps", "r"); 5014 if (fp == NULL) { 5015 return -EACCES; 5016 } 5017 5018 while ((read = getline(&line, &len, fp)) != -1) { 5019 int fields, dev_maj, dev_min, inode; 5020 uint64_t min, max, offset; 5021 char flag_r, flag_w, flag_x, flag_p; 5022 char path[512] = ""; 5023 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 5024 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 5025 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5026 5027 if ((fields < 10) || (fields > 11)) { 5028 continue; 5029 } 5030 if (!strncmp(path, "[stack]", 7)) { 5031 continue; 5032 } 5033 if (h2g_valid(min) && h2g_valid(max)) { 5034 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5035 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n", 5036 h2g(min), h2g(max), flag_r, flag_w, 5037 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5038 path[0] ? 
" " : "", path); 5039 } 5040 } 5041 5042 free(line); 5043 fclose(fp); 5044 5045 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5046 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 5047 (unsigned long long)ts->info->stack_limit, 5048 (unsigned long long)(ts->info->start_stack + 5049 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 5050 (unsigned long long)0); 5051 #endif 5052 5053 return 0; 5054 } 5055 5056 static int open_self_stat(void *cpu_env, int fd) 5057 { 5058 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5059 abi_ulong start_stack = ts->info->start_stack; 5060 int i; 5061 5062 for (i = 0; i < 44; i++) { 5063 char buf[128]; 5064 int len; 5065 uint64_t val = 0; 5066 5067 if (i == 0) { 5068 /* pid */ 5069 val = getpid(); 5070 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5071 } else if (i == 1) { 5072 /* app name */ 5073 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5074 } else if (i == 27) { 5075 /* stack bottom */ 5076 val = start_stack; 5077 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5078 } else { 5079 /* for the rest, there is MasterCard */ 5080 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 5081 } 5082 5083 len = strlen(buf); 5084 if (write(fd, buf, len) != len) { 5085 return -1; 5086 } 5087 } 5088 5089 return 0; 5090 } 5091 5092 static int open_self_auxv(void *cpu_env, int fd) 5093 { 5094 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5095 abi_ulong auxv = ts->info->saved_auxv; 5096 abi_ulong len = ts->info->auxv_len; 5097 char *ptr; 5098 5099 /* 5100 * Auxiliary vector is stored in target process stack. 5101 * read in whole auxv vector and copy it to file 5102 */ 5103 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5104 if (ptr != NULL) { 5105 while (len > 0) { 5106 ssize_t r; 5107 r = write(fd, ptr, len); 5108 if (r <= 0) { 5109 break; 5110 } 5111 len -= r; 5112 ptr += r; 5113 } 5114 lseek(fd, 0, SEEK_SET); 5115 unlock_user(ptr, auxv, len); 5116 } 5117 5118 return 0; 5119 } 5120 5121 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 5122 { 5123 struct fake_open { 5124 const char *filename; 5125 int (*fill)(void *cpu_env, int fd); 5126 }; 5127 const struct fake_open *fake_open; 5128 static const struct fake_open fakes[] = { 5129 { "/proc/self/maps", open_self_maps }, 5130 { "/proc/self/stat", open_self_stat }, 5131 { "/proc/self/auxv", open_self_auxv }, 5132 { NULL, NULL } 5133 }; 5134 5135 for (fake_open = fakes; fake_open->filename; fake_open++) { 5136 if (!strncmp(pathname, fake_open->filename, 5137 strlen(fake_open->filename))) { 5138 break; 5139 } 5140 } 5141 5142 if (fake_open->filename) { 5143 const char *tmpdir; 5144 char filename[PATH_MAX]; 5145 int fd, r; 5146 5147 /* create temporary file to map stat to */ 5148 tmpdir = getenv("TMPDIR"); 5149 if (!tmpdir) 5150 tmpdir = "/tmp"; 5151 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5152 fd = mkstemp(filename); 5153 if (fd < 0) { 5154 return fd; 5155 } 5156 unlink(filename); 5157 5158 if ((r = fake_open->fill(cpu_env, fd))) { 5159 close(fd); 5160 return r; 5161 } 5162 lseek(fd, 0, SEEK_SET); 5163 5164 return fd; 5165 } 5166 5167 return get_errno(open(path(pathname), flags, mode)); 5168 } 5169 5170 /* do_syscall() should always have a single exit point at the end so 5171 that actions, such as logging of syscall results, can be performed. 5172 All errnos that do_syscall() returns must be -TARGET_<errcode>. 
 */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    abi_long ret;
    struct stat st;
    struct statfs stfs;
    void *p;

#ifdef DEBUG
    gemu_log("syscall %d", num);
#endif
    if(do_strace)
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);

    switch(num) {
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            TaskState *ts;
            CPUArchState **lastp;
            CPUArchState *p;

            cpu_list_lock();
            lastp = &first_cpu;
            p = first_cpu;
            while (p && p != (CPUArchState *)cpu_env) {
                lastp = &p->next_cpu;
                p = p->next_cpu;
            }
            /* If we didn't find the CPU for this thread then something is
               horribly wrong.  */
            if (!p)
                abort();
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            cpu_list_unlock();
            ts = ((CPUArchState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
                          NULL, NULL, 0);
            }
            thread_env = NULL;
            object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
            g_free(ts);
            pthread_exit(NULL);
        }
#endif
#ifdef TARGET_GPROF
        _mcleanup();
#endif
        gdb_exit(cpu_env, arg1);
        _exit(arg1);
        ret = 0; /* avoid warning */
        break;
    case TARGET_NR_read:
        if (arg3 == 0)
            ret = 0;
        else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                goto efault;
            ret = get_errno(read(arg1, p, arg3));
            unlock_user(p, arg2, ret);
        }
        break;
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(do_open(cpu_env, p,
                                target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                arg3));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_openat(arg1,
                                   path(p),
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                   arg4));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
        break;
    case TARGET_NR_brk:
        ret = do_brk(arg1);
        break;
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
        break;
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(waitpid(arg1, &status, arg3));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                goto efault;
        }
        break;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            info.si_pid = 0;
            ret = get_errno(waitid(arg1, arg2, &info, arg4));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
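                /* The siginfo is copied back only when a child was actually
                   reaped (the kernel filled in si_pid); it is converted to
                   the guest layout by host_to_target_siginfo().  */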
5298 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5299 goto efault; 5300 host_to_target_siginfo(p, &info); 5301 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5302 } 5303 } 5304 break; 5305 #endif 5306 #ifdef TARGET_NR_creat /* not on alpha */ 5307 case TARGET_NR_creat: 5308 if (!(p = lock_user_string(arg1))) 5309 goto efault; 5310 ret = get_errno(creat(p, arg2)); 5311 unlock_user(p, arg1, 0); 5312 break; 5313 #endif 5314 case TARGET_NR_link: 5315 { 5316 void * p2; 5317 p = lock_user_string(arg1); 5318 p2 = lock_user_string(arg2); 5319 if (!p || !p2) 5320 ret = -TARGET_EFAULT; 5321 else 5322 ret = get_errno(link(p, p2)); 5323 unlock_user(p2, arg2, 0); 5324 unlock_user(p, arg1, 0); 5325 } 5326 break; 5327 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 5328 case TARGET_NR_linkat: 5329 { 5330 void * p2 = NULL; 5331 if (!arg2 || !arg4) 5332 goto efault; 5333 p = lock_user_string(arg2); 5334 p2 = lock_user_string(arg4); 5335 if (!p || !p2) 5336 ret = -TARGET_EFAULT; 5337 else 5338 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); 5339 unlock_user(p, arg2, 0); 5340 unlock_user(p2, arg4, 0); 5341 } 5342 break; 5343 #endif 5344 case TARGET_NR_unlink: 5345 if (!(p = lock_user_string(arg1))) 5346 goto efault; 5347 ret = get_errno(unlink(p)); 5348 unlock_user(p, arg1, 0); 5349 break; 5350 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 5351 case TARGET_NR_unlinkat: 5352 if (!(p = lock_user_string(arg2))) 5353 goto efault; 5354 ret = get_errno(sys_unlinkat(arg1, p, arg3)); 5355 unlock_user(p, arg2, 0); 5356 break; 5357 #endif 5358 case TARGET_NR_execve: 5359 { 5360 char **argp, **envp; 5361 int argc, envc; 5362 abi_ulong gp; 5363 abi_ulong guest_argp; 5364 abi_ulong guest_envp; 5365 abi_ulong addr; 5366 char **q; 5367 int total_size = 0; 5368 5369 argc = 0; 5370 guest_argp = arg2; 5371 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5372 if (get_user_ual(addr, gp)) 5373 goto efault; 5374 if (!addr) 5375 break; 5376 argc++; 5377 } 5378 envc = 0; 5379 guest_envp = arg3; 5380 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5381 if (get_user_ual(addr, gp)) 5382 goto efault; 5383 if (!addr) 5384 break; 5385 envc++; 5386 } 5387 5388 argp = alloca((argc + 1) * sizeof(void *)); 5389 envp = alloca((envc + 1) * sizeof(void *)); 5390 5391 for (gp = guest_argp, q = argp; gp; 5392 gp += sizeof(abi_ulong), q++) { 5393 if (get_user_ual(addr, gp)) 5394 goto execve_efault; 5395 if (!addr) 5396 break; 5397 if (!(*q = lock_user_string(addr))) 5398 goto execve_efault; 5399 total_size += strlen(*q) + 1; 5400 } 5401 *q = NULL; 5402 5403 for (gp = guest_envp, q = envp; gp; 5404 gp += sizeof(abi_ulong), q++) { 5405 if (get_user_ual(addr, gp)) 5406 goto execve_efault; 5407 if (!addr) 5408 break; 5409 if (!(*q = lock_user_string(addr))) 5410 goto execve_efault; 5411 total_size += strlen(*q) + 1; 5412 } 5413 *q = NULL; 5414 5415 /* This case will not be caught by the host's execve() if its 5416 page size is bigger than the target's. 
*/ 5417 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5418 ret = -TARGET_E2BIG; 5419 goto execve_end; 5420 } 5421 if (!(p = lock_user_string(arg1))) 5422 goto execve_efault; 5423 ret = get_errno(execve(p, argp, envp)); 5424 unlock_user(p, arg1, 0); 5425 5426 goto execve_end; 5427 5428 execve_efault: 5429 ret = -TARGET_EFAULT; 5430 5431 execve_end: 5432 for (gp = guest_argp, q = argp; *q; 5433 gp += sizeof(abi_ulong), q++) { 5434 if (get_user_ual(addr, gp) 5435 || !addr) 5436 break; 5437 unlock_user(*q, addr, 0); 5438 } 5439 for (gp = guest_envp, q = envp; *q; 5440 gp += sizeof(abi_ulong), q++) { 5441 if (get_user_ual(addr, gp) 5442 || !addr) 5443 break; 5444 unlock_user(*q, addr, 0); 5445 } 5446 } 5447 break; 5448 case TARGET_NR_chdir: 5449 if (!(p = lock_user_string(arg1))) 5450 goto efault; 5451 ret = get_errno(chdir(p)); 5452 unlock_user(p, arg1, 0); 5453 break; 5454 #ifdef TARGET_NR_time 5455 case TARGET_NR_time: 5456 { 5457 time_t host_time; 5458 ret = get_errno(time(&host_time)); 5459 if (!is_error(ret) 5460 && arg1 5461 && put_user_sal(host_time, arg1)) 5462 goto efault; 5463 } 5464 break; 5465 #endif 5466 case TARGET_NR_mknod: 5467 if (!(p = lock_user_string(arg1))) 5468 goto efault; 5469 ret = get_errno(mknod(p, arg2, arg3)); 5470 unlock_user(p, arg1, 0); 5471 break; 5472 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 5473 case TARGET_NR_mknodat: 5474 if (!(p = lock_user_string(arg2))) 5475 goto efault; 5476 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); 5477 unlock_user(p, arg2, 0); 5478 break; 5479 #endif 5480 case TARGET_NR_chmod: 5481 if (!(p = lock_user_string(arg1))) 5482 goto efault; 5483 ret = get_errno(chmod(p, arg2)); 5484 unlock_user(p, arg1, 0); 5485 break; 5486 #ifdef TARGET_NR_break 5487 case TARGET_NR_break: 5488 goto unimplemented; 5489 #endif 5490 #ifdef TARGET_NR_oldstat 5491 case TARGET_NR_oldstat: 5492 goto unimplemented; 5493 #endif 5494 case TARGET_NR_lseek: 5495 ret = get_errno(lseek(arg1, arg2, arg3)); 5496 break; 5497 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5498 /* Alpha specific */ 5499 case TARGET_NR_getxpid: 5500 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5501 ret = get_errno(getpid()); 5502 break; 5503 #endif 5504 #ifdef TARGET_NR_getpid 5505 case TARGET_NR_getpid: 5506 ret = get_errno(getpid()); 5507 break; 5508 #endif 5509 case TARGET_NR_mount: 5510 { 5511 /* need to look at the data field */ 5512 void *p2, *p3; 5513 p = lock_user_string(arg1); 5514 p2 = lock_user_string(arg2); 5515 p3 = lock_user_string(arg3); 5516 if (!p || !p2 || !p3) 5517 ret = -TARGET_EFAULT; 5518 else { 5519 /* FIXME - arg5 should be locked, but it isn't clear how to 5520 * do that since it's not guaranteed to be a NULL-terminated 5521 * string. 5522 */ 5523 if ( ! 
arg5 ) 5524 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5525 else 5526 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5527 } 5528 unlock_user(p, arg1, 0); 5529 unlock_user(p2, arg2, 0); 5530 unlock_user(p3, arg3, 0); 5531 break; 5532 } 5533 #ifdef TARGET_NR_umount 5534 case TARGET_NR_umount: 5535 if (!(p = lock_user_string(arg1))) 5536 goto efault; 5537 ret = get_errno(umount(p)); 5538 unlock_user(p, arg1, 0); 5539 break; 5540 #endif 5541 #ifdef TARGET_NR_stime /* not on alpha */ 5542 case TARGET_NR_stime: 5543 { 5544 time_t host_time; 5545 if (get_user_sal(host_time, arg1)) 5546 goto efault; 5547 ret = get_errno(stime(&host_time)); 5548 } 5549 break; 5550 #endif 5551 case TARGET_NR_ptrace: 5552 goto unimplemented; 5553 #ifdef TARGET_NR_alarm /* not on alpha */ 5554 case TARGET_NR_alarm: 5555 ret = alarm(arg1); 5556 break; 5557 #endif 5558 #ifdef TARGET_NR_oldfstat 5559 case TARGET_NR_oldfstat: 5560 goto unimplemented; 5561 #endif 5562 #ifdef TARGET_NR_pause /* not on alpha */ 5563 case TARGET_NR_pause: 5564 ret = get_errno(pause()); 5565 break; 5566 #endif 5567 #ifdef TARGET_NR_utime 5568 case TARGET_NR_utime: 5569 { 5570 struct utimbuf tbuf, *host_tbuf; 5571 struct target_utimbuf *target_tbuf; 5572 if (arg2) { 5573 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5574 goto efault; 5575 tbuf.actime = tswapal(target_tbuf->actime); 5576 tbuf.modtime = tswapal(target_tbuf->modtime); 5577 unlock_user_struct(target_tbuf, arg2, 0); 5578 host_tbuf = &tbuf; 5579 } else { 5580 host_tbuf = NULL; 5581 } 5582 if (!(p = lock_user_string(arg1))) 5583 goto efault; 5584 ret = get_errno(utime(p, host_tbuf)); 5585 unlock_user(p, arg1, 0); 5586 } 5587 break; 5588 #endif 5589 case TARGET_NR_utimes: 5590 { 5591 struct timeval *tvp, tv[2]; 5592 if (arg2) { 5593 if (copy_from_user_timeval(&tv[0], arg2) 5594 || copy_from_user_timeval(&tv[1], 5595 arg2 + sizeof(struct target_timeval))) 5596 goto efault; 5597 tvp = tv; 5598 } else { 5599 tvp = NULL; 5600 } 5601 if (!(p = lock_user_string(arg1))) 5602 goto efault; 5603 ret = get_errno(utimes(p, tvp)); 5604 unlock_user(p, arg1, 0); 5605 } 5606 break; 5607 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 5608 case TARGET_NR_futimesat: 5609 { 5610 struct timeval *tvp, tv[2]; 5611 if (arg3) { 5612 if (copy_from_user_timeval(&tv[0], arg3) 5613 || copy_from_user_timeval(&tv[1], 5614 arg3 + sizeof(struct target_timeval))) 5615 goto efault; 5616 tvp = tv; 5617 } else { 5618 tvp = NULL; 5619 } 5620 if (!(p = lock_user_string(arg2))) 5621 goto efault; 5622 ret = get_errno(sys_futimesat(arg1, path(p), tvp)); 5623 unlock_user(p, arg2, 0); 5624 } 5625 break; 5626 #endif 5627 #ifdef TARGET_NR_stty 5628 case TARGET_NR_stty: 5629 goto unimplemented; 5630 #endif 5631 #ifdef TARGET_NR_gtty 5632 case TARGET_NR_gtty: 5633 goto unimplemented; 5634 #endif 5635 case TARGET_NR_access: 5636 if (!(p = lock_user_string(arg1))) 5637 goto efault; 5638 ret = get_errno(access(path(p), arg2)); 5639 unlock_user(p, arg1, 0); 5640 break; 5641 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5642 case TARGET_NR_faccessat: 5643 if (!(p = lock_user_string(arg2))) 5644 goto efault; 5645 ret = get_errno(sys_faccessat(arg1, p, arg3)); 5646 unlock_user(p, arg2, 0); 5647 break; 5648 #endif 5649 #ifdef TARGET_NR_nice /* not on alpha */ 5650 case TARGET_NR_nice: 5651 ret = get_errno(nice(arg1)); 5652 break; 5653 #endif 5654 #ifdef TARGET_NR_ftime 5655 case TARGET_NR_ftime: 5656 goto unimplemented; 5657 #endif 5658 case TARGET_NR_sync: 5659 
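        /* sync() returns no status and cannot fail, so the syscall is
           always reported back to the guest as success.  */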
sync(); 5660 ret = 0; 5661 break; 5662 case TARGET_NR_kill: 5663 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5664 break; 5665 case TARGET_NR_rename: 5666 { 5667 void *p2; 5668 p = lock_user_string(arg1); 5669 p2 = lock_user_string(arg2); 5670 if (!p || !p2) 5671 ret = -TARGET_EFAULT; 5672 else 5673 ret = get_errno(rename(p, p2)); 5674 unlock_user(p2, arg2, 0); 5675 unlock_user(p, arg1, 0); 5676 } 5677 break; 5678 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 5679 case TARGET_NR_renameat: 5680 { 5681 void *p2; 5682 p = lock_user_string(arg2); 5683 p2 = lock_user_string(arg4); 5684 if (!p || !p2) 5685 ret = -TARGET_EFAULT; 5686 else 5687 ret = get_errno(sys_renameat(arg1, p, arg3, p2)); 5688 unlock_user(p2, arg4, 0); 5689 unlock_user(p, arg2, 0); 5690 } 5691 break; 5692 #endif 5693 case TARGET_NR_mkdir: 5694 if (!(p = lock_user_string(arg1))) 5695 goto efault; 5696 ret = get_errno(mkdir(p, arg2)); 5697 unlock_user(p, arg1, 0); 5698 break; 5699 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 5700 case TARGET_NR_mkdirat: 5701 if (!(p = lock_user_string(arg2))) 5702 goto efault; 5703 ret = get_errno(sys_mkdirat(arg1, p, arg3)); 5704 unlock_user(p, arg2, 0); 5705 break; 5706 #endif 5707 case TARGET_NR_rmdir: 5708 if (!(p = lock_user_string(arg1))) 5709 goto efault; 5710 ret = get_errno(rmdir(p)); 5711 unlock_user(p, arg1, 0); 5712 break; 5713 case TARGET_NR_dup: 5714 ret = get_errno(dup(arg1)); 5715 break; 5716 case TARGET_NR_pipe: 5717 ret = do_pipe(cpu_env, arg1, 0, 0); 5718 break; 5719 #ifdef TARGET_NR_pipe2 5720 case TARGET_NR_pipe2: 5721 ret = do_pipe(cpu_env, arg1, 5722 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5723 break; 5724 #endif 5725 case TARGET_NR_times: 5726 { 5727 struct target_tms *tmsp; 5728 struct tms tms; 5729 ret = get_errno(times(&tms)); 5730 if (arg1) { 5731 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5732 if (!tmsp) 5733 goto efault; 5734 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5735 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5736 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5737 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5738 } 5739 if (!is_error(ret)) 5740 ret = host_to_target_clock_t(ret); 5741 } 5742 break; 5743 #ifdef TARGET_NR_prof 5744 case TARGET_NR_prof: 5745 goto unimplemented; 5746 #endif 5747 #ifdef TARGET_NR_signal 5748 case TARGET_NR_signal: 5749 goto unimplemented; 5750 #endif 5751 case TARGET_NR_acct: 5752 if (arg1 == 0) { 5753 ret = get_errno(acct(NULL)); 5754 } else { 5755 if (!(p = lock_user_string(arg1))) 5756 goto efault; 5757 ret = get_errno(acct(path(p))); 5758 unlock_user(p, arg1, 0); 5759 } 5760 break; 5761 #ifdef TARGET_NR_umount2 /* not on alpha */ 5762 case TARGET_NR_umount2: 5763 if (!(p = lock_user_string(arg1))) 5764 goto efault; 5765 ret = get_errno(umount2(p, arg2)); 5766 unlock_user(p, arg1, 0); 5767 break; 5768 #endif 5769 #ifdef TARGET_NR_lock 5770 case TARGET_NR_lock: 5771 goto unimplemented; 5772 #endif 5773 case TARGET_NR_ioctl: 5774 ret = do_ioctl(arg1, arg2, arg3); 5775 break; 5776 case TARGET_NR_fcntl: 5777 ret = do_fcntl(arg1, arg2, arg3); 5778 break; 5779 #ifdef TARGET_NR_mpx 5780 case TARGET_NR_mpx: 5781 goto unimplemented; 5782 #endif 5783 case TARGET_NR_setpgid: 5784 ret = get_errno(setpgid(arg1, arg2)); 5785 break; 5786 #ifdef TARGET_NR_ulimit 5787 case TARGET_NR_ulimit: 5788 goto unimplemented; 5789 #endif 5790 #ifdef TARGET_NR_oldolduname 5791 case 
TARGET_NR_oldolduname: 5792 goto unimplemented; 5793 #endif 5794 case TARGET_NR_umask: 5795 ret = get_errno(umask(arg1)); 5796 break; 5797 case TARGET_NR_chroot: 5798 if (!(p = lock_user_string(arg1))) 5799 goto efault; 5800 ret = get_errno(chroot(p)); 5801 unlock_user(p, arg1, 0); 5802 break; 5803 case TARGET_NR_ustat: 5804 goto unimplemented; 5805 case TARGET_NR_dup2: 5806 ret = get_errno(dup2(arg1, arg2)); 5807 break; 5808 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5809 case TARGET_NR_dup3: 5810 ret = get_errno(dup3(arg1, arg2, arg3)); 5811 break; 5812 #endif 5813 #ifdef TARGET_NR_getppid /* not on alpha */ 5814 case TARGET_NR_getppid: 5815 ret = get_errno(getppid()); 5816 break; 5817 #endif 5818 case TARGET_NR_getpgrp: 5819 ret = get_errno(getpgrp()); 5820 break; 5821 case TARGET_NR_setsid: 5822 ret = get_errno(setsid()); 5823 break; 5824 #ifdef TARGET_NR_sigaction 5825 case TARGET_NR_sigaction: 5826 { 5827 #if defined(TARGET_ALPHA) 5828 struct target_sigaction act, oact, *pact = 0; 5829 struct target_old_sigaction *old_act; 5830 if (arg2) { 5831 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5832 goto efault; 5833 act._sa_handler = old_act->_sa_handler; 5834 target_siginitset(&act.sa_mask, old_act->sa_mask); 5835 act.sa_flags = old_act->sa_flags; 5836 act.sa_restorer = 0; 5837 unlock_user_struct(old_act, arg2, 0); 5838 pact = &act; 5839 } 5840 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5841 if (!is_error(ret) && arg3) { 5842 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5843 goto efault; 5844 old_act->_sa_handler = oact._sa_handler; 5845 old_act->sa_mask = oact.sa_mask.sig[0]; 5846 old_act->sa_flags = oact.sa_flags; 5847 unlock_user_struct(old_act, arg3, 1); 5848 } 5849 #elif defined(TARGET_MIPS) 5850 struct target_sigaction act, oact, *pact, *old_act; 5851 5852 if (arg2) { 5853 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5854 goto efault; 5855 act._sa_handler = old_act->_sa_handler; 5856 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5857 act.sa_flags = old_act->sa_flags; 5858 unlock_user_struct(old_act, arg2, 0); 5859 pact = &act; 5860 } else { 5861 pact = NULL; 5862 } 5863 5864 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5865 5866 if (!is_error(ret) && arg3) { 5867 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5868 goto efault; 5869 old_act->_sa_handler = oact._sa_handler; 5870 old_act->sa_flags = oact.sa_flags; 5871 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5872 old_act->sa_mask.sig[1] = 0; 5873 old_act->sa_mask.sig[2] = 0; 5874 old_act->sa_mask.sig[3] = 0; 5875 unlock_user_struct(old_act, arg3, 1); 5876 } 5877 #else 5878 struct target_old_sigaction *old_act; 5879 struct target_sigaction act, oact, *pact; 5880 if (arg2) { 5881 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5882 goto efault; 5883 act._sa_handler = old_act->_sa_handler; 5884 target_siginitset(&act.sa_mask, old_act->sa_mask); 5885 act.sa_flags = old_act->sa_flags; 5886 act.sa_restorer = old_act->sa_restorer; 5887 unlock_user_struct(old_act, arg2, 0); 5888 pact = &act; 5889 } else { 5890 pact = NULL; 5891 } 5892 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5893 if (!is_error(ret) && arg3) { 5894 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5895 goto efault; 5896 old_act->_sa_handler = oact._sa_handler; 5897 old_act->sa_mask = oact.sa_mask.sig[0]; 5898 old_act->sa_flags = oact.sa_flags; 5899 old_act->sa_restorer = oact.sa_restorer; 5900 unlock_user_struct(old_act, arg3, 1); 5901 } 5902 #endif 5903 } 5904 break; 5905 #endif 
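    /* rt_sigaction passes full target_sigaction structures (arg4 is the
       sigset size); except on Alpha, which has its own rt layout, they can
       be handed to do_sigaction() without an intermediate conversion. */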
5906 case TARGET_NR_rt_sigaction: 5907 { 5908 #if defined(TARGET_ALPHA) 5909 struct target_sigaction act, oact, *pact = 0; 5910 struct target_rt_sigaction *rt_act; 5911 /* ??? arg4 == sizeof(sigset_t). */ 5912 if (arg2) { 5913 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5914 goto efault; 5915 act._sa_handler = rt_act->_sa_handler; 5916 act.sa_mask = rt_act->sa_mask; 5917 act.sa_flags = rt_act->sa_flags; 5918 act.sa_restorer = arg5; 5919 unlock_user_struct(rt_act, arg2, 0); 5920 pact = &act; 5921 } 5922 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5923 if (!is_error(ret) && arg3) { 5924 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5925 goto efault; 5926 rt_act->_sa_handler = oact._sa_handler; 5927 rt_act->sa_mask = oact.sa_mask; 5928 rt_act->sa_flags = oact.sa_flags; 5929 unlock_user_struct(rt_act, arg3, 1); 5930 } 5931 #else 5932 struct target_sigaction *act; 5933 struct target_sigaction *oact; 5934 5935 if (arg2) { 5936 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5937 goto efault; 5938 } else 5939 act = NULL; 5940 if (arg3) { 5941 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5942 ret = -TARGET_EFAULT; 5943 goto rt_sigaction_fail; 5944 } 5945 } else 5946 oact = NULL; 5947 ret = get_errno(do_sigaction(arg1, act, oact)); 5948 rt_sigaction_fail: 5949 if (act) 5950 unlock_user_struct(act, arg2, 0); 5951 if (oact) 5952 unlock_user_struct(oact, arg3, 1); 5953 #endif 5954 } 5955 break; 5956 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5957 case TARGET_NR_sgetmask: 5958 { 5959 sigset_t cur_set; 5960 abi_ulong target_set; 5961 sigprocmask(0, NULL, &cur_set); 5962 host_to_target_old_sigset(&target_set, &cur_set); 5963 ret = target_set; 5964 } 5965 break; 5966 #endif 5967 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5968 case TARGET_NR_ssetmask: 5969 { 5970 sigset_t set, oset, cur_set; 5971 abi_ulong target_set = arg1; 5972 sigprocmask(0, NULL, &cur_set); 5973 target_to_host_old_sigset(&set, &target_set); 5974 sigorset(&set, &set, &cur_set); 5975 sigprocmask(SIG_SETMASK, &set, &oset); 5976 host_to_target_old_sigset(&target_set, &oset); 5977 ret = target_set; 5978 } 5979 break; 5980 #endif 5981 #ifdef TARGET_NR_sigprocmask 5982 case TARGET_NR_sigprocmask: 5983 { 5984 #if defined(TARGET_ALPHA) 5985 sigset_t set, oldset; 5986 abi_ulong mask; 5987 int how; 5988 5989 switch (arg1) { 5990 case TARGET_SIG_BLOCK: 5991 how = SIG_BLOCK; 5992 break; 5993 case TARGET_SIG_UNBLOCK: 5994 how = SIG_UNBLOCK; 5995 break; 5996 case TARGET_SIG_SETMASK: 5997 how = SIG_SETMASK; 5998 break; 5999 default: 6000 ret = -TARGET_EINVAL; 6001 goto fail; 6002 } 6003 mask = arg2; 6004 target_to_host_old_sigset(&set, &mask); 6005 6006 ret = get_errno(sigprocmask(how, &set, &oldset)); 6007 if (!is_error(ret)) { 6008 host_to_target_old_sigset(&mask, &oldset); 6009 ret = mask; 6010 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6011 } 6012 #else 6013 sigset_t set, oldset, *set_ptr; 6014 int how; 6015 6016 if (arg2) { 6017 switch (arg1) { 6018 case TARGET_SIG_BLOCK: 6019 how = SIG_BLOCK; 6020 break; 6021 case TARGET_SIG_UNBLOCK: 6022 how = SIG_UNBLOCK; 6023 break; 6024 case TARGET_SIG_SETMASK: 6025 how = SIG_SETMASK; 6026 break; 6027 default: 6028 ret = -TARGET_EINVAL; 6029 goto fail; 6030 } 6031 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6032 goto efault; 6033 target_to_host_old_sigset(&set, p); 6034 unlock_user(p, arg2, 0); 6035 set_ptr = &set; 6036 } else { 6037 how = 0; 6038 set_ptr = NULL; 6039 } 6040 ret = get_errno(sigprocmask(how, set_ptr, 
&oldset)); 6041 if (!is_error(ret) && arg3) { 6042 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6043 goto efault; 6044 host_to_target_old_sigset(p, &oldset); 6045 unlock_user(p, arg3, sizeof(target_sigset_t)); 6046 } 6047 #endif 6048 } 6049 break; 6050 #endif 6051 case TARGET_NR_rt_sigprocmask: 6052 { 6053 int how = arg1; 6054 sigset_t set, oldset, *set_ptr; 6055 6056 if (arg2) { 6057 switch(how) { 6058 case TARGET_SIG_BLOCK: 6059 how = SIG_BLOCK; 6060 break; 6061 case TARGET_SIG_UNBLOCK: 6062 how = SIG_UNBLOCK; 6063 break; 6064 case TARGET_SIG_SETMASK: 6065 how = SIG_SETMASK; 6066 break; 6067 default: 6068 ret = -TARGET_EINVAL; 6069 goto fail; 6070 } 6071 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6072 goto efault; 6073 target_to_host_sigset(&set, p); 6074 unlock_user(p, arg2, 0); 6075 set_ptr = &set; 6076 } else { 6077 how = 0; 6078 set_ptr = NULL; 6079 } 6080 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6081 if (!is_error(ret) && arg3) { 6082 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6083 goto efault; 6084 host_to_target_sigset(p, &oldset); 6085 unlock_user(p, arg3, sizeof(target_sigset_t)); 6086 } 6087 } 6088 break; 6089 #ifdef TARGET_NR_sigpending 6090 case TARGET_NR_sigpending: 6091 { 6092 sigset_t set; 6093 ret = get_errno(sigpending(&set)); 6094 if (!is_error(ret)) { 6095 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6096 goto efault; 6097 host_to_target_old_sigset(p, &set); 6098 unlock_user(p, arg1, sizeof(target_sigset_t)); 6099 } 6100 } 6101 break; 6102 #endif 6103 case TARGET_NR_rt_sigpending: 6104 { 6105 sigset_t set; 6106 ret = get_errno(sigpending(&set)); 6107 if (!is_error(ret)) { 6108 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6109 goto efault; 6110 host_to_target_sigset(p, &set); 6111 unlock_user(p, arg1, sizeof(target_sigset_t)); 6112 } 6113 } 6114 break; 6115 #ifdef TARGET_NR_sigsuspend 6116 case TARGET_NR_sigsuspend: 6117 { 6118 sigset_t set; 6119 #if defined(TARGET_ALPHA) 6120 abi_ulong mask = arg1; 6121 target_to_host_old_sigset(&set, &mask); 6122 #else 6123 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6124 goto efault; 6125 target_to_host_old_sigset(&set, p); 6126 unlock_user(p, arg1, 0); 6127 #endif 6128 ret = get_errno(sigsuspend(&set)); 6129 } 6130 break; 6131 #endif 6132 case TARGET_NR_rt_sigsuspend: 6133 { 6134 sigset_t set; 6135 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6136 goto efault; 6137 target_to_host_sigset(&set, p); 6138 unlock_user(p, arg1, 0); 6139 ret = get_errno(sigsuspend(&set)); 6140 } 6141 break; 6142 case TARGET_NR_rt_sigtimedwait: 6143 { 6144 sigset_t set; 6145 struct timespec uts, *puts; 6146 siginfo_t uinfo; 6147 6148 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6149 goto efault; 6150 target_to_host_sigset(&set, p); 6151 unlock_user(p, arg1, 0); 6152 if (arg3) { 6153 puts = &uts; 6154 target_to_host_timespec(puts, arg3); 6155 } else { 6156 puts = NULL; 6157 } 6158 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6159 if (!is_error(ret) && arg2) { 6160 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 6161 goto efault; 6162 host_to_target_siginfo(p, &uinfo); 6163 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6164 } 6165 } 6166 break; 6167 case TARGET_NR_rt_sigqueueinfo: 6168 { 6169 siginfo_t uinfo; 6170 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6171 goto efault; 6172 
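            /* Convert the guest siginfo to the host layout before queueing
               it with the raw rt_sigqueueinfo syscall. */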
target_to_host_siginfo(&uinfo, p); 6173 unlock_user(p, arg1, 0); 6174 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6175 } 6176 break; 6177 #ifdef TARGET_NR_sigreturn 6178 case TARGET_NR_sigreturn: 6179 /* NOTE: ret is eax, so not transcoding must be done */ 6180 ret = do_sigreturn(cpu_env); 6181 break; 6182 #endif 6183 case TARGET_NR_rt_sigreturn: 6184 /* NOTE: ret is eax, so not transcoding must be done */ 6185 ret = do_rt_sigreturn(cpu_env); 6186 break; 6187 case TARGET_NR_sethostname: 6188 if (!(p = lock_user_string(arg1))) 6189 goto efault; 6190 ret = get_errno(sethostname(p, arg2)); 6191 unlock_user(p, arg1, 0); 6192 break; 6193 case TARGET_NR_setrlimit: 6194 { 6195 int resource = target_to_host_resource(arg1); 6196 struct target_rlimit *target_rlim; 6197 struct rlimit rlim; 6198 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6199 goto efault; 6200 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6201 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6202 unlock_user_struct(target_rlim, arg2, 0); 6203 ret = get_errno(setrlimit(resource, &rlim)); 6204 } 6205 break; 6206 case TARGET_NR_getrlimit: 6207 { 6208 int resource = target_to_host_resource(arg1); 6209 struct target_rlimit *target_rlim; 6210 struct rlimit rlim; 6211 6212 ret = get_errno(getrlimit(resource, &rlim)); 6213 if (!is_error(ret)) { 6214 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6215 goto efault; 6216 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6217 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6218 unlock_user_struct(target_rlim, arg2, 1); 6219 } 6220 } 6221 break; 6222 case TARGET_NR_getrusage: 6223 { 6224 struct rusage rusage; 6225 ret = get_errno(getrusage(arg1, &rusage)); 6226 if (!is_error(ret)) { 6227 host_to_target_rusage(arg2, &rusage); 6228 } 6229 } 6230 break; 6231 case TARGET_NR_gettimeofday: 6232 { 6233 struct timeval tv; 6234 ret = get_errno(gettimeofday(&tv, NULL)); 6235 if (!is_error(ret)) { 6236 if (copy_to_user_timeval(arg1, &tv)) 6237 goto efault; 6238 } 6239 } 6240 break; 6241 case TARGET_NR_settimeofday: 6242 { 6243 struct timeval tv; 6244 if (copy_from_user_timeval(&tv, arg1)) 6245 goto efault; 6246 ret = get_errno(settimeofday(&tv, NULL)); 6247 } 6248 break; 6249 #if defined(TARGET_NR_select) 6250 case TARGET_NR_select: 6251 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 6252 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6253 #else 6254 { 6255 struct target_sel_arg_struct *sel; 6256 abi_ulong inp, outp, exp, tvp; 6257 long nsel; 6258 6259 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6260 goto efault; 6261 nsel = tswapal(sel->n); 6262 inp = tswapal(sel->inp); 6263 outp = tswapal(sel->outp); 6264 exp = tswapal(sel->exp); 6265 tvp = tswapal(sel->tvp); 6266 unlock_user_struct(sel, arg1, 0); 6267 ret = do_select(nsel, inp, outp, exp, tvp); 6268 } 6269 #endif 6270 break; 6271 #endif 6272 #ifdef TARGET_NR_pselect6 6273 case TARGET_NR_pselect6: 6274 { 6275 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6276 fd_set rfds, wfds, efds; 6277 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6278 struct timespec ts, *ts_ptr; 6279 6280 /* 6281 * The 6th arg is actually two args smashed together, 6282 * so we cannot use the C library. 
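             * The guest actually passes a pointer to two abi_ulongs,
             *     { const sigset_t *ss; size_t ss_len; },
             * which are unpacked below before calling sys_pselect6().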
6283 */ 6284 sigset_t set; 6285 struct { 6286 sigset_t *set; 6287 size_t size; 6288 } sig, *sig_ptr; 6289 6290 abi_ulong arg_sigset, arg_sigsize, *arg7; 6291 target_sigset_t *target_sigset; 6292 6293 n = arg1; 6294 rfd_addr = arg2; 6295 wfd_addr = arg3; 6296 efd_addr = arg4; 6297 ts_addr = arg5; 6298 6299 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6300 if (ret) { 6301 goto fail; 6302 } 6303 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6304 if (ret) { 6305 goto fail; 6306 } 6307 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6308 if (ret) { 6309 goto fail; 6310 } 6311 6312 /* 6313 * This takes a timespec, and not a timeval, so we cannot 6314 * use the do_select() helper ... 6315 */ 6316 if (ts_addr) { 6317 if (target_to_host_timespec(&ts, ts_addr)) { 6318 goto efault; 6319 } 6320 ts_ptr = &ts; 6321 } else { 6322 ts_ptr = NULL; 6323 } 6324 6325 /* Extract the two packed args for the sigset */ 6326 if (arg6) { 6327 sig_ptr = &sig; 6328 sig.size = _NSIG / 8; 6329 6330 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6331 if (!arg7) { 6332 goto efault; 6333 } 6334 arg_sigset = tswapal(arg7[0]); 6335 arg_sigsize = tswapal(arg7[1]); 6336 unlock_user(arg7, arg6, 0); 6337 6338 if (arg_sigset) { 6339 sig.set = &set; 6340 if (arg_sigsize != sizeof(*target_sigset)) { 6341 /* Like the kernel, we enforce correct size sigsets */ 6342 ret = -TARGET_EINVAL; 6343 goto fail; 6344 } 6345 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6346 sizeof(*target_sigset), 1); 6347 if (!target_sigset) { 6348 goto efault; 6349 } 6350 target_to_host_sigset(&set, target_sigset); 6351 unlock_user(target_sigset, arg_sigset, 0); 6352 } else { 6353 sig.set = NULL; 6354 } 6355 } else { 6356 sig_ptr = NULL; 6357 } 6358 6359 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6360 ts_ptr, sig_ptr)); 6361 6362 if (!is_error(ret)) { 6363 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6364 goto efault; 6365 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6366 goto efault; 6367 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6368 goto efault; 6369 6370 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6371 goto efault; 6372 } 6373 } 6374 break; 6375 #endif 6376 case TARGET_NR_symlink: 6377 { 6378 void *p2; 6379 p = lock_user_string(arg1); 6380 p2 = lock_user_string(arg2); 6381 if (!p || !p2) 6382 ret = -TARGET_EFAULT; 6383 else 6384 ret = get_errno(symlink(p, p2)); 6385 unlock_user(p2, arg2, 0); 6386 unlock_user(p, arg1, 0); 6387 } 6388 break; 6389 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 6390 case TARGET_NR_symlinkat: 6391 { 6392 void *p2; 6393 p = lock_user_string(arg1); 6394 p2 = lock_user_string(arg3); 6395 if (!p || !p2) 6396 ret = -TARGET_EFAULT; 6397 else 6398 ret = get_errno(sys_symlinkat(p, arg2, p2)); 6399 unlock_user(p2, arg3, 0); 6400 unlock_user(p, arg1, 0); 6401 } 6402 break; 6403 #endif 6404 #ifdef TARGET_NR_oldlstat 6405 case TARGET_NR_oldlstat: 6406 goto unimplemented; 6407 #endif 6408 case TARGET_NR_readlink: 6409 { 6410 void *p2, *temp; 6411 p = lock_user_string(arg1); 6412 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6413 if (!p || !p2) 6414 ret = -TARGET_EFAULT; 6415 else { 6416 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) { 6417 char real[PATH_MAX]; 6418 temp = realpath(exec_path,real); 6419 ret = (temp==NULL) ? 
get_errno(-1) : strlen(real) ; 6420 snprintf((char *)p2, arg3, "%s", real); 6421 } 6422 else 6423 ret = get_errno(readlink(path(p), p2, arg3)); 6424 } 6425 unlock_user(p2, arg2, ret); 6426 unlock_user(p, arg1, 0); 6427 } 6428 break; 6429 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 6430 case TARGET_NR_readlinkat: 6431 { 6432 void *p2; 6433 p = lock_user_string(arg2); 6434 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6435 if (!p || !p2) 6436 ret = -TARGET_EFAULT; 6437 else 6438 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); 6439 unlock_user(p2, arg3, ret); 6440 unlock_user(p, arg2, 0); 6441 } 6442 break; 6443 #endif 6444 #ifdef TARGET_NR_uselib 6445 case TARGET_NR_uselib: 6446 goto unimplemented; 6447 #endif 6448 #ifdef TARGET_NR_swapon 6449 case TARGET_NR_swapon: 6450 if (!(p = lock_user_string(arg1))) 6451 goto efault; 6452 ret = get_errno(swapon(p, arg2)); 6453 unlock_user(p, arg1, 0); 6454 break; 6455 #endif 6456 case TARGET_NR_reboot: 6457 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6458 /* arg4 must be ignored in all other cases */ 6459 p = lock_user_string(arg4); 6460 if (!p) { 6461 goto efault; 6462 } 6463 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6464 unlock_user(p, arg4, 0); 6465 } else { 6466 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6467 } 6468 break; 6469 #ifdef TARGET_NR_readdir 6470 case TARGET_NR_readdir: 6471 goto unimplemented; 6472 #endif 6473 #ifdef TARGET_NR_mmap 6474 case TARGET_NR_mmap: 6475 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \ 6476 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6477 || defined(TARGET_S390X) 6478 { 6479 abi_ulong *v; 6480 abi_ulong v1, v2, v3, v4, v5, v6; 6481 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6482 goto efault; 6483 v1 = tswapal(v[0]); 6484 v2 = tswapal(v[1]); 6485 v3 = tswapal(v[2]); 6486 v4 = tswapal(v[3]); 6487 v5 = tswapal(v[4]); 6488 v6 = tswapal(v[5]); 6489 unlock_user(v, arg1, 0); 6490 ret = get_errno(target_mmap(v1, v2, v3, 6491 target_to_host_bitmask(v4, mmap_flags_tbl), 6492 v5, v6)); 6493 } 6494 #else 6495 ret = get_errno(target_mmap(arg1, arg2, arg3, 6496 target_to_host_bitmask(arg4, mmap_flags_tbl), 6497 arg5, 6498 arg6)); 6499 #endif 6500 break; 6501 #endif 6502 #ifdef TARGET_NR_mmap2 6503 case TARGET_NR_mmap2: 6504 #ifndef MMAP_SHIFT 6505 #define MMAP_SHIFT 12 6506 #endif 6507 ret = get_errno(target_mmap(arg1, arg2, arg3, 6508 target_to_host_bitmask(arg4, mmap_flags_tbl), 6509 arg5, 6510 arg6 << MMAP_SHIFT)); 6511 break; 6512 #endif 6513 case TARGET_NR_munmap: 6514 ret = get_errno(target_munmap(arg1, arg2)); 6515 break; 6516 case TARGET_NR_mprotect: 6517 { 6518 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 6519 /* Special hack to detect libc making the stack executable. */ 6520 if ((arg3 & PROT_GROWSDOWN) 6521 && arg1 >= ts->info->stack_limit 6522 && arg1 <= ts->info->start_stack) { 6523 arg3 &= ~PROT_GROWSDOWN; 6524 arg2 = arg2 + arg1 - ts->info->stack_limit; 6525 arg1 = ts->info->stack_limit; 6526 } 6527 } 6528 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6529 break; 6530 #ifdef TARGET_NR_mremap 6531 case TARGET_NR_mremap: 6532 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6533 break; 6534 #endif 6535 /* ??? msync/mlock/munlock are broken for softmmu. 
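       In user-mode emulation the calls below act directly on the host
       mapping obtained with g2h().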
*/ 6536 #ifdef TARGET_NR_msync 6537 case TARGET_NR_msync: 6538 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6539 break; 6540 #endif 6541 #ifdef TARGET_NR_mlock 6542 case TARGET_NR_mlock: 6543 ret = get_errno(mlock(g2h(arg1), arg2)); 6544 break; 6545 #endif 6546 #ifdef TARGET_NR_munlock 6547 case TARGET_NR_munlock: 6548 ret = get_errno(munlock(g2h(arg1), arg2)); 6549 break; 6550 #endif 6551 #ifdef TARGET_NR_mlockall 6552 case TARGET_NR_mlockall: 6553 ret = get_errno(mlockall(arg1)); 6554 break; 6555 #endif 6556 #ifdef TARGET_NR_munlockall 6557 case TARGET_NR_munlockall: 6558 ret = get_errno(munlockall()); 6559 break; 6560 #endif 6561 case TARGET_NR_truncate: 6562 if (!(p = lock_user_string(arg1))) 6563 goto efault; 6564 ret = get_errno(truncate(p, arg2)); 6565 unlock_user(p, arg1, 0); 6566 break; 6567 case TARGET_NR_ftruncate: 6568 ret = get_errno(ftruncate(arg1, arg2)); 6569 break; 6570 case TARGET_NR_fchmod: 6571 ret = get_errno(fchmod(arg1, arg2)); 6572 break; 6573 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 6574 case TARGET_NR_fchmodat: 6575 if (!(p = lock_user_string(arg2))) 6576 goto efault; 6577 ret = get_errno(sys_fchmodat(arg1, p, arg3)); 6578 unlock_user(p, arg2, 0); 6579 break; 6580 #endif 6581 case TARGET_NR_getpriority: 6582 /* Note that negative values are valid for getpriority, so we must 6583 differentiate based on errno settings. */ 6584 errno = 0; 6585 ret = getpriority(arg1, arg2); 6586 if (ret == -1 && errno != 0) { 6587 ret = -host_to_target_errno(errno); 6588 break; 6589 } 6590 #ifdef TARGET_ALPHA 6591 /* Return value is the unbiased priority. Signal no error. */ 6592 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6593 #else 6594 /* Return value is a biased priority to avoid negative numbers. */ 6595 ret = 20 - ret; 6596 #endif 6597 break; 6598 case TARGET_NR_setpriority: 6599 ret = get_errno(setpriority(arg1, arg2, arg3)); 6600 break; 6601 #ifdef TARGET_NR_profil 6602 case TARGET_NR_profil: 6603 goto unimplemented; 6604 #endif 6605 case TARGET_NR_statfs: 6606 if (!(p = lock_user_string(arg1))) 6607 goto efault; 6608 ret = get_errno(statfs(path(p), &stfs)); 6609 unlock_user(p, arg1, 0); 6610 convert_statfs: 6611 if (!is_error(ret)) { 6612 struct target_statfs *target_stfs; 6613 6614 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6615 goto efault; 6616 __put_user(stfs.f_type, &target_stfs->f_type); 6617 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6618 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6619 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6620 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6621 __put_user(stfs.f_files, &target_stfs->f_files); 6622 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6623 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6624 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6625 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6626 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6627 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6628 unlock_user_struct(target_stfs, arg2, 1); 6629 } 6630 break; 6631 case TARGET_NR_fstatfs: 6632 ret = get_errno(fstatfs(arg1, &stfs)); 6633 goto convert_statfs; 6634 #ifdef TARGET_NR_statfs64 6635 case TARGET_NR_statfs64: 6636 if (!(p = lock_user_string(arg1))) 6637 goto efault; 6638 ret = get_errno(statfs(path(p), &stfs)); 6639 unlock_user(p, arg1, 0); 6640 convert_statfs64: 6641 if (!is_error(ret)) { 6642 struct target_statfs64 *target_stfs; 6643 6644 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 
6645 goto efault; 6646 __put_user(stfs.f_type, &target_stfs->f_type); 6647 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6648 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6649 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6650 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6651 __put_user(stfs.f_files, &target_stfs->f_files); 6652 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6653 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6654 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6655 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6656 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6657 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6658 unlock_user_struct(target_stfs, arg3, 1); 6659 } 6660 break; 6661 case TARGET_NR_fstatfs64: 6662 ret = get_errno(fstatfs(arg1, &stfs)); 6663 goto convert_statfs64; 6664 #endif 6665 #ifdef TARGET_NR_ioperm 6666 case TARGET_NR_ioperm: 6667 goto unimplemented; 6668 #endif 6669 #ifdef TARGET_NR_socketcall 6670 case TARGET_NR_socketcall: 6671 ret = do_socketcall(arg1, arg2); 6672 break; 6673 #endif 6674 #ifdef TARGET_NR_accept 6675 case TARGET_NR_accept: 6676 ret = do_accept(arg1, arg2, arg3); 6677 break; 6678 #endif 6679 #ifdef TARGET_NR_bind 6680 case TARGET_NR_bind: 6681 ret = do_bind(arg1, arg2, arg3); 6682 break; 6683 #endif 6684 #ifdef TARGET_NR_connect 6685 case TARGET_NR_connect: 6686 ret = do_connect(arg1, arg2, arg3); 6687 break; 6688 #endif 6689 #ifdef TARGET_NR_getpeername 6690 case TARGET_NR_getpeername: 6691 ret = do_getpeername(arg1, arg2, arg3); 6692 break; 6693 #endif 6694 #ifdef TARGET_NR_getsockname 6695 case TARGET_NR_getsockname: 6696 ret = do_getsockname(arg1, arg2, arg3); 6697 break; 6698 #endif 6699 #ifdef TARGET_NR_getsockopt 6700 case TARGET_NR_getsockopt: 6701 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6702 break; 6703 #endif 6704 #ifdef TARGET_NR_listen 6705 case TARGET_NR_listen: 6706 ret = get_errno(listen(arg1, arg2)); 6707 break; 6708 #endif 6709 #ifdef TARGET_NR_recv 6710 case TARGET_NR_recv: 6711 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6712 break; 6713 #endif 6714 #ifdef TARGET_NR_recvfrom 6715 case TARGET_NR_recvfrom: 6716 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6717 break; 6718 #endif 6719 #ifdef TARGET_NR_recvmsg 6720 case TARGET_NR_recvmsg: 6721 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6722 break; 6723 #endif 6724 #ifdef TARGET_NR_send 6725 case TARGET_NR_send: 6726 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6727 break; 6728 #endif 6729 #ifdef TARGET_NR_sendmsg 6730 case TARGET_NR_sendmsg: 6731 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6732 break; 6733 #endif 6734 #ifdef TARGET_NR_sendto 6735 case TARGET_NR_sendto: 6736 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6737 break; 6738 #endif 6739 #ifdef TARGET_NR_shutdown 6740 case TARGET_NR_shutdown: 6741 ret = get_errno(shutdown(arg1, arg2)); 6742 break; 6743 #endif 6744 #ifdef TARGET_NR_socket 6745 case TARGET_NR_socket: 6746 ret = do_socket(arg1, arg2, arg3); 6747 break; 6748 #endif 6749 #ifdef TARGET_NR_socketpair 6750 case TARGET_NR_socketpair: 6751 ret = do_socketpair(arg1, arg2, arg3, arg4); 6752 break; 6753 #endif 6754 #ifdef TARGET_NR_setsockopt 6755 case TARGET_NR_setsockopt: 6756 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6757 break; 6758 #endif 6759 6760 case TARGET_NR_syslog: 6761 if (!(p = lock_user_string(arg2))) 6762 goto efault; 6763 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6764 unlock_user(p, arg2, 0); 6765 
break; 6766 6767 case TARGET_NR_setitimer: 6768 { 6769 struct itimerval value, ovalue, *pvalue; 6770 6771 if (arg2) { 6772 pvalue = &value; 6773 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6774 || copy_from_user_timeval(&pvalue->it_value, 6775 arg2 + sizeof(struct target_timeval))) 6776 goto efault; 6777 } else { 6778 pvalue = NULL; 6779 } 6780 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6781 if (!is_error(ret) && arg3) { 6782 if (copy_to_user_timeval(arg3, 6783 &ovalue.it_interval) 6784 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6785 &ovalue.it_value)) 6786 goto efault; 6787 } 6788 } 6789 break; 6790 case TARGET_NR_getitimer: 6791 { 6792 struct itimerval value; 6793 6794 ret = get_errno(getitimer(arg1, &value)); 6795 if (!is_error(ret) && arg2) { 6796 if (copy_to_user_timeval(arg2, 6797 &value.it_interval) 6798 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6799 &value.it_value)) 6800 goto efault; 6801 } 6802 } 6803 break; 6804 case TARGET_NR_stat: 6805 if (!(p = lock_user_string(arg1))) 6806 goto efault; 6807 ret = get_errno(stat(path(p), &st)); 6808 unlock_user(p, arg1, 0); 6809 goto do_stat; 6810 case TARGET_NR_lstat: 6811 if (!(p = lock_user_string(arg1))) 6812 goto efault; 6813 ret = get_errno(lstat(path(p), &st)); 6814 unlock_user(p, arg1, 0); 6815 goto do_stat; 6816 case TARGET_NR_fstat: 6817 { 6818 ret = get_errno(fstat(arg1, &st)); 6819 do_stat: 6820 if (!is_error(ret)) { 6821 struct target_stat *target_st; 6822 6823 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6824 goto efault; 6825 memset(target_st, 0, sizeof(*target_st)); 6826 __put_user(st.st_dev, &target_st->st_dev); 6827 __put_user(st.st_ino, &target_st->st_ino); 6828 __put_user(st.st_mode, &target_st->st_mode); 6829 __put_user(st.st_uid, &target_st->st_uid); 6830 __put_user(st.st_gid, &target_st->st_gid); 6831 __put_user(st.st_nlink, &target_st->st_nlink); 6832 __put_user(st.st_rdev, &target_st->st_rdev); 6833 __put_user(st.st_size, &target_st->st_size); 6834 __put_user(st.st_blksize, &target_st->st_blksize); 6835 __put_user(st.st_blocks, &target_st->st_blocks); 6836 __put_user(st.st_atime, &target_st->target_st_atime); 6837 __put_user(st.st_mtime, &target_st->target_st_mtime); 6838 __put_user(st.st_ctime, &target_st->target_st_ctime); 6839 unlock_user_struct(target_st, arg2, 1); 6840 } 6841 } 6842 break; 6843 #ifdef TARGET_NR_olduname 6844 case TARGET_NR_olduname: 6845 goto unimplemented; 6846 #endif 6847 #ifdef TARGET_NR_iopl 6848 case TARGET_NR_iopl: 6849 goto unimplemented; 6850 #endif 6851 case TARGET_NR_vhangup: 6852 ret = get_errno(vhangup()); 6853 break; 6854 #ifdef TARGET_NR_idle 6855 case TARGET_NR_idle: 6856 goto unimplemented; 6857 #endif 6858 #ifdef TARGET_NR_syscall 6859 case TARGET_NR_syscall: 6860 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6861 arg6, arg7, arg8, 0); 6862 break; 6863 #endif 6864 case TARGET_NR_wait4: 6865 { 6866 int status; 6867 abi_long status_ptr = arg2; 6868 struct rusage rusage, *rusage_ptr; 6869 abi_ulong target_rusage = arg4; 6870 if (target_rusage) 6871 rusage_ptr = &rusage; 6872 else 6873 rusage_ptr = NULL; 6874 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6875 if (!is_error(ret)) { 6876 if (status_ptr && ret) { 6877 status = host_to_target_waitstatus(status); 6878 if (put_user_s32(status, status_ptr)) 6879 goto efault; 6880 } 6881 if (target_rusage) 6882 host_to_target_rusage(target_rusage, &rusage); 6883 } 6884 } 6885 break; 6886 #ifdef TARGET_NR_swapoff 6887 case TARGET_NR_swapoff: 
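        /* Like swapon above, swapoff only needs the guest path string
           copied in before the host call. */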
6888 if (!(p = lock_user_string(arg1))) 6889 goto efault; 6890 ret = get_errno(swapoff(p)); 6891 unlock_user(p, arg1, 0); 6892 break; 6893 #endif 6894 case TARGET_NR_sysinfo: 6895 { 6896 struct target_sysinfo *target_value; 6897 struct sysinfo value; 6898 ret = get_errno(sysinfo(&value)); 6899 if (!is_error(ret) && arg1) 6900 { 6901 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6902 goto efault; 6903 __put_user(value.uptime, &target_value->uptime); 6904 __put_user(value.loads[0], &target_value->loads[0]); 6905 __put_user(value.loads[1], &target_value->loads[1]); 6906 __put_user(value.loads[2], &target_value->loads[2]); 6907 __put_user(value.totalram, &target_value->totalram); 6908 __put_user(value.freeram, &target_value->freeram); 6909 __put_user(value.sharedram, &target_value->sharedram); 6910 __put_user(value.bufferram, &target_value->bufferram); 6911 __put_user(value.totalswap, &target_value->totalswap); 6912 __put_user(value.freeswap, &target_value->freeswap); 6913 __put_user(value.procs, &target_value->procs); 6914 __put_user(value.totalhigh, &target_value->totalhigh); 6915 __put_user(value.freehigh, &target_value->freehigh); 6916 __put_user(value.mem_unit, &target_value->mem_unit); 6917 unlock_user_struct(target_value, arg1, 1); 6918 } 6919 } 6920 break; 6921 #ifdef TARGET_NR_ipc 6922 case TARGET_NR_ipc: 6923 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6924 break; 6925 #endif 6926 #ifdef TARGET_NR_semget 6927 case TARGET_NR_semget: 6928 ret = get_errno(semget(arg1, arg2, arg3)); 6929 break; 6930 #endif 6931 #ifdef TARGET_NR_semop 6932 case TARGET_NR_semop: 6933 ret = get_errno(do_semop(arg1, arg2, arg3)); 6934 break; 6935 #endif 6936 #ifdef TARGET_NR_semctl 6937 case TARGET_NR_semctl: 6938 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 6939 break; 6940 #endif 6941 #ifdef TARGET_NR_msgctl 6942 case TARGET_NR_msgctl: 6943 ret = do_msgctl(arg1, arg2, arg3); 6944 break; 6945 #endif 6946 #ifdef TARGET_NR_msgget 6947 case TARGET_NR_msgget: 6948 ret = get_errno(msgget(arg1, arg2)); 6949 break; 6950 #endif 6951 #ifdef TARGET_NR_msgrcv 6952 case TARGET_NR_msgrcv: 6953 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 6954 break; 6955 #endif 6956 #ifdef TARGET_NR_msgsnd 6957 case TARGET_NR_msgsnd: 6958 ret = do_msgsnd(arg1, arg2, arg3, arg4); 6959 break; 6960 #endif 6961 #ifdef TARGET_NR_shmget 6962 case TARGET_NR_shmget: 6963 ret = get_errno(shmget(arg1, arg2, arg3)); 6964 break; 6965 #endif 6966 #ifdef TARGET_NR_shmctl 6967 case TARGET_NR_shmctl: 6968 ret = do_shmctl(arg1, arg2, arg3); 6969 break; 6970 #endif 6971 #ifdef TARGET_NR_shmat 6972 case TARGET_NR_shmat: 6973 ret = do_shmat(arg1, arg2, arg3); 6974 break; 6975 #endif 6976 #ifdef TARGET_NR_shmdt 6977 case TARGET_NR_shmdt: 6978 ret = do_shmdt(arg1); 6979 break; 6980 #endif 6981 case TARGET_NR_fsync: 6982 ret = get_errno(fsync(arg1)); 6983 break; 6984 case TARGET_NR_clone: 6985 #if defined(TARGET_SH4) || defined(TARGET_ALPHA) 6986 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 6987 #elif defined(TARGET_CRIS) 6988 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); 6989 #elif defined(TARGET_MICROBLAZE) 6990 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 6991 #elif defined(TARGET_S390X) 6992 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 6993 #else 6994 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 6995 #endif 6996 break; 6997 #ifdef __NR_exit_group 6998 /* new thread calls */ 6999 case TARGET_NR_exit_group: 7000 
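        /* exit_group() ends every thread in the process; flush any gprof
           data and notify the gdb stub first, since the host call does not
           return on success. */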
#ifdef TARGET_GPROF 7001 _mcleanup(); 7002 #endif 7003 gdb_exit(cpu_env, arg1); 7004 ret = get_errno(exit_group(arg1)); 7005 break; 7006 #endif 7007 case TARGET_NR_setdomainname: 7008 if (!(p = lock_user_string(arg1))) 7009 goto efault; 7010 ret = get_errno(setdomainname(p, arg2)); 7011 unlock_user(p, arg1, 0); 7012 break; 7013 case TARGET_NR_uname: 7014 /* no need to transcode because we use the linux syscall */ 7015 { 7016 struct new_utsname * buf; 7017 7018 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 7019 goto efault; 7020 ret = get_errno(sys_uname(buf)); 7021 if (!is_error(ret)) { 7022 /* Overrite the native machine name with whatever is being 7023 emulated. */ 7024 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 7025 /* Allow the user to override the reported release. */ 7026 if (qemu_uname_release && *qemu_uname_release) 7027 strcpy (buf->release, qemu_uname_release); 7028 } 7029 unlock_user_struct(buf, arg1, 1); 7030 } 7031 break; 7032 #ifdef TARGET_I386 7033 case TARGET_NR_modify_ldt: 7034 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7035 break; 7036 #if !defined(TARGET_X86_64) 7037 case TARGET_NR_vm86old: 7038 goto unimplemented; 7039 case TARGET_NR_vm86: 7040 ret = do_vm86(cpu_env, arg1, arg2); 7041 break; 7042 #endif 7043 #endif 7044 case TARGET_NR_adjtimex: 7045 goto unimplemented; 7046 #ifdef TARGET_NR_create_module 7047 case TARGET_NR_create_module: 7048 #endif 7049 case TARGET_NR_init_module: 7050 case TARGET_NR_delete_module: 7051 #ifdef TARGET_NR_get_kernel_syms 7052 case TARGET_NR_get_kernel_syms: 7053 #endif 7054 goto unimplemented; 7055 case TARGET_NR_quotactl: 7056 goto unimplemented; 7057 case TARGET_NR_getpgid: 7058 ret = get_errno(getpgid(arg1)); 7059 break; 7060 case TARGET_NR_fchdir: 7061 ret = get_errno(fchdir(arg1)); 7062 break; 7063 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7064 case TARGET_NR_bdflush: 7065 goto unimplemented; 7066 #endif 7067 #ifdef TARGET_NR_sysfs 7068 case TARGET_NR_sysfs: 7069 goto unimplemented; 7070 #endif 7071 case TARGET_NR_personality: 7072 ret = get_errno(personality(arg1)); 7073 break; 7074 #ifdef TARGET_NR_afs_syscall 7075 case TARGET_NR_afs_syscall: 7076 goto unimplemented; 7077 #endif 7078 #ifdef TARGET_NR__llseek /* Not on alpha */ 7079 case TARGET_NR__llseek: 7080 { 7081 int64_t res; 7082 #if !defined(__NR_llseek) 7083 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7084 if (res == -1) { 7085 ret = get_errno(res); 7086 } else { 7087 ret = 0; 7088 } 7089 #else 7090 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7091 #endif 7092 if ((ret == 0) && put_user_s64(res, arg4)) { 7093 goto efault; 7094 } 7095 } 7096 break; 7097 #endif 7098 case TARGET_NR_getdents: 7099 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7100 { 7101 struct target_dirent *target_dirp; 7102 struct linux_dirent *dirp; 7103 abi_long count = arg3; 7104 7105 dirp = malloc(count); 7106 if (!dirp) { 7107 ret = -TARGET_ENOMEM; 7108 goto fail; 7109 } 7110 7111 ret = get_errno(sys_getdents(arg1, dirp, count)); 7112 if (!is_error(ret)) { 7113 struct linux_dirent *de; 7114 struct target_dirent *tde; 7115 int len = ret; 7116 int reclen, treclen; 7117 int count1, tnamelen; 7118 7119 count1 = 0; 7120 de = dirp; 7121 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7122 goto efault; 7123 tde = target_dirp; 7124 while (len > 0) { 7125 reclen = de->d_reclen; 7126 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7127 assert(tnamelen >= 0); 7128 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7129 
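                    /* The repacked record must still fit within the guest
                       buffer. */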
assert(count1 + treclen <= count); 7130 tde->d_reclen = tswap16(treclen); 7131 tde->d_ino = tswapal(de->d_ino); 7132 tde->d_off = tswapal(de->d_off); 7133 memcpy(tde->d_name, de->d_name, tnamelen); 7134 de = (struct linux_dirent *)((char *)de + reclen); 7135 len -= reclen; 7136 tde = (struct target_dirent *)((char *)tde + treclen); 7137 count1 += treclen; 7138 } 7139 ret = count1; 7140 unlock_user(target_dirp, arg2, ret); 7141 } 7142 free(dirp); 7143 } 7144 #else 7145 { 7146 struct linux_dirent *dirp; 7147 abi_long count = arg3; 7148 7149 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7150 goto efault; 7151 ret = get_errno(sys_getdents(arg1, dirp, count)); 7152 if (!is_error(ret)) { 7153 struct linux_dirent *de; 7154 int len = ret; 7155 int reclen; 7156 de = dirp; 7157 while (len > 0) { 7158 reclen = de->d_reclen; 7159 if (reclen > len) 7160 break; 7161 de->d_reclen = tswap16(reclen); 7162 tswapls(&de->d_ino); 7163 tswapls(&de->d_off); 7164 de = (struct linux_dirent *)((char *)de + reclen); 7165 len -= reclen; 7166 } 7167 } 7168 unlock_user(dirp, arg2, ret); 7169 } 7170 #endif 7171 break; 7172 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7173 case TARGET_NR_getdents64: 7174 { 7175 struct linux_dirent64 *dirp; 7176 abi_long count = arg3; 7177 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7178 goto efault; 7179 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7180 if (!is_error(ret)) { 7181 struct linux_dirent64 *de; 7182 int len = ret; 7183 int reclen; 7184 de = dirp; 7185 while (len > 0) { 7186 reclen = de->d_reclen; 7187 if (reclen > len) 7188 break; 7189 de->d_reclen = tswap16(reclen); 7190 tswap64s((uint64_t *)&de->d_ino); 7191 tswap64s((uint64_t *)&de->d_off); 7192 de = (struct linux_dirent64 *)((char *)de + reclen); 7193 len -= reclen; 7194 } 7195 } 7196 unlock_user(dirp, arg2, ret); 7197 } 7198 break; 7199 #endif /* TARGET_NR_getdents64 */ 7200 #if defined(TARGET_NR__newselect) 7201 case TARGET_NR__newselect: 7202 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7203 break; 7204 #endif 7205 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7206 # ifdef TARGET_NR_poll 7207 case TARGET_NR_poll: 7208 # endif 7209 # ifdef TARGET_NR_ppoll 7210 case TARGET_NR_ppoll: 7211 # endif 7212 { 7213 struct target_pollfd *target_pfd; 7214 unsigned int nfds = arg2; 7215 int timeout = arg3; 7216 struct pollfd *pfd; 7217 unsigned int i; 7218 7219 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7220 if (!target_pfd) 7221 goto efault; 7222 7223 pfd = alloca(sizeof(struct pollfd) * nfds); 7224 for(i = 0; i < nfds; i++) { 7225 pfd[i].fd = tswap32(target_pfd[i].fd); 7226 pfd[i].events = tswap16(target_pfd[i].events); 7227 } 7228 7229 # ifdef TARGET_NR_ppoll 7230 if (num == TARGET_NR_ppoll) { 7231 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7232 target_sigset_t *target_set; 7233 sigset_t _set, *set = &_set; 7234 7235 if (arg3) { 7236 if (target_to_host_timespec(timeout_ts, arg3)) { 7237 unlock_user(target_pfd, arg1, 0); 7238 goto efault; 7239 } 7240 } else { 7241 timeout_ts = NULL; 7242 } 7243 7244 if (arg4) { 7245 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7246 if (!target_set) { 7247 unlock_user(target_pfd, arg1, 0); 7248 goto efault; 7249 } 7250 target_to_host_sigset(set, target_set); 7251 } else { 7252 set = NULL; 7253 } 7254 7255 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7256 7257 if (!is_error(ret) && arg3) { 7258 host_to_target_timespec(arg3, timeout_ts); 7259 } 
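                /* Release the guest sigset now that the host ppoll has
                   completed. */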
7260 if (arg4) { 7261 unlock_user(target_set, arg4, 0); 7262 } 7263 } else 7264 # endif 7265 ret = get_errno(poll(pfd, nfds, timeout)); 7266 7267 if (!is_error(ret)) { 7268 for(i = 0; i < nfds; i++) { 7269 target_pfd[i].revents = tswap16(pfd[i].revents); 7270 } 7271 } 7272 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7273 } 7274 break; 7275 #endif 7276 case TARGET_NR_flock: 7277 /* NOTE: the flock constant seems to be the same for every 7278 Linux platform */ 7279 ret = get_errno(flock(arg1, arg2)); 7280 break; 7281 case TARGET_NR_readv: 7282 { 7283 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7284 if (vec != NULL) { 7285 ret = get_errno(readv(arg1, vec, arg3)); 7286 unlock_iovec(vec, arg2, arg3, 1); 7287 } else { 7288 ret = -host_to_target_errno(errno); 7289 } 7290 } 7291 break; 7292 case TARGET_NR_writev: 7293 { 7294 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7295 if (vec != NULL) { 7296 ret = get_errno(writev(arg1, vec, arg3)); 7297 unlock_iovec(vec, arg2, arg3, 0); 7298 } else { 7299 ret = -host_to_target_errno(errno); 7300 } 7301 } 7302 break; 7303 case TARGET_NR_getsid: 7304 ret = get_errno(getsid(arg1)); 7305 break; 7306 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7307 case TARGET_NR_fdatasync: 7308 ret = get_errno(fdatasync(arg1)); 7309 break; 7310 #endif 7311 case TARGET_NR__sysctl: 7312 /* We don't implement this, but ENOTDIR is always a safe 7313 return value. */ 7314 ret = -TARGET_ENOTDIR; 7315 break; 7316 case TARGET_NR_sched_getaffinity: 7317 { 7318 unsigned int mask_size; 7319 unsigned long *mask; 7320 7321 /* 7322 * sched_getaffinity needs multiples of ulong, so need to take 7323 * care of mismatches between target ulong and host ulong sizes. 7324 */ 7325 if (arg2 & (sizeof(abi_ulong) - 1)) { 7326 ret = -TARGET_EINVAL; 7327 break; 7328 } 7329 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7330 7331 mask = alloca(mask_size); 7332 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7333 7334 if (!is_error(ret)) { 7335 if (copy_to_user(arg3, mask, ret)) { 7336 goto efault; 7337 } 7338 } 7339 } 7340 break; 7341 case TARGET_NR_sched_setaffinity: 7342 { 7343 unsigned int mask_size; 7344 unsigned long *mask; 7345 7346 /* 7347 * sched_setaffinity needs multiples of ulong, so need to take 7348 * care of mismatches between target ulong and host ulong sizes. 
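         * The temporary mask buffer below is therefore rounded up to a
         * whole number of host longs.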
7349 */ 7350 if (arg2 & (sizeof(abi_ulong) - 1)) { 7351 ret = -TARGET_EINVAL; 7352 break; 7353 } 7354 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7355 7356 mask = alloca(mask_size); 7357 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7358 goto efault; 7359 } 7360 memcpy(mask, p, arg2); 7361 unlock_user_struct(p, arg2, 0); 7362 7363 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7364 } 7365 break; 7366 case TARGET_NR_sched_setparam: 7367 { 7368 struct sched_param *target_schp; 7369 struct sched_param schp; 7370 7371 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7372 goto efault; 7373 schp.sched_priority = tswap32(target_schp->sched_priority); 7374 unlock_user_struct(target_schp, arg2, 0); 7375 ret = get_errno(sched_setparam(arg1, &schp)); 7376 } 7377 break; 7378 case TARGET_NR_sched_getparam: 7379 { 7380 struct sched_param *target_schp; 7381 struct sched_param schp; 7382 ret = get_errno(sched_getparam(arg1, &schp)); 7383 if (!is_error(ret)) { 7384 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7385 goto efault; 7386 target_schp->sched_priority = tswap32(schp.sched_priority); 7387 unlock_user_struct(target_schp, arg2, 1); 7388 } 7389 } 7390 break; 7391 case TARGET_NR_sched_setscheduler: 7392 { 7393 struct sched_param *target_schp; 7394 struct sched_param schp; 7395 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7396 goto efault; 7397 schp.sched_priority = tswap32(target_schp->sched_priority); 7398 unlock_user_struct(target_schp, arg3, 0); 7399 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7400 } 7401 break; 7402 case TARGET_NR_sched_getscheduler: 7403 ret = get_errno(sched_getscheduler(arg1)); 7404 break; 7405 case TARGET_NR_sched_yield: 7406 ret = get_errno(sched_yield()); 7407 break; 7408 case TARGET_NR_sched_get_priority_max: 7409 ret = get_errno(sched_get_priority_max(arg1)); 7410 break; 7411 case TARGET_NR_sched_get_priority_min: 7412 ret = get_errno(sched_get_priority_min(arg1)); 7413 break; 7414 case TARGET_NR_sched_rr_get_interval: 7415 { 7416 struct timespec ts; 7417 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7418 if (!is_error(ret)) { 7419 host_to_target_timespec(arg2, &ts); 7420 } 7421 } 7422 break; 7423 case TARGET_NR_nanosleep: 7424 { 7425 struct timespec req, rem; 7426 target_to_host_timespec(&req, arg1); 7427 ret = get_errno(nanosleep(&req, &rem)); 7428 if (is_error(ret) && arg2) { 7429 host_to_target_timespec(arg2, &rem); 7430 } 7431 } 7432 break; 7433 #ifdef TARGET_NR_query_module 7434 case TARGET_NR_query_module: 7435 goto unimplemented; 7436 #endif 7437 #ifdef TARGET_NR_nfsservctl 7438 case TARGET_NR_nfsservctl: 7439 goto unimplemented; 7440 #endif 7441 case TARGET_NR_prctl: 7442 switch (arg1) { 7443 case PR_GET_PDEATHSIG: 7444 { 7445 int deathsig; 7446 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7447 if (!is_error(ret) && arg2 7448 && put_user_ual(deathsig, arg2)) { 7449 goto efault; 7450 } 7451 break; 7452 } 7453 #ifdef PR_GET_NAME 7454 case PR_GET_NAME: 7455 { 7456 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7457 if (!name) { 7458 goto efault; 7459 } 7460 ret = get_errno(prctl(arg1, (unsigned long)name, 7461 arg3, arg4, arg5)); 7462 unlock_user(name, arg2, 16); 7463 break; 7464 } 7465 case PR_SET_NAME: 7466 { 7467 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7468 if (!name) { 7469 goto efault; 7470 } 7471 ret = get_errno(prctl(arg1, (unsigned long)name, 7472 arg3, arg4, arg5)); 7473 unlock_user(name, arg2, 0); 7474 break; 7475 } 7476 #endif 7477 
default: 7478 /* Most prctl options have no pointer arguments */ 7479 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7480 break; 7481 } 7482 break; 7483 #ifdef TARGET_NR_arch_prctl 7484 case TARGET_NR_arch_prctl: 7485 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7486 ret = do_arch_prctl(cpu_env, arg1, arg2); 7487 break; 7488 #else 7489 goto unimplemented; 7490 #endif 7491 #endif 7492 #ifdef TARGET_NR_pread64 7493 case TARGET_NR_pread64: 7494 if (regpairs_aligned(cpu_env)) { 7495 arg4 = arg5; 7496 arg5 = arg6; 7497 } 7498 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7499 goto efault; 7500 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7501 unlock_user(p, arg2, ret); 7502 break; 7503 case TARGET_NR_pwrite64: 7504 if (regpairs_aligned(cpu_env)) { 7505 arg4 = arg5; 7506 arg5 = arg6; 7507 } 7508 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7509 goto efault; 7510 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7511 unlock_user(p, arg2, 0); 7512 break; 7513 #endif 7514 case TARGET_NR_getcwd: 7515 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7516 goto efault; 7517 ret = get_errno(sys_getcwd1(p, arg2)); 7518 unlock_user(p, arg1, ret); 7519 break; 7520 case TARGET_NR_capget: 7521 goto unimplemented; 7522 case TARGET_NR_capset: 7523 goto unimplemented; 7524 case TARGET_NR_sigaltstack: 7525 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7526 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7527 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7528 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7529 break; 7530 #else 7531 goto unimplemented; 7532 #endif 7533 case TARGET_NR_sendfile: 7534 goto unimplemented; 7535 #ifdef TARGET_NR_getpmsg 7536 case TARGET_NR_getpmsg: 7537 goto unimplemented; 7538 #endif 7539 #ifdef TARGET_NR_putpmsg 7540 case TARGET_NR_putpmsg: 7541 goto unimplemented; 7542 #endif 7543 #ifdef TARGET_NR_vfork 7544 case TARGET_NR_vfork: 7545 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7546 0, 0, 0, 0)); 7547 break; 7548 #endif 7549 #ifdef TARGET_NR_ugetrlimit 7550 case TARGET_NR_ugetrlimit: 7551 { 7552 struct rlimit rlim; 7553 int resource = target_to_host_resource(arg1); 7554 ret = get_errno(getrlimit(resource, &rlim)); 7555 if (!is_error(ret)) { 7556 struct target_rlimit *target_rlim; 7557 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7558 goto efault; 7559 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7560 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7561 unlock_user_struct(target_rlim, arg2, 1); 7562 } 7563 break; 7564 } 7565 #endif 7566 #ifdef TARGET_NR_truncate64 7567 case TARGET_NR_truncate64: 7568 if (!(p = lock_user_string(arg1))) 7569 goto efault; 7570 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7571 unlock_user(p, arg1, 0); 7572 break; 7573 #endif 7574 #ifdef TARGET_NR_ftruncate64 7575 case TARGET_NR_ftruncate64: 7576 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7577 break; 7578 #endif 7579 #ifdef TARGET_NR_stat64 7580 case TARGET_NR_stat64: 7581 if (!(p = lock_user_string(arg1))) 7582 goto efault; 7583 ret = get_errno(stat(path(p), &st)); 7584 unlock_user(p, arg1, 0); 7585 if (!is_error(ret)) 7586 ret = host_to_target_stat64(cpu_env, arg2, &st); 7587 break; 7588 #endif 7589 #ifdef TARGET_NR_lstat64 7590 case TARGET_NR_lstat64: 7591 if (!(p = lock_user_string(arg1))) 7592 goto efault; 7593 ret = 
get_errno(lstat(path(p), &st)); 7594 unlock_user(p, arg1, 0); 7595 if (!is_error(ret)) 7596 ret = host_to_target_stat64(cpu_env, arg2, &st); 7597 break; 7598 #endif 7599 #ifdef TARGET_NR_fstat64 7600 case TARGET_NR_fstat64: 7601 ret = get_errno(fstat(arg1, &st)); 7602 if (!is_error(ret)) 7603 ret = host_to_target_stat64(cpu_env, arg2, &st); 7604 break; 7605 #endif 7606 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 7607 (defined(__NR_fstatat64) || defined(__NR_newfstatat)) 7608 #ifdef TARGET_NR_fstatat64 7609 case TARGET_NR_fstatat64: 7610 #endif 7611 #ifdef TARGET_NR_newfstatat 7612 case TARGET_NR_newfstatat: 7613 #endif 7614 if (!(p = lock_user_string(arg2))) 7615 goto efault; 7616 #ifdef __NR_fstatat64 7617 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); 7618 #else 7619 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); 7620 #endif 7621 if (!is_error(ret)) 7622 ret = host_to_target_stat64(cpu_env, arg3, &st); 7623 break; 7624 #endif 7625 case TARGET_NR_lchown: 7626 if (!(p = lock_user_string(arg1))) 7627 goto efault; 7628 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7629 unlock_user(p, arg1, 0); 7630 break; 7631 #ifdef TARGET_NR_getuid 7632 case TARGET_NR_getuid: 7633 ret = get_errno(high2lowuid(getuid())); 7634 break; 7635 #endif 7636 #ifdef TARGET_NR_getgid 7637 case TARGET_NR_getgid: 7638 ret = get_errno(high2lowgid(getgid())); 7639 break; 7640 #endif 7641 #ifdef TARGET_NR_geteuid 7642 case TARGET_NR_geteuid: 7643 ret = get_errno(high2lowuid(geteuid())); 7644 break; 7645 #endif 7646 #ifdef TARGET_NR_getegid 7647 case TARGET_NR_getegid: 7648 ret = get_errno(high2lowgid(getegid())); 7649 break; 7650 #endif 7651 case TARGET_NR_setreuid: 7652 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7653 break; 7654 case TARGET_NR_setregid: 7655 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7656 break; 7657 case TARGET_NR_getgroups: 7658 { 7659 int gidsetsize = arg1; 7660 target_id *target_grouplist; 7661 gid_t *grouplist; 7662 int i; 7663 7664 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7665 ret = get_errno(getgroups(gidsetsize, grouplist)); 7666 if (gidsetsize == 0) 7667 break; 7668 if (!is_error(ret)) { 7669 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0); 7670 if (!target_grouplist) 7671 goto efault; 7672 for(i = 0;i < ret; i++) 7673 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7674 unlock_user(target_grouplist, arg2, gidsetsize * 2); 7675 } 7676 } 7677 break; 7678 case TARGET_NR_setgroups: 7679 { 7680 int gidsetsize = arg1; 7681 target_id *target_grouplist; 7682 gid_t *grouplist; 7683 int i; 7684 7685 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7686 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1); 7687 if (!target_grouplist) { 7688 ret = -TARGET_EFAULT; 7689 goto fail; 7690 } 7691 for(i = 0;i < gidsetsize; i++) 7692 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7693 unlock_user(target_grouplist, arg2, 0); 7694 ret = get_errno(setgroups(gidsetsize, grouplist)); 7695 } 7696 break; 7697 case TARGET_NR_fchown: 7698 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7699 break; 7700 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 7701 case TARGET_NR_fchownat: 7702 if (!(p = lock_user_string(arg2))) 7703 goto efault; 7704 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); 7705 unlock_user(p, arg2, 0); 7706 break; 7707 #endif 7708 #ifdef TARGET_NR_setresuid 7709 case 
TARGET_NR_setresuid: 7710 ret = get_errno(setresuid(low2highuid(arg1), 7711 low2highuid(arg2), 7712 low2highuid(arg3))); 7713 break; 7714 #endif 7715 #ifdef TARGET_NR_getresuid 7716 case TARGET_NR_getresuid: 7717 { 7718 uid_t ruid, euid, suid; 7719 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7720 if (!is_error(ret)) { 7721 if (put_user_u16(high2lowuid(ruid), arg1) 7722 || put_user_u16(high2lowuid(euid), arg2) 7723 || put_user_u16(high2lowuid(suid), arg3)) 7724 goto efault; 7725 } 7726 } 7727 break; 7728 #endif 7729 #ifdef TARGET_NR_getresgid 7730 case TARGET_NR_setresgid: 7731 ret = get_errno(setresgid(low2highgid(arg1), 7732 low2highgid(arg2), 7733 low2highgid(arg3))); 7734 break; 7735 #endif 7736 #ifdef TARGET_NR_getresgid 7737 case TARGET_NR_getresgid: 7738 { 7739 gid_t rgid, egid, sgid; 7740 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7741 if (!is_error(ret)) { 7742 if (put_user_u16(high2lowgid(rgid), arg1) 7743 || put_user_u16(high2lowgid(egid), arg2) 7744 || put_user_u16(high2lowgid(sgid), arg3)) 7745 goto efault; 7746 } 7747 } 7748 break; 7749 #endif 7750 case TARGET_NR_chown: 7751 if (!(p = lock_user_string(arg1))) 7752 goto efault; 7753 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7754 unlock_user(p, arg1, 0); 7755 break; 7756 case TARGET_NR_setuid: 7757 ret = get_errno(setuid(low2highuid(arg1))); 7758 break; 7759 case TARGET_NR_setgid: 7760 ret = get_errno(setgid(low2highgid(arg1))); 7761 break; 7762 case TARGET_NR_setfsuid: 7763 ret = get_errno(setfsuid(arg1)); 7764 break; 7765 case TARGET_NR_setfsgid: 7766 ret = get_errno(setfsgid(arg1)); 7767 break; 7768 7769 #ifdef TARGET_NR_lchown32 7770 case TARGET_NR_lchown32: 7771 if (!(p = lock_user_string(arg1))) 7772 goto efault; 7773 ret = get_errno(lchown(p, arg2, arg3)); 7774 unlock_user(p, arg1, 0); 7775 break; 7776 #endif 7777 #ifdef TARGET_NR_getuid32 7778 case TARGET_NR_getuid32: 7779 ret = get_errno(getuid()); 7780 break; 7781 #endif 7782 7783 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 7784 /* Alpha specific */ 7785 case TARGET_NR_getxuid: 7786 { 7787 uid_t euid; 7788 euid=geteuid(); 7789 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 7790 } 7791 ret = get_errno(getuid()); 7792 break; 7793 #endif 7794 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 7795 /* Alpha specific */ 7796 case TARGET_NR_getxgid: 7797 { 7798 uid_t egid; 7799 egid=getegid(); 7800 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 7801 } 7802 ret = get_errno(getgid()); 7803 break; 7804 #endif 7805 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 7806 /* Alpha specific */ 7807 case TARGET_NR_osf_getsysinfo: 7808 ret = -TARGET_EOPNOTSUPP; 7809 switch (arg1) { 7810 case TARGET_GSI_IEEE_FP_CONTROL: 7811 { 7812 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 7813 7814 /* Copied from linux ieee_fpcr_to_swcr. */ 7815 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 7816 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 7817 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 7818 | SWCR_TRAP_ENABLE_DZE 7819 | SWCR_TRAP_ENABLE_OVF); 7820 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 7821 | SWCR_TRAP_ENABLE_INE); 7822 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 7823 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 7824 7825 if (put_user_u64 (swcr, arg2)) 7826 goto efault; 7827 ret = 0; 7828 } 7829 break; 7830 7831 /* case GSI_IEEE_STATE_AT_SIGNAL: 7832 -- Not implemented in linux kernel. 7833 case GSI_UACPROC: 7834 -- Retrieves current unaligned access state; not much used. 
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64(swcr, arg2)) {
                    goto efault;
                }
                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr. */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, orig_fpcr;
                int si_code;

                if (get_user_u64(exc, arg2)) {
                    goto efault;
                }

                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* We only add to the exception status here. */
                fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;

                /* Old exceptions are not signaled. */
                fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                /* If any exceptions set by this call,
                   and are unmasked, send a signal. */
                si_code = 0;
                if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
                    si_code = TARGET_FPE_FLTRES;
                }
                if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
                    si_code = TARGET_FPE_FLTUND;
                }
                if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
                    si_code = TARGET_FPE_FLTOVF;
                }
                if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
                    si_code = TARGET_FPE_FLTDIV;
                }
                if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
                    si_code = TARGET_FPE_FLTINV;
                }
                if (si_code != 0) {
                    target_siginfo_t info;
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
                }
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        break;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
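    /* Unlike the Linux flavour, OSF/1 sigprocmask hands the old mask back
       in the return value rather than through a pointer, so the previous
       host mask is converted to the target's old-style sigset and returned
       directly. */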
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
        break;
#endif

#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif

    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
        mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
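    /* The guest must see its own page size, which may differ from the
       host's, so the constant TARGET_PAGE_SIZE is returned instead of
       asking the host. */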
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
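    /* removexattr() acts on the file a path refers to while lremovexattr()
       acts on the symlink itself; both share one handler below and are
       told apart by the syscall number. */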
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        break;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        break;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif

#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
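        /* If the caller passed a 'rem' pointer (arg4), copy back how much
           of the sleep was left, as the kernel does when the sleep is
           interrupted. */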
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                                   target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif

#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr(&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user(p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
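            /* Use the timed variant only when a timeout was actually
               supplied; otherwise fall back to plain mq_receive() and skip
               the timespec conversion. */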
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }

        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
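    /* epoll file descriptors are ordinary host fds, so the epoll calls can
       largely be passed straight through; only struct epoll_event (and the
       signal mask for epoll_pwait) has to be converted between target and
       host layouts. */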
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
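        /* Copy the previous limits back to the guest only if the caller
           asked for them (arg4 non-zero) and the syscall succeeded. */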
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}