/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"

#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])


#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

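/*
 * Each _syscallN() invocation below defines a static wrapper that enters
 * the host kernel directly through syscall(2), bypassing any glibc wrapper.
 * Like the libc calls, the wrappers return -1 and set the host errno on
 * failure; callers convert that into a target errno via get_errno().
 */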

#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);

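/*
 * Each entry below maps one open(2) flag as { target_mask, target_bits,
 * host_mask, host_bits }: a target flag whose masked bits equal
 * target_bits is translated into host_bits, and vice versa for the
 * reverse direction, since the numeric O_* values differ between target
 * and host architectures.
 */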
static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};

#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct the Linux kernel uses).
     */

    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}

#ifdef CONFIG_ATFILE
/*
 * The host system seems to have the atfile syscall stubs available.  We
 * now enable them one by one as specified by the target's syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has an extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
    return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */

#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

/* ARM EABI and MIPS expect 64bit types aligned on even pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
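/*
 * When regpairs_aligned() returns 1, the target ABI passes 64-bit syscall
 * arguments in an aligned (even/odd) register pair, so a padding argument
 * slot may precede the value; the handlers that reassemble 64-bit
 * arguments use this to decide which slots to combine.
 */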

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

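/*
 * A guest fd_set is an array of abi_ulong words in guest byte order, with
 * bit j of word i standing for descriptor i * TARGET_ABI_BITS + j.  Since
 * word size and endianness may differ from the host's, the helpers below
 * repack the sets bit by bit rather than copying them wholesale.
 */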
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar fixup.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

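/*
 * The two helpers below convert ancillary data between the target's
 * cmsghdr layout and the host's.  The control buffers are walked in
 * lockstep; payloads that are specifically handled (SCM_RIGHTS file
 * descriptors, SO_TIMESTAMP timevals) are byte-swapped element by
 * element, anything else is copied verbatim with a warning.
 */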
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
            (cmsg->cmsg_type == SCM_RIGHTS)) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
                   (cmsg->cmsg_type == SO_TIMESTAMP) &&
                   (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

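/*
 * Socket option constants are not identical across architectures (the
 * TARGET_SOL_SOCKET and TARGET_SO_* values may differ from the host's),
 * so the setsockopt/getsockopt helpers below translate each supported
 * level/optname pair explicitly and copy option values through
 * host-sized temporaries.
 */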
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

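/*
 * lock_iovec() converts a guest iovec array into a host struct iovec
 * array, translating and locking each buffer into host memory; the
 * combined length is clamped to max_len so the total stays within what
 * a host syscall result can represent.  unlock_iovec() releases the
 * buffers again, copying data back to the guest when 'copy' is set.
 */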
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        errno = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            errno = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                errno = EFAULT;
                goto fail;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    free(vec);
 fail2:
    unlock_user(target_vec, target_addr, 0);
    return NULL;
}

static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
*/ 1910 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1911 socklen_t addrlen) 1912 { 1913 void *addr; 1914 abi_long ret; 1915 1916 if ((int)addrlen < 0) { 1917 return -TARGET_EINVAL; 1918 } 1919 1920 addr = alloca(addrlen); 1921 1922 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1923 if (ret) 1924 return ret; 1925 1926 return get_errno(connect(sockfd, addr, addrlen)); 1927 } 1928 1929 /* do_sendrecvmsg() Must return target values and target errnos. */ 1930 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1931 int flags, int send) 1932 { 1933 abi_long ret, len; 1934 struct target_msghdr *msgp; 1935 struct msghdr msg; 1936 int count; 1937 struct iovec *vec; 1938 abi_ulong target_vec; 1939 1940 /* FIXME */ 1941 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1942 msgp, 1943 target_msg, 1944 send ? 1 : 0)) 1945 return -TARGET_EFAULT; 1946 if (msgp->msg_name) { 1947 msg.msg_namelen = tswap32(msgp->msg_namelen); 1948 msg.msg_name = alloca(msg.msg_namelen); 1949 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1950 msg.msg_namelen); 1951 if (ret) { 1952 goto out2; 1953 } 1954 } else { 1955 msg.msg_name = NULL; 1956 msg.msg_namelen = 0; 1957 } 1958 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1959 msg.msg_control = alloca(msg.msg_controllen); 1960 msg.msg_flags = tswap32(msgp->msg_flags); 1961 1962 count = tswapal(msgp->msg_iovlen); 1963 target_vec = tswapal(msgp->msg_iov); 1964 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 1965 target_vec, count, send); 1966 if (vec == NULL) { 1967 ret = -host_to_target_errno(errno); 1968 goto out2; 1969 } 1970 msg.msg_iovlen = count; 1971 msg.msg_iov = vec; 1972 1973 if (send) { 1974 ret = target_to_host_cmsg(&msg, msgp); 1975 if (ret == 0) 1976 ret = get_errno(sendmsg(fd, &msg, flags)); 1977 } else { 1978 ret = get_errno(recvmsg(fd, &msg, flags)); 1979 if (!is_error(ret)) { 1980 len = ret; 1981 ret = host_to_target_cmsg(msgp, &msg); 1982 if (!is_error(ret)) { 1983 msgp->msg_namelen = tswap32(msg.msg_namelen); 1984 if (msg.msg_name != NULL) { 1985 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 1986 msg.msg_name, msg.msg_namelen); 1987 if (ret) { 1988 goto out; 1989 } 1990 } 1991 1992 ret = len; 1993 } 1994 } 1995 } 1996 1997 out: 1998 unlock_iovec(vec, target_vec, count, !send); 1999 out2: 2000 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 2001 return ret; 2002 } 2003 2004 /* do_accept() Must return target values and target errnos. */ 2005 static abi_long do_accept(int fd, abi_ulong target_addr, 2006 abi_ulong target_addrlen_addr) 2007 { 2008 socklen_t addrlen; 2009 void *addr; 2010 abi_long ret; 2011 2012 if (target_addr == 0) 2013 return get_errno(accept(fd, NULL, NULL)); 2014 2015 /* linux returns EINVAL if addrlen pointer is invalid */ 2016 if (get_user_u32(addrlen, target_addrlen_addr)) 2017 return -TARGET_EINVAL; 2018 2019 if ((int)addrlen < 0) { 2020 return -TARGET_EINVAL; 2021 } 2022 2023 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2024 return -TARGET_EINVAL; 2025 2026 addr = alloca(addrlen); 2027 2028 ret = get_errno(accept(fd, addr, &addrlen)); 2029 if (!is_error(ret)) { 2030 host_to_target_sockaddr(target_addr, addr, addrlen); 2031 if (put_user_u32(addrlen, target_addrlen_addr)) 2032 ret = -TARGET_EFAULT; 2033 } 2034 return ret; 2035 } 2036 2037 /* do_getpeername() Must return target values and target errnos. 
*/ 2038 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2039 abi_ulong target_addrlen_addr) 2040 { 2041 socklen_t addrlen; 2042 void *addr; 2043 abi_long ret; 2044 2045 if (get_user_u32(addrlen, target_addrlen_addr)) 2046 return -TARGET_EFAULT; 2047 2048 if ((int)addrlen < 0) { 2049 return -TARGET_EINVAL; 2050 } 2051 2052 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2053 return -TARGET_EFAULT; 2054 2055 addr = alloca(addrlen); 2056 2057 ret = get_errno(getpeername(fd, addr, &addrlen)); 2058 if (!is_error(ret)) { 2059 host_to_target_sockaddr(target_addr, addr, addrlen); 2060 if (put_user_u32(addrlen, target_addrlen_addr)) 2061 ret = -TARGET_EFAULT; 2062 } 2063 return ret; 2064 } 2065 2066 /* do_getsockname() Must return target values and target errnos. */ 2067 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2068 abi_ulong target_addrlen_addr) 2069 { 2070 socklen_t addrlen; 2071 void *addr; 2072 abi_long ret; 2073 2074 if (get_user_u32(addrlen, target_addrlen_addr)) 2075 return -TARGET_EFAULT; 2076 2077 if ((int)addrlen < 0) { 2078 return -TARGET_EINVAL; 2079 } 2080 2081 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2082 return -TARGET_EFAULT; 2083 2084 addr = alloca(addrlen); 2085 2086 ret = get_errno(getsockname(fd, addr, &addrlen)); 2087 if (!is_error(ret)) { 2088 host_to_target_sockaddr(target_addr, addr, addrlen); 2089 if (put_user_u32(addrlen, target_addrlen_addr)) 2090 ret = -TARGET_EFAULT; 2091 } 2092 return ret; 2093 } 2094 2095 /* do_socketpair() Must return target values and target errnos. */ 2096 static abi_long do_socketpair(int domain, int type, int protocol, 2097 abi_ulong target_tab_addr) 2098 { 2099 int tab[2]; 2100 abi_long ret; 2101 2102 ret = get_errno(socketpair(domain, type, protocol, tab)); 2103 if (!is_error(ret)) { 2104 if (put_user_s32(tab[0], target_tab_addr) 2105 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2106 ret = -TARGET_EFAULT; 2107 } 2108 return ret; 2109 } 2110 2111 /* do_sendto() Must return target values and target errnos. */ 2112 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2113 abi_ulong target_addr, socklen_t addrlen) 2114 { 2115 void *addr; 2116 void *host_msg; 2117 abi_long ret; 2118 2119 if ((int)addrlen < 0) { 2120 return -TARGET_EINVAL; 2121 } 2122 2123 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2124 if (!host_msg) 2125 return -TARGET_EFAULT; 2126 if (target_addr) { 2127 addr = alloca(addrlen); 2128 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2129 if (ret) { 2130 unlock_user(host_msg, msg, 0); 2131 return ret; 2132 } 2133 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2134 } else { 2135 ret = get_errno(send(fd, host_msg, len, flags)); 2136 } 2137 unlock_user(host_msg, msg, 0); 2138 return ret; 2139 } 2140 2141 /* do_recvfrom() Must return target values and target errnos. 
*/ 2142 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2143 abi_ulong target_addr, 2144 abi_ulong target_addrlen) 2145 { 2146 socklen_t addrlen; 2147 void *addr; 2148 void *host_msg; 2149 abi_long ret; 2150 2151 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2152 if (!host_msg) 2153 return -TARGET_EFAULT; 2154 if (target_addr) { 2155 if (get_user_u32(addrlen, target_addrlen)) { 2156 ret = -TARGET_EFAULT; 2157 goto fail; 2158 } 2159 if ((int)addrlen < 0) { 2160 ret = -TARGET_EINVAL; 2161 goto fail; 2162 } 2163 addr = alloca(addrlen); 2164 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2165 } else { 2166 addr = NULL; /* To keep compiler quiet. */ 2167 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2168 } 2169 if (!is_error(ret)) { 2170 if (target_addr) { 2171 host_to_target_sockaddr(target_addr, addr, addrlen); 2172 if (put_user_u32(addrlen, target_addrlen)) { 2173 ret = -TARGET_EFAULT; 2174 goto fail; 2175 } 2176 } 2177 unlock_user(host_msg, msg, len); 2178 } else { 2179 fail: 2180 unlock_user(host_msg, msg, 0); 2181 } 2182 return ret; 2183 } 2184 2185 #ifdef TARGET_NR_socketcall 2186 /* do_socketcall() Must return target values and target errnos. */ 2187 static abi_long do_socketcall(int num, abi_ulong vptr) 2188 { 2189 abi_long ret; 2190 const int n = sizeof(abi_ulong); 2191 2192 switch(num) { 2193 case SOCKOP_socket: 2194 { 2195 abi_ulong domain, type, protocol; 2196 2197 if (get_user_ual(domain, vptr) 2198 || get_user_ual(type, vptr + n) 2199 || get_user_ual(protocol, vptr + 2 * n)) 2200 return -TARGET_EFAULT; 2201 2202 ret = do_socket(domain, type, protocol); 2203 } 2204 break; 2205 case SOCKOP_bind: 2206 { 2207 abi_ulong sockfd; 2208 abi_ulong target_addr; 2209 socklen_t addrlen; 2210 2211 if (get_user_ual(sockfd, vptr) 2212 || get_user_ual(target_addr, vptr + n) 2213 || get_user_ual(addrlen, vptr + 2 * n)) 2214 return -TARGET_EFAULT; 2215 2216 ret = do_bind(sockfd, target_addr, addrlen); 2217 } 2218 break; 2219 case SOCKOP_connect: 2220 { 2221 abi_ulong sockfd; 2222 abi_ulong target_addr; 2223 socklen_t addrlen; 2224 2225 if (get_user_ual(sockfd, vptr) 2226 || get_user_ual(target_addr, vptr + n) 2227 || get_user_ual(addrlen, vptr + 2 * n)) 2228 return -TARGET_EFAULT; 2229 2230 ret = do_connect(sockfd, target_addr, addrlen); 2231 } 2232 break; 2233 case SOCKOP_listen: 2234 { 2235 abi_ulong sockfd, backlog; 2236 2237 if (get_user_ual(sockfd, vptr) 2238 || get_user_ual(backlog, vptr + n)) 2239 return -TARGET_EFAULT; 2240 2241 ret = get_errno(listen(sockfd, backlog)); 2242 } 2243 break; 2244 case SOCKOP_accept: 2245 { 2246 abi_ulong sockfd; 2247 abi_ulong target_addr, target_addrlen; 2248 2249 if (get_user_ual(sockfd, vptr) 2250 || get_user_ual(target_addr, vptr + n) 2251 || get_user_ual(target_addrlen, vptr + 2 * n)) 2252 return -TARGET_EFAULT; 2253 2254 ret = do_accept(sockfd, target_addr, target_addrlen); 2255 } 2256 break; 2257 case SOCKOP_getsockname: 2258 { 2259 abi_ulong sockfd; 2260 abi_ulong target_addr, target_addrlen; 2261 2262 if (get_user_ual(sockfd, vptr) 2263 || get_user_ual(target_addr, vptr + n) 2264 || get_user_ual(target_addrlen, vptr + 2 * n)) 2265 return -TARGET_EFAULT; 2266 2267 ret = do_getsockname(sockfd, target_addr, target_addrlen); 2268 } 2269 break; 2270 case SOCKOP_getpeername: 2271 { 2272 abi_ulong sockfd; 2273 abi_ulong target_addr, target_addrlen; 2274 2275 if (get_user_ual(sockfd, vptr) 2276 || get_user_ual(target_addr, vptr + n) 2277 || get_user_ual(target_addrlen, vptr + 2 * n)) 2278 return 
-TARGET_EFAULT; 2279 2280 ret = do_getpeername(sockfd, target_addr, target_addrlen); 2281 } 2282 break; 2283 case SOCKOP_socketpair: 2284 { 2285 abi_ulong domain, type, protocol; 2286 abi_ulong tab; 2287 2288 if (get_user_ual(domain, vptr) 2289 || get_user_ual(type, vptr + n) 2290 || get_user_ual(protocol, vptr + 2 * n) 2291 || get_user_ual(tab, vptr + 3 * n)) 2292 return -TARGET_EFAULT; 2293 2294 ret = do_socketpair(domain, type, protocol, tab); 2295 } 2296 break; 2297 case SOCKOP_send: 2298 { 2299 abi_ulong sockfd; 2300 abi_ulong msg; 2301 size_t len; 2302 abi_ulong flags; 2303 2304 if (get_user_ual(sockfd, vptr) 2305 || get_user_ual(msg, vptr + n) 2306 || get_user_ual(len, vptr + 2 * n) 2307 || get_user_ual(flags, vptr + 3 * n)) 2308 return -TARGET_EFAULT; 2309 2310 ret = do_sendto(sockfd, msg, len, flags, 0, 0); 2311 } 2312 break; 2313 case SOCKOP_recv: 2314 { 2315 abi_ulong sockfd; 2316 abi_ulong msg; 2317 size_t len; 2318 abi_ulong flags; 2319 2320 if (get_user_ual(sockfd, vptr) 2321 || get_user_ual(msg, vptr + n) 2322 || get_user_ual(len, vptr + 2 * n) 2323 || get_user_ual(flags, vptr + 3 * n)) 2324 return -TARGET_EFAULT; 2325 2326 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0); 2327 } 2328 break; 2329 case SOCKOP_sendto: 2330 { 2331 abi_ulong sockfd; 2332 abi_ulong msg; 2333 size_t len; 2334 abi_ulong flags; 2335 abi_ulong addr; 2336 socklen_t addrlen; 2337 2338 if (get_user_ual(sockfd, vptr) 2339 || get_user_ual(msg, vptr + n) 2340 || get_user_ual(len, vptr + 2 * n) 2341 || get_user_ual(flags, vptr + 3 * n) 2342 || get_user_ual(addr, vptr + 4 * n) 2343 || get_user_ual(addrlen, vptr + 5 * n)) 2344 return -TARGET_EFAULT; 2345 2346 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen); 2347 } 2348 break; 2349 case SOCKOP_recvfrom: 2350 { 2351 abi_ulong sockfd; 2352 abi_ulong msg; 2353 size_t len; 2354 abi_ulong flags; 2355 abi_ulong addr; 2356 socklen_t addrlen; 2357 2358 if (get_user_ual(sockfd, vptr) 2359 || get_user_ual(msg, vptr + n) 2360 || get_user_ual(len, vptr + 2 * n) 2361 || get_user_ual(flags, vptr + 3 * n) 2362 || get_user_ual(addr, vptr + 4 * n) 2363 || get_user_ual(addrlen, vptr + 5 * n)) 2364 return -TARGET_EFAULT; 2365 2366 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen); 2367 } 2368 break; 2369 case SOCKOP_shutdown: 2370 { 2371 abi_ulong sockfd, how; 2372 2373 if (get_user_ual(sockfd, vptr) 2374 || get_user_ual(how, vptr + n)) 2375 return -TARGET_EFAULT; 2376 2377 ret = get_errno(shutdown(sockfd, how)); 2378 } 2379 break; 2380 case SOCKOP_sendmsg: 2381 case SOCKOP_recvmsg: 2382 { 2383 abi_ulong fd; 2384 abi_ulong target_msg; 2385 abi_ulong flags; 2386 2387 if (get_user_ual(fd, vptr) 2388 || get_user_ual(target_msg, vptr + n) 2389 || get_user_ual(flags, vptr + 2 * n)) 2390 return -TARGET_EFAULT; 2391 2392 ret = do_sendrecvmsg(fd, target_msg, flags, 2393 (num == SOCKOP_sendmsg)); 2394 } 2395 break; 2396 case SOCKOP_setsockopt: 2397 { 2398 abi_ulong sockfd; 2399 abi_ulong level; 2400 abi_ulong optname; 2401 abi_ulong optval; 2402 socklen_t optlen; 2403 2404 if (get_user_ual(sockfd, vptr) 2405 || get_user_ual(level, vptr + n) 2406 || get_user_ual(optname, vptr + 2 * n) 2407 || get_user_ual(optval, vptr + 3 * n) 2408 || get_user_ual(optlen, vptr + 4 * n)) 2409 return -TARGET_EFAULT; 2410 2411 ret = do_setsockopt(sockfd, level, optname, optval, optlen); 2412 } 2413 break; 2414 case SOCKOP_getsockopt: 2415 { 2416 abi_ulong sockfd; 2417 abi_ulong level; 2418 abi_ulong optname; 2419 abi_ulong optval; 2420 socklen_t optlen; 2421 2422 if (get_user_ual(sockfd, 
vptr) 2423 || get_user_ual(level, vptr + n) 2424 || get_user_ual(optname, vptr + 2 * n) 2425 || get_user_ual(optval, vptr + 3 * n) 2426 || get_user_ual(optlen, vptr + 4 * n)) 2427 return -TARGET_EFAULT; 2428 2429 ret = do_getsockopt(sockfd, level, optname, optval, optlen); 2430 } 2431 break; 2432 default: 2433 gemu_log("Unsupported socketcall: %d\n", num); 2434 ret = -TARGET_ENOSYS; 2435 break; 2436 } 2437 return ret; 2438 } 2439 #endif 2440 2441 #define N_SHM_REGIONS 32 2442 2443 static struct shm_region { 2444 abi_ulong start; 2445 abi_ulong size; 2446 } shm_regions[N_SHM_REGIONS]; 2447 2448 struct target_ipc_perm 2449 { 2450 abi_long __key; 2451 abi_ulong uid; 2452 abi_ulong gid; 2453 abi_ulong cuid; 2454 abi_ulong cgid; 2455 unsigned short int mode; 2456 unsigned short int __pad1; 2457 unsigned short int __seq; 2458 unsigned short int __pad2; 2459 abi_ulong __unused1; 2460 abi_ulong __unused2; 2461 }; 2462 2463 struct target_semid_ds 2464 { 2465 struct target_ipc_perm sem_perm; 2466 abi_ulong sem_otime; 2467 abi_ulong __unused1; 2468 abi_ulong sem_ctime; 2469 abi_ulong __unused2; 2470 abi_ulong sem_nsems; 2471 abi_ulong __unused3; 2472 abi_ulong __unused4; 2473 }; 2474 2475 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2476 abi_ulong target_addr) 2477 { 2478 struct target_ipc_perm *target_ip; 2479 struct target_semid_ds *target_sd; 2480 2481 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2482 return -TARGET_EFAULT; 2483 target_ip = &(target_sd->sem_perm); 2484 host_ip->__key = tswapal(target_ip->__key); 2485 host_ip->uid = tswapal(target_ip->uid); 2486 host_ip->gid = tswapal(target_ip->gid); 2487 host_ip->cuid = tswapal(target_ip->cuid); 2488 host_ip->cgid = tswapal(target_ip->cgid); 2489 host_ip->mode = tswap16(target_ip->mode); 2490 unlock_user_struct(target_sd, target_addr, 0); 2491 return 0; 2492 } 2493 2494 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2495 struct ipc_perm *host_ip) 2496 { 2497 struct target_ipc_perm *target_ip; 2498 struct target_semid_ds *target_sd; 2499 2500 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2501 return -TARGET_EFAULT; 2502 target_ip = &(target_sd->sem_perm); 2503 target_ip->__key = tswapal(host_ip->__key); 2504 target_ip->uid = tswapal(host_ip->uid); 2505 target_ip->gid = tswapal(host_ip->gid); 2506 target_ip->cuid = tswapal(host_ip->cuid); 2507 target_ip->cgid = tswapal(host_ip->cgid); 2508 target_ip->mode = tswap16(host_ip->mode); 2509 unlock_user_struct(target_sd, target_addr, 1); 2510 return 0; 2511 } 2512 2513 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2514 abi_ulong target_addr) 2515 { 2516 struct target_semid_ds *target_sd; 2517 2518 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2519 return -TARGET_EFAULT; 2520 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2521 return -TARGET_EFAULT; 2522 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2523 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2524 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2525 unlock_user_struct(target_sd, target_addr, 0); 2526 return 0; 2527 } 2528 2529 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2530 struct semid_ds *host_sd) 2531 { 2532 struct target_semid_ds *target_sd; 2533 2534 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2535 return -TARGET_EFAULT; 2536 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2537 return -TARGET_EFAULT; 2538 
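/* The ipc_perm part was converted above; now copy the remaining semid_ds fields (semaphore count and timestamps) into the guest structure. */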
target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2539 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2540 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2541 unlock_user_struct(target_sd, target_addr, 1); 2542 return 0; 2543 } 2544 2545 struct target_seminfo { 2546 int semmap; 2547 int semmni; 2548 int semmns; 2549 int semmnu; 2550 int semmsl; 2551 int semopm; 2552 int semume; 2553 int semusz; 2554 int semvmx; 2555 int semaem; 2556 }; 2557 2558 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2559 struct seminfo *host_seminfo) 2560 { 2561 struct target_seminfo *target_seminfo; 2562 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2563 return -TARGET_EFAULT; 2564 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2565 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2566 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2567 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2568 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2569 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2570 __put_user(host_seminfo->semume, &target_seminfo->semume); 2571 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2572 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2573 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2574 unlock_user_struct(target_seminfo, target_addr, 1); 2575 return 0; 2576 } 2577 2578 union semun { 2579 int val; 2580 struct semid_ds *buf; 2581 unsigned short *array; 2582 struct seminfo *__buf; 2583 }; 2584 2585 union target_semun { 2586 int val; 2587 abi_ulong buf; 2588 abi_ulong array; 2589 abi_ulong __buf; 2590 }; 2591 2592 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2593 abi_ulong target_addr) 2594 { 2595 int nsems; 2596 unsigned short *array; 2597 union semun semun; 2598 struct semid_ds semid_ds; 2599 int i, ret; 2600 2601 semun.buf = &semid_ds; 2602 2603 ret = semctl(semid, 0, IPC_STAT, semun); 2604 if (ret == -1) 2605 return get_errno(ret); 2606 2607 nsems = semid_ds.sem_nsems; 2608 2609 *host_array = malloc(nsems*sizeof(unsigned short)); 2610 array = lock_user(VERIFY_READ, target_addr, 2611 nsems*sizeof(unsigned short), 1); 2612 if (!array) 2613 return -TARGET_EFAULT; 2614 2615 for(i=0; i<nsems; i++) { 2616 __get_user((*host_array)[i], &array[i]); 2617 } 2618 unlock_user(array, target_addr, 0); 2619 2620 return 0; 2621 } 2622 2623 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2624 unsigned short **host_array) 2625 { 2626 int nsems; 2627 unsigned short *array; 2628 union semun semun; 2629 struct semid_ds semid_ds; 2630 int i, ret; 2631 2632 semun.buf = &semid_ds; 2633 2634 ret = semctl(semid, 0, IPC_STAT, semun); 2635 if (ret == -1) 2636 return get_errno(ret); 2637 2638 nsems = semid_ds.sem_nsems; 2639 2640 array = lock_user(VERIFY_WRITE, target_addr, 2641 nsems*sizeof(unsigned short), 0); 2642 if (!array) 2643 return -TARGET_EFAULT; 2644 2645 for(i=0; i<nsems; i++) { 2646 __put_user((*host_array)[i], &array[i]); 2647 } 2648 free(*host_array); 2649 unlock_user(array, target_addr, 1); 2650 2651 return 0; 2652 } 2653 2654 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2655 union target_semun target_su) 2656 { 2657 union semun arg; 2658 struct semid_ds dsarg; 2659 unsigned short *array = NULL; 2660 struct seminfo seminfo; 2661 abi_long ret = -TARGET_EINVAL; 2662 abi_long err; 2663 cmd &= 0xff; 2664 2665 switch( cmd ) { 2666 case GETVAL: 2667 case 
SETVAL: 2668 arg.val = tswap32(target_su.val); 2669 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2670 target_su.val = tswap32(arg.val); 2671 break; 2672 case GETALL: 2673 case SETALL: 2674 err = target_to_host_semarray(semid, &array, target_su.array); 2675 if (err) 2676 return err; 2677 arg.array = array; 2678 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2679 err = host_to_target_semarray(semid, target_su.array, &array); 2680 if (err) 2681 return err; 2682 break; 2683 case IPC_STAT: 2684 case IPC_SET: 2685 case SEM_STAT: 2686 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2687 if (err) 2688 return err; 2689 arg.buf = &dsarg; 2690 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2691 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2692 if (err) 2693 return err; 2694 break; 2695 case IPC_INFO: 2696 case SEM_INFO: 2697 arg.__buf = &seminfo; 2698 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2699 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2700 if (err) 2701 return err; 2702 break; 2703 case IPC_RMID: 2704 case GETPID: 2705 case GETNCNT: 2706 case GETZCNT: 2707 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2708 break; 2709 } 2710 2711 return ret; 2712 } 2713 2714 struct target_sembuf { 2715 unsigned short sem_num; 2716 short sem_op; 2717 short sem_flg; 2718 }; 2719 2720 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2721 abi_ulong target_addr, 2722 unsigned nsops) 2723 { 2724 struct target_sembuf *target_sembuf; 2725 int i; 2726 2727 target_sembuf = lock_user(VERIFY_READ, target_addr, 2728 nsops*sizeof(struct target_sembuf), 1); 2729 if (!target_sembuf) 2730 return -TARGET_EFAULT; 2731 2732 for(i=0; i<nsops; i++) { 2733 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2734 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2735 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2736 } 2737 2738 unlock_user(target_sembuf, target_addr, 0); 2739 2740 return 0; 2741 } 2742 2743 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2744 { 2745 struct sembuf sops[nsops]; 2746 2747 if (target_to_host_sembuf(sops, ptr, nsops)) 2748 return -TARGET_EFAULT; 2749 2750 return get_errno(semop(semid, sops, nsops)); 2751 } 2752 2753 struct target_msqid_ds 2754 { 2755 struct target_ipc_perm msg_perm; 2756 abi_ulong msg_stime; 2757 #if TARGET_ABI_BITS == 32 2758 abi_ulong __unused1; 2759 #endif 2760 abi_ulong msg_rtime; 2761 #if TARGET_ABI_BITS == 32 2762 abi_ulong __unused2; 2763 #endif 2764 abi_ulong msg_ctime; 2765 #if TARGET_ABI_BITS == 32 2766 abi_ulong __unused3; 2767 #endif 2768 abi_ulong __msg_cbytes; 2769 abi_ulong msg_qnum; 2770 abi_ulong msg_qbytes; 2771 abi_ulong msg_lspid; 2772 abi_ulong msg_lrpid; 2773 abi_ulong __unused4; 2774 abi_ulong __unused5; 2775 }; 2776 2777 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2778 abi_ulong target_addr) 2779 { 2780 struct target_msqid_ds *target_md; 2781 2782 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2783 return -TARGET_EFAULT; 2784 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2785 return -TARGET_EFAULT; 2786 host_md->msg_stime = tswapal(target_md->msg_stime); 2787 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2788 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2789 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2790 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2791 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2792 host_md->msg_lspid =
tswapal(target_md->msg_lspid); 2793 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2794 unlock_user_struct(target_md, target_addr, 0); 2795 return 0; 2796 } 2797 2798 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2799 struct msqid_ds *host_md) 2800 { 2801 struct target_msqid_ds *target_md; 2802 2803 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2804 return -TARGET_EFAULT; 2805 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2806 return -TARGET_EFAULT; 2807 target_md->msg_stime = tswapal(host_md->msg_stime); 2808 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2809 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2810 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2811 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2812 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2813 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2814 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2815 unlock_user_struct(target_md, target_addr, 1); 2816 return 0; 2817 } 2818 2819 struct target_msginfo { 2820 int msgpool; 2821 int msgmap; 2822 int msgmax; 2823 int msgmnb; 2824 int msgmni; 2825 int msgssz; 2826 int msgtql; 2827 unsigned short int msgseg; 2828 }; 2829 2830 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2831 struct msginfo *host_msginfo) 2832 { 2833 struct target_msginfo *target_msginfo; 2834 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2835 return -TARGET_EFAULT; 2836 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2837 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2838 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2839 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2840 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2841 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2842 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2843 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2844 unlock_user_struct(target_msginfo, target_addr, 1); 2845 return 0; 2846 } 2847 2848 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2849 { 2850 struct msqid_ds dsarg; 2851 struct msginfo msginfo; 2852 abi_long ret = -TARGET_EINVAL; 2853 2854 cmd &= 0xff; 2855 2856 switch (cmd) { 2857 case IPC_STAT: 2858 case IPC_SET: 2859 case MSG_STAT: 2860 if (target_to_host_msqid_ds(&dsarg,ptr)) 2861 return -TARGET_EFAULT; 2862 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2863 if (host_to_target_msqid_ds(ptr,&dsarg)) 2864 return -TARGET_EFAULT; 2865 break; 2866 case IPC_RMID: 2867 ret = get_errno(msgctl(msgid, cmd, NULL)); 2868 break; 2869 case IPC_INFO: 2870 case MSG_INFO: 2871 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2872 if (host_to_target_msginfo(ptr, &msginfo)) 2873 return -TARGET_EFAULT; 2874 break; 2875 } 2876 2877 return ret; 2878 } 2879 2880 struct target_msgbuf { 2881 abi_long mtype; 2882 char mtext[1]; 2883 }; 2884 2885 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2886 unsigned int msgsz, int msgflg) 2887 { 2888 struct target_msgbuf *target_mb; 2889 struct msgbuf *host_mb; 2890 abi_long ret = 0; 2891 2892 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2893 return -TARGET_EFAULT; 2894 host_mb = malloc(msgsz+sizeof(long)); 2895 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2896 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2897 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2898 free(host_mb); 2899 
unlock_user_struct(target_mb, msgp, 0); 2900 2901 return ret; 2902 } 2903 2904 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2905 unsigned int msgsz, abi_long msgtyp, 2906 int msgflg) 2907 { 2908 struct target_msgbuf *target_mb; 2909 char *target_mtext; 2910 struct msgbuf *host_mb; 2911 abi_long ret = 0; 2912 2913 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2914 return -TARGET_EFAULT; 2915 2916 host_mb = g_malloc(msgsz+sizeof(long)); 2917 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2918 2919 if (ret > 0) { 2920 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2921 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2922 if (!target_mtext) { 2923 ret = -TARGET_EFAULT; 2924 goto end; 2925 } 2926 memcpy(target_mb->mtext, host_mb->mtext, ret); 2927 unlock_user(target_mtext, target_mtext_addr, ret); 2928 } 2929 2930 target_mb->mtype = tswapal(host_mb->mtype); 2931 2932 end: 2933 if (target_mb) 2934 unlock_user_struct(target_mb, msgp, 1); 2935 g_free(host_mb); 2936 return ret; 2937 } 2938 2939 struct target_shmid_ds 2940 { 2941 struct target_ipc_perm shm_perm; 2942 abi_ulong shm_segsz; 2943 abi_ulong shm_atime; 2944 #if TARGET_ABI_BITS == 32 2945 abi_ulong __unused1; 2946 #endif 2947 abi_ulong shm_dtime; 2948 #if TARGET_ABI_BITS == 32 2949 abi_ulong __unused2; 2950 #endif 2951 abi_ulong shm_ctime; 2952 #if TARGET_ABI_BITS == 32 2953 abi_ulong __unused3; 2954 #endif 2955 int shm_cpid; 2956 int shm_lpid; 2957 abi_ulong shm_nattch; 2958 unsigned long int __unused4; 2959 unsigned long int __unused5; 2960 }; 2961 2962 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2963 abi_ulong target_addr) 2964 { 2965 struct target_shmid_ds *target_sd; 2966 2967 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2968 return -TARGET_EFAULT; 2969 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2970 return -TARGET_EFAULT; 2971 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2972 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2973 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2974 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2975 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2976 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2977 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2978 unlock_user_struct(target_sd, target_addr, 0); 2979 return 0; 2980 } 2981 2982 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2983 struct shmid_ds *host_sd) 2984 { 2985 struct target_shmid_ds *target_sd; 2986 2987 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2988 return -TARGET_EFAULT; 2989 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2990 return -TARGET_EFAULT; 2991 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2992 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2993 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2994 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2995 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2996 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2997 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2998 unlock_user_struct(target_sd, target_addr, 1); 2999 return 0; 3000 } 3001 3002 struct target_shminfo { 3003 abi_ulong shmmax; 3004 abi_ulong shmmin; 3005 abi_ulong shmmni; 3006 abi_ulong shmseg; 3007 abi_ulong shmall; 3008 }; 3009 3010 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 3011 struct shminfo 
*host_shminfo) 3012 { 3013 struct target_shminfo *target_shminfo; 3014 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 3015 return -TARGET_EFAULT; 3016 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 3017 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 3018 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 3019 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 3020 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 3021 unlock_user_struct(target_shminfo, target_addr, 1); 3022 return 0; 3023 } 3024 3025 struct target_shm_info { 3026 int used_ids; 3027 abi_ulong shm_tot; 3028 abi_ulong shm_rss; 3029 abi_ulong shm_swp; 3030 abi_ulong swap_attempts; 3031 abi_ulong swap_successes; 3032 }; 3033 3034 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 3035 struct shm_info *host_shm_info) 3036 { 3037 struct target_shm_info *target_shm_info; 3038 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 3039 return -TARGET_EFAULT; 3040 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 3041 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 3042 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 3043 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 3044 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 3045 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 3046 unlock_user_struct(target_shm_info, target_addr, 1); 3047 return 0; 3048 } 3049 3050 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 3051 { 3052 struct shmid_ds dsarg; 3053 struct shminfo shminfo; 3054 struct shm_info shm_info; 3055 abi_long ret = -TARGET_EINVAL; 3056 3057 cmd &= 0xff; 3058 3059 switch(cmd) { 3060 case IPC_STAT: 3061 case IPC_SET: 3062 case SHM_STAT: 3063 if (target_to_host_shmid_ds(&dsarg, buf)) 3064 return -TARGET_EFAULT; 3065 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 3066 if (host_to_target_shmid_ds(buf, &dsarg)) 3067 return -TARGET_EFAULT; 3068 break; 3069 case IPC_INFO: 3070 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 3071 if (host_to_target_shminfo(buf, &shminfo)) 3072 return -TARGET_EFAULT; 3073 break; 3074 case SHM_INFO: 3075 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 3076 if (host_to_target_shm_info(buf, &shm_info)) 3077 return -TARGET_EFAULT; 3078 break; 3079 case IPC_RMID: 3080 case SHM_LOCK: 3081 case SHM_UNLOCK: 3082 ret = get_errno(shmctl(shmid, cmd, NULL)); 3083 break; 3084 } 3085 3086 return ret; 3087 } 3088 3089 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 3090 { 3091 abi_long raddr; 3092 void *host_raddr; 3093 struct shmid_ds shm_info; 3094 int i,ret; 3095 3096 /* find out the length of the shared memory segment */ 3097 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 3098 if (is_error(ret)) { 3099 /* can't get length, bail out */ 3100 return ret; 3101 } 3102 3103 mmap_lock(); 3104 3105 if (shmaddr) 3106 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 3107 else { 3108 abi_ulong mmap_start; 3109 3110 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 3111 3112 if (mmap_start == -1) { 3113 errno = ENOMEM; 3114 host_raddr = (void *)-1; 3115 } else 3116 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 3117 } 3118 3119 if (host_raddr == (void *)-1) { 3120 mmap_unlock(); 3121 return get_errno((long)host_raddr); 3122 } 3123 raddr=h2g((unsigned long)host_raddr); 3124 3125 
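/* The attach succeeded: mark the guest pages as valid and readable (writable too unless SHM_RDONLY was requested), and remember the region in shm_regions[] so do_shmdt() can clear the page flags again on detach. */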
page_set_flags(raddr, raddr + shm_info.shm_segsz, 3126 PAGE_VALID | PAGE_READ | 3127 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3128 3129 for (i = 0; i < N_SHM_REGIONS; i++) { 3130 if (shm_regions[i].start == 0) { 3131 shm_regions[i].start = raddr; 3132 shm_regions[i].size = shm_info.shm_segsz; 3133 break; 3134 } 3135 } 3136 3137 mmap_unlock(); 3138 return raddr; 3139 3140 } 3141 3142 static inline abi_long do_shmdt(abi_ulong shmaddr) 3143 { 3144 int i; 3145 3146 for (i = 0; i < N_SHM_REGIONS; ++i) { 3147 if (shm_regions[i].start == shmaddr) { 3148 shm_regions[i].start = 0; 3149 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3150 break; 3151 } 3152 } 3153 3154 return get_errno(shmdt(g2h(shmaddr))); 3155 } 3156 3157 #ifdef TARGET_NR_ipc 3158 /* ??? This only works with linear mappings. */ 3159 /* do_ipc() must return target values and target errnos. */ 3160 static abi_long do_ipc(unsigned int call, int first, 3161 int second, int third, 3162 abi_long ptr, abi_long fifth) 3163 { 3164 int version; 3165 abi_long ret = 0; 3166 3167 version = call >> 16; 3168 call &= 0xffff; 3169 3170 switch (call) { 3171 case IPCOP_semop: 3172 ret = do_semop(first, ptr, second); 3173 break; 3174 3175 case IPCOP_semget: 3176 ret = get_errno(semget(first, second, third)); 3177 break; 3178 3179 case IPCOP_semctl: 3180 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3181 break; 3182 3183 case IPCOP_msgget: 3184 ret = get_errno(msgget(first, second)); 3185 break; 3186 3187 case IPCOP_msgsnd: 3188 ret = do_msgsnd(first, ptr, second, third); 3189 break; 3190 3191 case IPCOP_msgctl: 3192 ret = do_msgctl(first, second, ptr); 3193 break; 3194 3195 case IPCOP_msgrcv: 3196 switch (version) { 3197 case 0: 3198 { 3199 struct target_ipc_kludge { 3200 abi_long msgp; 3201 abi_long msgtyp; 3202 } *tmp; 3203 3204 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3205 ret = -TARGET_EFAULT; 3206 break; 3207 } 3208 3209 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3210 3211 unlock_user_struct(tmp, ptr, 0); 3212 break; 3213 } 3214 default: 3215 ret = do_msgrcv(first, ptr, second, fifth, third); 3216 } 3217 break; 3218 3219 case IPCOP_shmat: 3220 switch (version) { 3221 default: 3222 { 3223 abi_ulong raddr; 3224 raddr = do_shmat(first, ptr, second); 3225 if (is_error(raddr)) 3226 return get_errno(raddr); 3227 if (put_user_ual(raddr, third)) 3228 return -TARGET_EFAULT; 3229 break; 3230 } 3231 case 1: 3232 ret = -TARGET_EINVAL; 3233 break; 3234 } 3235 break; 3236 case IPCOP_shmdt: 3237 ret = do_shmdt(ptr); 3238 break; 3239 3240 case IPCOP_shmget: 3241 /* IPC_* flag values are the same on all linux platforms */ 3242 ret = get_errno(shmget(first, second, third)); 3243 break; 3244 3245 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3246 case IPCOP_shmctl: 3247 ret = do_shmctl(first, second, third); 3248 break; 3249 default: 3250 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3251 ret = -TARGET_ENOSYS; 3252 break; 3253 } 3254 return ret; 3255 } 3256 #endif 3257 3258 /* kernel structure types definitions */ 3259 3260 #define STRUCT(name, ...) STRUCT_ ## name, 3261 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3262 enum { 3263 #include "syscall_types.h" 3264 }; 3265 #undef STRUCT 3266 #undef STRUCT_SPECIAL 3267 3268 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3269 #define STRUCT_SPECIAL(name) 3270 #include "syscall_types.h" 3271 #undef STRUCT 3272 #undef STRUCT_SPECIAL 3273 3274 typedef struct IOCTLEntry IOCTLEntry; 3275 3276 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3277 int fd, abi_long cmd, abi_long arg); 3278 3279 struct IOCTLEntry { 3280 unsigned int target_cmd; 3281 unsigned int host_cmd; 3282 const char *name; 3283 int access; 3284 do_ioctl_fn *do_ioctl; 3285 const argtype arg_type[5]; 3286 }; 3287 3288 #define IOC_R 0x0001 3289 #define IOC_W 0x0002 3290 #define IOC_RW (IOC_R | IOC_W) 3291 3292 #define MAX_STRUCT_SIZE 4096 3293 3294 #ifdef CONFIG_FIEMAP 3295 /* So fiemap access checks don't overflow on 32 bit systems. 3296 * This is very slightly smaller than the limit imposed by 3297 * the underlying kernel. 3298 */ 3299 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3300 / sizeof(struct fiemap_extent)) 3301 3302 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3303 int fd, abi_long cmd, abi_long arg) 3304 { 3305 /* The parameter for this ioctl is a struct fiemap followed 3306 * by an array of struct fiemap_extent whose size is set 3307 * in fiemap->fm_extent_count. The array is filled in by the 3308 * ioctl. 3309 */ 3310 int target_size_in, target_size_out; 3311 struct fiemap *fm; 3312 const argtype *arg_type = ie->arg_type; 3313 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3314 void *argptr, *p; 3315 abi_long ret; 3316 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3317 uint32_t outbufsz; 3318 int free_fm = 0; 3319 3320 assert(arg_type[0] == TYPE_PTR); 3321 assert(ie->access == IOC_RW); 3322 arg_type++; 3323 target_size_in = thunk_type_size(arg_type, 0); 3324 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3325 if (!argptr) { 3326 return -TARGET_EFAULT; 3327 } 3328 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3329 unlock_user(argptr, arg, 0); 3330 fm = (struct fiemap *)buf_temp; 3331 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3332 return -TARGET_EINVAL; 3333 } 3334 3335 outbufsz = sizeof (*fm) + 3336 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3337 3338 if (outbufsz > MAX_STRUCT_SIZE) { 3339 /* We can't fit all the extents into the fixed size buffer. 3340 * Allocate one that is large enough and use it instead. 
3341 */ 3342 fm = malloc(outbufsz); 3343 if (!fm) { 3344 return -TARGET_ENOMEM; 3345 } 3346 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3347 free_fm = 1; 3348 } 3349 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3350 if (!is_error(ret)) { 3351 target_size_out = target_size_in; 3352 /* An extent_count of 0 means we were only counting the extents 3353 * so there are no structs to copy 3354 */ 3355 if (fm->fm_extent_count != 0) { 3356 target_size_out += fm->fm_mapped_extents * extent_size; 3357 } 3358 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3359 if (!argptr) { 3360 ret = -TARGET_EFAULT; 3361 } else { 3362 /* Convert the struct fiemap */ 3363 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3364 if (fm->fm_extent_count != 0) { 3365 p = argptr + target_size_in; 3366 /* ...and then all the struct fiemap_extents */ 3367 for (i = 0; i < fm->fm_mapped_extents; i++) { 3368 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3369 THUNK_TARGET); 3370 p += extent_size; 3371 } 3372 } 3373 unlock_user(argptr, arg, target_size_out); 3374 } 3375 } 3376 if (free_fm) { 3377 free(fm); 3378 } 3379 return ret; 3380 } 3381 #endif 3382 3383 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3384 int fd, abi_long cmd, abi_long arg) 3385 { 3386 const argtype *arg_type = ie->arg_type; 3387 int target_size; 3388 void *argptr; 3389 int ret; 3390 struct ifconf *host_ifconf; 3391 uint32_t outbufsz; 3392 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3393 int target_ifreq_size; 3394 int nb_ifreq; 3395 int free_buf = 0; 3396 int i; 3397 int target_ifc_len; 3398 abi_long target_ifc_buf; 3399 int host_ifc_len; 3400 char *host_ifc_buf; 3401 3402 assert(arg_type[0] == TYPE_PTR); 3403 assert(ie->access == IOC_RW); 3404 3405 arg_type++; 3406 target_size = thunk_type_size(arg_type, 0); 3407 3408 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3409 if (!argptr) 3410 return -TARGET_EFAULT; 3411 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3412 unlock_user(argptr, arg, 0); 3413 3414 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3415 target_ifc_len = host_ifconf->ifc_len; 3416 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3417 3418 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3419 nb_ifreq = target_ifc_len / target_ifreq_size; 3420 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3421 3422 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3423 if (outbufsz > MAX_STRUCT_SIZE) { 3424 /* We can't fit all the extents into the fixed size buffer. 3425 * Allocate one that is large enough and use it instead. 
3426 */ 3427 host_ifconf = malloc(outbufsz); 3428 if (!host_ifconf) { 3429 return -TARGET_ENOMEM; 3430 } 3431 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3432 free_buf = 1; 3433 } 3434 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3435 3436 host_ifconf->ifc_len = host_ifc_len; 3437 host_ifconf->ifc_buf = host_ifc_buf; 3438 3439 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3440 if (!is_error(ret)) { 3441 /* convert host ifc_len to target ifc_len */ 3442 3443 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3444 target_ifc_len = nb_ifreq * target_ifreq_size; 3445 host_ifconf->ifc_len = target_ifc_len; 3446 3447 /* restore target ifc_buf */ 3448 3449 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3450 3451 /* copy struct ifconf to target user */ 3452 3453 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3454 if (!argptr) 3455 return -TARGET_EFAULT; 3456 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3457 unlock_user(argptr, arg, target_size); 3458 3459 /* copy ifreq[] to target user */ 3460 3461 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3462 for (i = 0; i < nb_ifreq ; i++) { 3463 thunk_convert(argptr + i * target_ifreq_size, 3464 host_ifc_buf + i * sizeof(struct ifreq), 3465 ifreq_arg_type, THUNK_TARGET); 3466 } 3467 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3468 } 3469 3470 if (free_buf) { 3471 free(host_ifconf); 3472 } 3473 3474 return ret; 3475 } 3476 3477 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3478 abi_long cmd, abi_long arg) 3479 { 3480 void *argptr; 3481 struct dm_ioctl *host_dm; 3482 abi_long guest_data; 3483 uint32_t guest_data_size; 3484 int target_size; 3485 const argtype *arg_type = ie->arg_type; 3486 abi_long ret; 3487 void *big_buf = NULL; 3488 char *host_data; 3489 3490 arg_type++; 3491 target_size = thunk_type_size(arg_type, 0); 3492 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3493 if (!argptr) { 3494 ret = -TARGET_EFAULT; 3495 goto out; 3496 } 3497 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3498 unlock_user(argptr, arg, 0); 3499 3500 /* buf_temp is too small, so fetch things into a bigger buffer */ 3501 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3502 memcpy(big_buf, buf_temp, target_size); 3503 buf_temp = big_buf; 3504 host_dm = big_buf; 3505 3506 guest_data = arg + host_dm->data_start; 3507 if ((guest_data - arg) < 0) { 3508 ret = -EINVAL; 3509 goto out; 3510 } 3511 guest_data_size = host_dm->data_size - host_dm->data_start; 3512 host_data = (char*)host_dm + host_dm->data_start; 3513 3514 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3515 switch (ie->host_cmd) { 3516 case DM_REMOVE_ALL: 3517 case DM_LIST_DEVICES: 3518 case DM_DEV_CREATE: 3519 case DM_DEV_REMOVE: 3520 case DM_DEV_SUSPEND: 3521 case DM_DEV_STATUS: 3522 case DM_DEV_WAIT: 3523 case DM_TABLE_STATUS: 3524 case DM_TABLE_CLEAR: 3525 case DM_TABLE_DEPS: 3526 case DM_LIST_VERSIONS: 3527 /* no input data */ 3528 break; 3529 case DM_DEV_RENAME: 3530 case DM_DEV_SET_GEOMETRY: 3531 /* data contains only strings */ 3532 memcpy(host_data, argptr, guest_data_size); 3533 break; 3534 case DM_TARGET_MSG: 3535 memcpy(host_data, argptr, guest_data_size); 3536 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3537 break; 3538 case DM_TABLE_LOAD: 3539 { 3540 void *gspec = argptr; 3541 void *cur_data = host_data; 3542 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3543 int spec_size = 
thunk_type_size(arg_type, 0); 3544 int i; 3545 3546 for (i = 0; i < host_dm->target_count; i++) { 3547 struct dm_target_spec *spec = cur_data; 3548 uint32_t next; 3549 int slen; 3550 3551 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3552 slen = strlen((char*)gspec + spec_size) + 1; 3553 next = spec->next; 3554 spec->next = sizeof(*spec) + slen; 3555 strcpy((char*)&spec[1], gspec + spec_size); 3556 gspec += next; 3557 cur_data += spec->next; 3558 } 3559 break; 3560 } 3561 default: 3562 ret = -TARGET_EINVAL; 3563 goto out; 3564 } 3565 unlock_user(argptr, guest_data, 0); 3566 3567 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3568 if (!is_error(ret)) { 3569 guest_data = arg + host_dm->data_start; 3570 guest_data_size = host_dm->data_size - host_dm->data_start; 3571 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3572 switch (ie->host_cmd) { 3573 case DM_REMOVE_ALL: 3574 case DM_DEV_CREATE: 3575 case DM_DEV_REMOVE: 3576 case DM_DEV_RENAME: 3577 case DM_DEV_SUSPEND: 3578 case DM_DEV_STATUS: 3579 case DM_TABLE_LOAD: 3580 case DM_TABLE_CLEAR: 3581 case DM_TARGET_MSG: 3582 case DM_DEV_SET_GEOMETRY: 3583 /* no return data */ 3584 break; 3585 case DM_LIST_DEVICES: 3586 { 3587 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3588 uint32_t remaining_data = guest_data_size; 3589 void *cur_data = argptr; 3590 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3591 int nl_size = 12; /* can't use thunk_size due to alignment */ 3592 3593 while (1) { 3594 uint32_t next = nl->next; 3595 if (next) { 3596 nl->next = nl_size + (strlen(nl->name) + 1); 3597 } 3598 if (remaining_data < nl->next) { 3599 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3600 break; 3601 } 3602 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3603 strcpy(cur_data + nl_size, nl->name); 3604 cur_data += nl->next; 3605 remaining_data -= nl->next; 3606 if (!next) { 3607 break; 3608 } 3609 nl = (void*)nl + next; 3610 } 3611 break; 3612 } 3613 case DM_DEV_WAIT: 3614 case DM_TABLE_STATUS: 3615 { 3616 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3617 void *cur_data = argptr; 3618 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3619 int spec_size = thunk_type_size(arg_type, 0); 3620 int i; 3621 3622 for (i = 0; i < host_dm->target_count; i++) { 3623 uint32_t next = spec->next; 3624 int slen = strlen((char*)&spec[1]) + 1; 3625 spec->next = (cur_data - argptr) + spec_size + slen; 3626 if (guest_data_size < spec->next) { 3627 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3628 break; 3629 } 3630 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3631 strcpy(cur_data + spec_size, (char*)&spec[1]); 3632 cur_data = argptr + spec->next; 3633 spec = (void*)host_dm + host_dm->data_start + next; 3634 } 3635 break; 3636 } 3637 case DM_TABLE_DEPS: 3638 { 3639 void *hdata = (void*)host_dm + host_dm->data_start; 3640 int count = *(uint32_t*)hdata; 3641 uint64_t *hdev = hdata + 8; 3642 uint64_t *gdev = argptr + 8; 3643 int i; 3644 3645 *(uint32_t*)argptr = tswap32(count); 3646 for (i = 0; i < count; i++) { 3647 *gdev = tswap64(*hdev); 3648 gdev++; 3649 hdev++; 3650 } 3651 break; 3652 } 3653 case DM_LIST_VERSIONS: 3654 { 3655 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3656 uint32_t remaining_data = guest_data_size; 3657 void *cur_data = argptr; 3658 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3659 int vers_size = thunk_type_size(arg_type, 0); 3660 3661 while (1) { 3662 uint32_t next = vers->next; 3663 if (next) { 3664 
vers->next = vers_size + (strlen(vers->name) + 1); 3665 } 3666 if (remaining_data < vers->next) { 3667 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3668 break; 3669 } 3670 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3671 strcpy(cur_data + vers_size, vers->name); 3672 cur_data += vers->next; 3673 remaining_data -= vers->next; 3674 if (!next) { 3675 break; 3676 } 3677 vers = (void*)vers + next; 3678 } 3679 break; 3680 } 3681 default: 3682 ret = -TARGET_EINVAL; 3683 goto out; 3684 } 3685 unlock_user(argptr, guest_data, guest_data_size); 3686 3687 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3688 if (!argptr) { 3689 ret = -TARGET_EFAULT; 3690 goto out; 3691 } 3692 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3693 unlock_user(argptr, arg, target_size); 3694 } 3695 out: 3696 g_free(big_buf); 3697 return ret; 3698 } 3699 3700 static IOCTLEntry ioctl_entries[] = { 3701 #define IOCTL(cmd, access, ...) \ 3702 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3703 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3704 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3705 #include "ioctls.h" 3706 { 0, 0, }, 3707 }; 3708 3709 /* ??? Implement proper locking for ioctls. */ 3710 /* do_ioctl() Must return target values and target errnos. */ 3711 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3712 { 3713 const IOCTLEntry *ie; 3714 const argtype *arg_type; 3715 abi_long ret; 3716 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3717 int target_size; 3718 void *argptr; 3719 3720 ie = ioctl_entries; 3721 for(;;) { 3722 if (ie->target_cmd == 0) { 3723 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3724 return -TARGET_ENOSYS; 3725 } 3726 if (ie->target_cmd == cmd) 3727 break; 3728 ie++; 3729 } 3730 arg_type = ie->arg_type; 3731 #if defined(DEBUG) 3732 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3733 #endif 3734 if (ie->do_ioctl) { 3735 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3736 } 3737 3738 switch(arg_type[0]) { 3739 case TYPE_NULL: 3740 /* no argument */ 3741 ret = get_errno(ioctl(fd, ie->host_cmd)); 3742 break; 3743 case TYPE_PTRVOID: 3744 case TYPE_INT: 3745 /* int argment */ 3746 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3747 break; 3748 case TYPE_PTR: 3749 arg_type++; 3750 target_size = thunk_type_size(arg_type, 0); 3751 switch(ie->access) { 3752 case IOC_R: 3753 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3754 if (!is_error(ret)) { 3755 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3756 if (!argptr) 3757 return -TARGET_EFAULT; 3758 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3759 unlock_user(argptr, arg, target_size); 3760 } 3761 break; 3762 case IOC_W: 3763 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3764 if (!argptr) 3765 return -TARGET_EFAULT; 3766 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3767 unlock_user(argptr, arg, 0); 3768 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3769 break; 3770 default: 3771 case IOC_RW: 3772 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3773 if (!argptr) 3774 return -TARGET_EFAULT; 3775 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3776 unlock_user(argptr, arg, 0); 3777 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3778 if (!is_error(ret)) { 3779 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3780 if (!argptr) 3781 return -TARGET_EFAULT; 3782 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3783 unlock_user(argptr, arg, target_size); 3784 } 3785 break; 3786 } 3787 break; 3788 default: 3789 
gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3790 (long)cmd, arg_type[0]); 3791 ret = -TARGET_ENOSYS; 3792 break; 3793 } 3794 return ret; 3795 } 3796 3797 static const bitmask_transtbl iflag_tbl[] = { 3798 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3799 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3800 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3801 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3802 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3803 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3804 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3805 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3806 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3807 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3808 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3809 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3810 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3811 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3812 { 0, 0, 0, 0 } 3813 }; 3814 3815 static const bitmask_transtbl oflag_tbl[] = { 3816 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3817 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3818 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3819 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3820 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 3821 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3822 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3823 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3824 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3825 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3826 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3827 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3828 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3829 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3830 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3831 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3832 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3833 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3834 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3835 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3836 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3837 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3838 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3839 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3840 { 0, 0, 0, 0 } 3841 }; 3842 3843 static const bitmask_transtbl cflag_tbl[] = { 3844 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3845 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3846 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3847 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3848 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3849 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3850 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3851 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3852 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3853 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3854 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3855 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3856 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3857 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3858 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3859 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3860 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3861 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3862 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3863 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3864 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3865 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3866 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3867 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3868 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 
3869 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3870 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3871 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3872 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3873 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3874 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3875 { 0, 0, 0, 0 } 3876 }; 3877 3878 static const bitmask_transtbl lflag_tbl[] = { 3879 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3880 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3881 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3882 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3883 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3884 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3885 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3886 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3887 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3888 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3889 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3890 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3891 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3892 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3893 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3894 { 0, 0, 0, 0 } 3895 }; 3896 3897 static void target_to_host_termios (void *dst, const void *src) 3898 { 3899 struct host_termios *host = dst; 3900 const struct target_termios *target = src; 3901 3902 host->c_iflag = 3903 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3904 host->c_oflag = 3905 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3906 host->c_cflag = 3907 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3908 host->c_lflag = 3909 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3910 host->c_line = target->c_line; 3911 3912 memset(host->c_cc, 0, sizeof(host->c_cc)); 3913 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3914 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3915 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3916 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3917 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3918 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3919 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3920 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3921 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3922 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3923 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3924 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3925 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3926 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3927 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3928 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3929 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3930 } 3931 3932 static void host_to_target_termios (void *dst, const void *src) 3933 { 3934 struct target_termios *target = dst; 3935 const struct host_termios *host = src; 3936 3937 target->c_iflag = 3938 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3939 target->c_oflag = 3940 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3941 target->c_cflag = 3942 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3943 target->c_lflag = 3944 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3945 target->c_line = host->c_line; 3946 3947 memset(target->c_cc, 0, sizeof(target->c_cc)); 3948 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3949 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3950 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3951 
target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3952 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3953 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3954 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3955 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3956 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3957 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3958 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3959 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3960 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3961 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3962 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3963 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3964 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3965 } 3966 3967 static const StructEntry struct_termios_def = { 3968 .convert = { host_to_target_termios, target_to_host_termios }, 3969 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3970 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3971 }; 3972 3973 static bitmask_transtbl mmap_flags_tbl[] = { 3974 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3975 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3976 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 3977 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 3978 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 3979 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 3980 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 3981 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 3982 { 0, 0, 0, 0 } 3983 }; 3984 3985 #if defined(TARGET_I386) 3986 3987 /* NOTE: there is really one LDT for all the threads */ 3988 static uint8_t *ldt_table; 3989 3990 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 3991 { 3992 int size; 3993 void *p; 3994 3995 if (!ldt_table) 3996 return 0; 3997 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 3998 if (size > bytecount) 3999 size = bytecount; 4000 p = lock_user(VERIFY_WRITE, ptr, size, 0); 4001 if (!p) 4002 return -TARGET_EFAULT; 4003 /* ??? Should this be byteswapped?
*/ 4004 memcpy(p, ldt_table, size); 4005 unlock_user(p, ptr, size); 4006 return size; 4007 } 4008 4009 /* XXX: add locking support */ 4010 static abi_long write_ldt(CPUX86State *env, 4011 abi_ulong ptr, unsigned long bytecount, int oldmode) 4012 { 4013 struct target_modify_ldt_ldt_s ldt_info; 4014 struct target_modify_ldt_ldt_s *target_ldt_info; 4015 int seg_32bit, contents, read_exec_only, limit_in_pages; 4016 int seg_not_present, useable, lm; 4017 uint32_t *lp, entry_1, entry_2; 4018 4019 if (bytecount != sizeof(ldt_info)) 4020 return -TARGET_EINVAL; 4021 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 4022 return -TARGET_EFAULT; 4023 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4024 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4025 ldt_info.limit = tswap32(target_ldt_info->limit); 4026 ldt_info.flags = tswap32(target_ldt_info->flags); 4027 unlock_user_struct(target_ldt_info, ptr, 0); 4028 4029 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 4030 return -TARGET_EINVAL; 4031 seg_32bit = ldt_info.flags & 1; 4032 contents = (ldt_info.flags >> 1) & 3; 4033 read_exec_only = (ldt_info.flags >> 3) & 1; 4034 limit_in_pages = (ldt_info.flags >> 4) & 1; 4035 seg_not_present = (ldt_info.flags >> 5) & 1; 4036 useable = (ldt_info.flags >> 6) & 1; 4037 #ifdef TARGET_ABI32 4038 lm = 0; 4039 #else 4040 lm = (ldt_info.flags >> 7) & 1; 4041 #endif 4042 if (contents == 3) { 4043 if (oldmode) 4044 return -TARGET_EINVAL; 4045 if (seg_not_present == 0) 4046 return -TARGET_EINVAL; 4047 } 4048 /* allocate the LDT */ 4049 if (!ldt_table) { 4050 env->ldt.base = target_mmap(0, 4051 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 4052 PROT_READ|PROT_WRITE, 4053 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 4054 if (env->ldt.base == -1) 4055 return -TARGET_ENOMEM; 4056 memset(g2h(env->ldt.base), 0, 4057 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 4058 env->ldt.limit = 0xffff; 4059 ldt_table = g2h(env->ldt.base); 4060 } 4061 4062 /* NOTE: same code as Linux kernel */ 4063 /* Allow LDTs to be cleared by the user. */ 4064 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4065 if (oldmode || 4066 (contents == 0 && 4067 read_exec_only == 1 && 4068 seg_32bit == 0 && 4069 limit_in_pages == 0 && 4070 seg_not_present == 1 && 4071 useable == 0 )) { 4072 entry_1 = 0; 4073 entry_2 = 0; 4074 goto install; 4075 } 4076 } 4077 4078 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4079 (ldt_info.limit & 0x0ffff); 4080 entry_2 = (ldt_info.base_addr & 0xff000000) | 4081 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4082 (ldt_info.limit & 0xf0000) | 4083 ((read_exec_only ^ 1) << 9) | 4084 (contents << 10) | 4085 ((seg_not_present ^ 1) << 15) | 4086 (seg_32bit << 22) | 4087 (limit_in_pages << 23) | 4088 (lm << 21) | 4089 0x7000; 4090 if (!oldmode) 4091 entry_2 |= (useable << 20); 4092 4093 /* Install the new entry ... 
*/ 4094 install: 4095 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 4096 lp[0] = tswap32(entry_1); 4097 lp[1] = tswap32(entry_2); 4098 return 0; 4099 } 4100 4101 /* specific and weird i386 syscalls */ 4102 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 4103 unsigned long bytecount) 4104 { 4105 abi_long ret; 4106 4107 switch (func) { 4108 case 0: 4109 ret = read_ldt(ptr, bytecount); 4110 break; 4111 case 1: 4112 ret = write_ldt(env, ptr, bytecount, 1); 4113 break; 4114 case 0x11: 4115 ret = write_ldt(env, ptr, bytecount, 0); 4116 break; 4117 default: 4118 ret = -TARGET_ENOSYS; 4119 break; 4120 } 4121 return ret; 4122 } 4123 4124 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4125 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4126 { 4127 uint64_t *gdt_table = g2h(env->gdt.base); 4128 struct target_modify_ldt_ldt_s ldt_info; 4129 struct target_modify_ldt_ldt_s *target_ldt_info; 4130 int seg_32bit, contents, read_exec_only, limit_in_pages; 4131 int seg_not_present, useable, lm; 4132 uint32_t *lp, entry_1, entry_2; 4133 int i; 4134 4135 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4136 if (!target_ldt_info) 4137 return -TARGET_EFAULT; 4138 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4139 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4140 ldt_info.limit = tswap32(target_ldt_info->limit); 4141 ldt_info.flags = tswap32(target_ldt_info->flags); 4142 if (ldt_info.entry_number == -1) { 4143 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4144 if (gdt_table[i] == 0) { 4145 ldt_info.entry_number = i; 4146 target_ldt_info->entry_number = tswap32(i); 4147 break; 4148 } 4149 } 4150 } 4151 unlock_user_struct(target_ldt_info, ptr, 1); 4152 4153 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4154 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4155 return -TARGET_EINVAL; 4156 seg_32bit = ldt_info.flags & 1; 4157 contents = (ldt_info.flags >> 1) & 3; 4158 read_exec_only = (ldt_info.flags >> 3) & 1; 4159 limit_in_pages = (ldt_info.flags >> 4) & 1; 4160 seg_not_present = (ldt_info.flags >> 5) & 1; 4161 useable = (ldt_info.flags >> 6) & 1; 4162 #ifdef TARGET_ABI32 4163 lm = 0; 4164 #else 4165 lm = (ldt_info.flags >> 7) & 1; 4166 #endif 4167 4168 if (contents == 3) { 4169 if (seg_not_present == 0) 4170 return -TARGET_EINVAL; 4171 } 4172 4173 /* NOTE: same code as Linux kernel */ 4174 /* Allow LDTs to be cleared by the user. */ 4175 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4176 if ((contents == 0 && 4177 read_exec_only == 1 && 4178 seg_32bit == 0 && 4179 limit_in_pages == 0 && 4180 seg_not_present == 1 && 4181 useable == 0 )) { 4182 entry_1 = 0; 4183 entry_2 = 0; 4184 goto install; 4185 } 4186 } 4187 4188 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4189 (ldt_info.limit & 0x0ffff); 4190 entry_2 = (ldt_info.base_addr & 0xff000000) | 4191 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4192 (ldt_info.limit & 0xf0000) | 4193 ((read_exec_only ^ 1) << 9) | 4194 (contents << 10) | 4195 ((seg_not_present ^ 1) << 15) | 4196 (seg_32bit << 22) | 4197 (limit_in_pages << 23) | 4198 (useable << 20) | 4199 (lm << 21) | 4200 0x7000; 4201 4202 /* Install the new entry ... 
*/ 4203 install: 4204 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4205 lp[0] = tswap32(entry_1); 4206 lp[1] = tswap32(entry_2); 4207 return 0; 4208 } 4209 4210 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4211 { 4212 struct target_modify_ldt_ldt_s *target_ldt_info; 4213 uint64_t *gdt_table = g2h(env->gdt.base); 4214 uint32_t base_addr, limit, flags; 4215 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4216 int seg_not_present, useable, lm; 4217 uint32_t *lp, entry_1, entry_2; 4218 4219 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4220 if (!target_ldt_info) 4221 return -TARGET_EFAULT; 4222 idx = tswap32(target_ldt_info->entry_number); 4223 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4224 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4225 unlock_user_struct(target_ldt_info, ptr, 1); 4226 return -TARGET_EINVAL; 4227 } 4228 lp = (uint32_t *)(gdt_table + idx); 4229 entry_1 = tswap32(lp[0]); 4230 entry_2 = tswap32(lp[1]); 4231 4232 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4233 contents = (entry_2 >> 10) & 3; 4234 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4235 seg_32bit = (entry_2 >> 22) & 1; 4236 limit_in_pages = (entry_2 >> 23) & 1; 4237 useable = (entry_2 >> 20) & 1; 4238 #ifdef TARGET_ABI32 4239 lm = 0; 4240 #else 4241 lm = (entry_2 >> 21) & 1; 4242 #endif 4243 flags = (seg_32bit << 0) | (contents << 1) | 4244 (read_exec_only << 3) | (limit_in_pages << 4) | 4245 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4246 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4247 base_addr = (entry_1 >> 16) | 4248 (entry_2 & 0xff000000) | 4249 ((entry_2 & 0xff) << 16); 4250 target_ldt_info->base_addr = tswapal(base_addr); 4251 target_ldt_info->limit = tswap32(limit); 4252 target_ldt_info->flags = tswap32(flags); 4253 unlock_user_struct(target_ldt_info, ptr, 1); 4254 return 0; 4255 } 4256 #endif /* TARGET_I386 && TARGET_ABI32 */ 4257 4258 #ifndef TARGET_ABI32 4259 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4260 { 4261 abi_long ret = 0; 4262 abi_ulong val; 4263 int idx; 4264 4265 switch(code) { 4266 case TARGET_ARCH_SET_GS: 4267 case TARGET_ARCH_SET_FS: 4268 if (code == TARGET_ARCH_SET_GS) 4269 idx = R_GS; 4270 else 4271 idx = R_FS; 4272 cpu_x86_load_seg(env, idx, 0); 4273 env->segs[idx].base = addr; 4274 break; 4275 case TARGET_ARCH_GET_GS: 4276 case TARGET_ARCH_GET_FS: 4277 if (code == TARGET_ARCH_GET_GS) 4278 idx = R_GS; 4279 else 4280 idx = R_FS; 4281 val = env->segs[idx].base; 4282 if (put_user(val, addr, abi_ulong)) 4283 ret = -TARGET_EFAULT; 4284 break; 4285 default: 4286 ret = -TARGET_EINVAL; 4287 break; 4288 } 4289 return ret; 4290 } 4291 #endif 4292 4293 #endif /* defined(TARGET_I386) */ 4294 4295 #define NEW_STACK_SIZE 0x40000 4296 4297 #if defined(CONFIG_USE_NPTL) 4298 4299 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 4300 typedef struct { 4301 CPUArchState *env; 4302 pthread_mutex_t mutex; 4303 pthread_cond_t cond; 4304 pthread_t thread; 4305 uint32_t tid; 4306 abi_ulong child_tidptr; 4307 abi_ulong parent_tidptr; 4308 sigset_t sigmask; 4309 } new_thread_info; 4310 4311 static void *clone_func(void *arg) 4312 { 4313 new_thread_info *info = arg; 4314 CPUArchState *env; 4315 CPUState *cpu; 4316 TaskState *ts; 4317 4318 env = info->env; 4319 cpu = ENV_GET_CPU(env); 4320 thread_env = env; 4321 ts = (TaskState *)thread_env->opaque; 4322 info->tid = gettid(); 4323 cpu->host_tid = info->tid; 4324 task_settid(ts); 4325 if (info->child_tidptr) 4326 put_user_u32(info->tid, info->child_tidptr); 4327 if 
(info->parent_tidptr) 4328 put_user_u32(info->tid, info->parent_tidptr); 4329 /* Enable signals. */ 4330 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 4331 /* Signal to the parent that we're ready. */ 4332 pthread_mutex_lock(&info->mutex); 4333 pthread_cond_broadcast(&info->cond); 4334 pthread_mutex_unlock(&info->mutex); 4335 /* Wait until the parent has finshed initializing the tls state. */ 4336 pthread_mutex_lock(&clone_lock); 4337 pthread_mutex_unlock(&clone_lock); 4338 cpu_loop(env); 4339 /* never exits */ 4340 return NULL; 4341 } 4342 #else 4343 4344 static int clone_func(void *arg) 4345 { 4346 CPUArchState *env = arg; 4347 cpu_loop(env); 4348 /* never exits */ 4349 return 0; 4350 } 4351 #endif 4352 4353 /* do_fork() Must return host values and target errnos (unlike most 4354 do_*() functions). */ 4355 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4356 abi_ulong parent_tidptr, target_ulong newtls, 4357 abi_ulong child_tidptr) 4358 { 4359 int ret; 4360 TaskState *ts; 4361 CPUArchState *new_env; 4362 #if defined(CONFIG_USE_NPTL) 4363 unsigned int nptl_flags; 4364 sigset_t sigmask; 4365 #else 4366 uint8_t *new_stack; 4367 #endif 4368 4369 /* Emulate vfork() with fork() */ 4370 if (flags & CLONE_VFORK) 4371 flags &= ~(CLONE_VFORK | CLONE_VM); 4372 4373 if (flags & CLONE_VM) { 4374 TaskState *parent_ts = (TaskState *)env->opaque; 4375 #if defined(CONFIG_USE_NPTL) 4376 new_thread_info info; 4377 pthread_attr_t attr; 4378 #endif 4379 ts = g_malloc0(sizeof(TaskState)); 4380 init_task_state(ts); 4381 /* we create a new CPU instance. */ 4382 new_env = cpu_copy(env); 4383 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC) 4384 cpu_reset(ENV_GET_CPU(new_env)); 4385 #endif 4386 /* Init regs that differ from the parent. */ 4387 cpu_clone_regs(new_env, newsp); 4388 new_env->opaque = ts; 4389 ts->bprm = parent_ts->bprm; 4390 ts->info = parent_ts->info; 4391 #if defined(CONFIG_USE_NPTL) 4392 nptl_flags = flags; 4393 flags &= ~CLONE_NPTL_FLAGS2; 4394 4395 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4396 ts->child_tidptr = child_tidptr; 4397 } 4398 4399 if (nptl_flags & CLONE_SETTLS) 4400 cpu_set_tls (new_env, newtls); 4401 4402 /* Grab a mutex so that thread setup appears atomic. */ 4403 pthread_mutex_lock(&clone_lock); 4404 4405 memset(&info, 0, sizeof(info)); 4406 pthread_mutex_init(&info.mutex, NULL); 4407 pthread_mutex_lock(&info.mutex); 4408 pthread_cond_init(&info.cond, NULL); 4409 info.env = new_env; 4410 if (nptl_flags & CLONE_CHILD_SETTID) 4411 info.child_tidptr = child_tidptr; 4412 if (nptl_flags & CLONE_PARENT_SETTID) 4413 info.parent_tidptr = parent_tidptr; 4414 4415 ret = pthread_attr_init(&attr); 4416 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4417 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4418 /* It is not safe to deliver signals until the child has finished 4419 initializing, so temporarily block all signals. */ 4420 sigfillset(&sigmask); 4421 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4422 4423 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4424 /* TODO: Free new CPU state if thread creation failed. */ 4425 4426 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4427 pthread_attr_destroy(&attr); 4428 if (ret == 0) { 4429 /* Wait for the child to initialize. 
*/ 4430 pthread_cond_wait(&info.cond, &info.mutex); 4431 ret = info.tid; 4432 if (flags & CLONE_PARENT_SETTID) 4433 put_user_u32(ret, parent_tidptr); 4434 } else { 4435 ret = -1; 4436 } 4437 pthread_mutex_unlock(&info.mutex); 4438 pthread_cond_destroy(&info.cond); 4439 pthread_mutex_destroy(&info.mutex); 4440 pthread_mutex_unlock(&clone_lock); 4441 #else 4442 if (flags & CLONE_NPTL_FLAGS2) 4443 return -EINVAL; 4444 /* This is probably going to die very quickly, but do it anyway. */ 4445 new_stack = g_malloc0 (NEW_STACK_SIZE); 4446 #ifdef __ia64__ 4447 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env); 4448 #else 4449 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env); 4450 #endif 4451 #endif 4452 } else { 4453 /* if no CLONE_VM, we consider it is a fork */ 4454 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4455 return -EINVAL; 4456 fork_start(); 4457 ret = fork(); 4458 if (ret == 0) { 4459 /* Child Process. */ 4460 cpu_clone_regs(env, newsp); 4461 fork_end(1); 4462 #if defined(CONFIG_USE_NPTL) 4463 /* There is a race condition here. The parent process could 4464 theoretically read the TID in the child process before the child 4465 tid is set. This would require using either ptrace 4466 (not implemented) or having *_tidptr to point at a shared memory 4467 mapping. We can't repeat the spinlock hack used above because 4468 the child process gets its own copy of the lock. */ 4469 if (flags & CLONE_CHILD_SETTID) 4470 put_user_u32(gettid(), child_tidptr); 4471 if (flags & CLONE_PARENT_SETTID) 4472 put_user_u32(gettid(), parent_tidptr); 4473 ts = (TaskState *)env->opaque; 4474 if (flags & CLONE_SETTLS) 4475 cpu_set_tls (env, newtls); 4476 if (flags & CLONE_CHILD_CLEARTID) 4477 ts->child_tidptr = child_tidptr; 4478 #endif 4479 } else { 4480 fork_end(0); 4481 } 4482 } 4483 return ret; 4484 } 4485 4486 /* warning : doesn't handle linux specific flags... 
*/ 4487 static int target_to_host_fcntl_cmd(int cmd) 4488 { 4489 switch(cmd) { 4490 case TARGET_F_DUPFD: 4491 case TARGET_F_GETFD: 4492 case TARGET_F_SETFD: 4493 case TARGET_F_GETFL: 4494 case TARGET_F_SETFL: 4495 return cmd; 4496 case TARGET_F_GETLK: 4497 return F_GETLK; 4498 case TARGET_F_SETLK: 4499 return F_SETLK; 4500 case TARGET_F_SETLKW: 4501 return F_SETLKW; 4502 case TARGET_F_GETOWN: 4503 return F_GETOWN; 4504 case TARGET_F_SETOWN: 4505 return F_SETOWN; 4506 case TARGET_F_GETSIG: 4507 return F_GETSIG; 4508 case TARGET_F_SETSIG: 4509 return F_SETSIG; 4510 #if TARGET_ABI_BITS == 32 4511 case TARGET_F_GETLK64: 4512 return F_GETLK64; 4513 case TARGET_F_SETLK64: 4514 return F_SETLK64; 4515 case TARGET_F_SETLKW64: 4516 return F_SETLKW64; 4517 #endif 4518 case TARGET_F_SETLEASE: 4519 return F_SETLEASE; 4520 case TARGET_F_GETLEASE: 4521 return F_GETLEASE; 4522 #ifdef F_DUPFD_CLOEXEC 4523 case TARGET_F_DUPFD_CLOEXEC: 4524 return F_DUPFD_CLOEXEC; 4525 #endif 4526 case TARGET_F_NOTIFY: 4527 return F_NOTIFY; 4528 default: 4529 return -TARGET_EINVAL; 4530 } 4531 return -TARGET_EINVAL; 4532 } 4533 4534 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4535 static const bitmask_transtbl flock_tbl[] = { 4536 TRANSTBL_CONVERT(F_RDLCK), 4537 TRANSTBL_CONVERT(F_WRLCK), 4538 TRANSTBL_CONVERT(F_UNLCK), 4539 TRANSTBL_CONVERT(F_EXLCK), 4540 TRANSTBL_CONVERT(F_SHLCK), 4541 { 0, 0, 0, 0 } 4542 }; 4543 4544 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4545 { 4546 struct flock fl; 4547 struct target_flock *target_fl; 4548 struct flock64 fl64; 4549 struct target_flock64 *target_fl64; 4550 abi_long ret; 4551 int host_cmd = target_to_host_fcntl_cmd(cmd); 4552 4553 if (host_cmd == -TARGET_EINVAL) 4554 return host_cmd; 4555 4556 switch(cmd) { 4557 case TARGET_F_GETLK: 4558 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4559 return -TARGET_EFAULT; 4560 fl.l_type = 4561 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4562 fl.l_whence = tswap16(target_fl->l_whence); 4563 fl.l_start = tswapal(target_fl->l_start); 4564 fl.l_len = tswapal(target_fl->l_len); 4565 fl.l_pid = tswap32(target_fl->l_pid); 4566 unlock_user_struct(target_fl, arg, 0); 4567 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4568 if (ret == 0) { 4569 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4570 return -TARGET_EFAULT; 4571 target_fl->l_type = 4572 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4573 target_fl->l_whence = tswap16(fl.l_whence); 4574 target_fl->l_start = tswapal(fl.l_start); 4575 target_fl->l_len = tswapal(fl.l_len); 4576 target_fl->l_pid = tswap32(fl.l_pid); 4577 unlock_user_struct(target_fl, arg, 1); 4578 } 4579 break; 4580 4581 case TARGET_F_SETLK: 4582 case TARGET_F_SETLKW: 4583 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4584 return -TARGET_EFAULT; 4585 fl.l_type = 4586 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4587 fl.l_whence = tswap16(target_fl->l_whence); 4588 fl.l_start = tswapal(target_fl->l_start); 4589 fl.l_len = tswapal(target_fl->l_len); 4590 fl.l_pid = tswap32(target_fl->l_pid); 4591 unlock_user_struct(target_fl, arg, 0); 4592 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4593 break; 4594 4595 case TARGET_F_GETLK64: 4596 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4597 return -TARGET_EFAULT; 4598 fl64.l_type = 4599 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4600 fl64.l_whence = tswap16(target_fl64->l_whence); 4601 fl64.l_start = tswap64(target_fl64->l_start); 4602 fl64.l_len = 
tswap64(target_fl64->l_len); 4603 fl64.l_pid = tswap32(target_fl64->l_pid); 4604 unlock_user_struct(target_fl64, arg, 0); 4605 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4606 if (ret == 0) { 4607 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4608 return -TARGET_EFAULT; 4609 target_fl64->l_type = 4610 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4611 target_fl64->l_whence = tswap16(fl64.l_whence); 4612 target_fl64->l_start = tswap64(fl64.l_start); 4613 target_fl64->l_len = tswap64(fl64.l_len); 4614 target_fl64->l_pid = tswap32(fl64.l_pid); 4615 unlock_user_struct(target_fl64, arg, 1); 4616 } 4617 break; 4618 case TARGET_F_SETLK64: 4619 case TARGET_F_SETLKW64: 4620 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4621 return -TARGET_EFAULT; 4622 fl64.l_type = 4623 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4624 fl64.l_whence = tswap16(target_fl64->l_whence); 4625 fl64.l_start = tswap64(target_fl64->l_start); 4626 fl64.l_len = tswap64(target_fl64->l_len); 4627 fl64.l_pid = tswap32(target_fl64->l_pid); 4628 unlock_user_struct(target_fl64, arg, 0); 4629 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4630 break; 4631 4632 case TARGET_F_GETFL: 4633 ret = get_errno(fcntl(fd, host_cmd, arg)); 4634 if (ret >= 0) { 4635 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4636 } 4637 break; 4638 4639 case TARGET_F_SETFL: 4640 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4641 break; 4642 4643 case TARGET_F_SETOWN: 4644 case TARGET_F_GETOWN: 4645 case TARGET_F_SETSIG: 4646 case TARGET_F_GETSIG: 4647 case TARGET_F_SETLEASE: 4648 case TARGET_F_GETLEASE: 4649 ret = get_errno(fcntl(fd, host_cmd, arg)); 4650 break; 4651 4652 default: 4653 ret = get_errno(fcntl(fd, cmd, arg)); 4654 break; 4655 } 4656 return ret; 4657 } 4658 4659 #ifdef USE_UID16 4660 4661 static inline int high2lowuid(int uid) 4662 { 4663 if (uid > 65535) 4664 return 65534; 4665 else 4666 return uid; 4667 } 4668 4669 static inline int high2lowgid(int gid) 4670 { 4671 if (gid > 65535) 4672 return 65534; 4673 else 4674 return gid; 4675 } 4676 4677 static inline int low2highuid(int uid) 4678 { 4679 if ((int16_t)uid == -1) 4680 return -1; 4681 else 4682 return uid; 4683 } 4684 4685 static inline int low2highgid(int gid) 4686 { 4687 if ((int16_t)gid == -1) 4688 return -1; 4689 else 4690 return gid; 4691 } 4692 static inline int tswapid(int id) 4693 { 4694 return tswap16(id); 4695 } 4696 #else /* !USE_UID16 */ 4697 static inline int high2lowuid(int uid) 4698 { 4699 return uid; 4700 } 4701 static inline int high2lowgid(int gid) 4702 { 4703 return gid; 4704 } 4705 static inline int low2highuid(int uid) 4706 { 4707 return uid; 4708 } 4709 static inline int low2highgid(int gid) 4710 { 4711 return gid; 4712 } 4713 static inline int tswapid(int id) 4714 { 4715 return tswap32(id); 4716 } 4717 #endif /* USE_UID16 */ 4718 4719 void syscall_init(void) 4720 { 4721 IOCTLEntry *ie; 4722 const argtype *arg_type; 4723 int size; 4724 int i; 4725 4726 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4727 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4728 #include "syscall_types.h" 4729 #undef STRUCT 4730 #undef STRUCT_SPECIAL 4731 4732 /* Build target_to_host_errno_table[] table from 4733 * host_to_target_errno_table[]. 
*/ 4734 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4735 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4736 } 4737 4738 /* we patch the ioctl size if necessary. We rely on the fact that 4739 no ioctl has all the bits at '1' in the size field */ 4740 ie = ioctl_entries; 4741 while (ie->target_cmd != 0) { 4742 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4743 TARGET_IOC_SIZEMASK) { 4744 arg_type = ie->arg_type; 4745 if (arg_type[0] != TYPE_PTR) { 4746 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4747 ie->target_cmd); 4748 exit(1); 4749 } 4750 arg_type++; 4751 size = thunk_type_size(arg_type, 0); 4752 ie->target_cmd = (ie->target_cmd & 4753 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4754 (size << TARGET_IOC_SIZESHIFT); 4755 } 4756 4757 /* automatic consistency check if same arch */ 4758 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4759 (defined(__x86_64__) && defined(TARGET_X86_64)) 4760 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4761 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4762 ie->name, ie->target_cmd, ie->host_cmd); 4763 } 4764 #endif 4765 ie++; 4766 } 4767 } 4768 4769 #if TARGET_ABI_BITS == 32 4770 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4771 { 4772 #ifdef TARGET_WORDS_BIGENDIAN 4773 return ((uint64_t)word0 << 32) | word1; 4774 #else 4775 return ((uint64_t)word1 << 32) | word0; 4776 #endif 4777 } 4778 #else /* TARGET_ABI_BITS == 32 */ 4779 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4780 { 4781 return word0; 4782 } 4783 #endif /* TARGET_ABI_BITS != 32 */ 4784 4785 #ifdef TARGET_NR_truncate64 4786 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4787 abi_long arg2, 4788 abi_long arg3, 4789 abi_long arg4) 4790 { 4791 if (regpairs_aligned(cpu_env)) { 4792 arg2 = arg3; 4793 arg3 = arg4; 4794 } 4795 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4796 } 4797 #endif 4798 4799 #ifdef TARGET_NR_ftruncate64 4800 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4801 abi_long arg2, 4802 abi_long arg3, 4803 abi_long arg4) 4804 { 4805 if (regpairs_aligned(cpu_env)) { 4806 arg2 = arg3; 4807 arg3 = arg4; 4808 } 4809 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4810 } 4811 #endif 4812 4813 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4814 abi_ulong target_addr) 4815 { 4816 struct target_timespec *target_ts; 4817 4818 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4819 return -TARGET_EFAULT; 4820 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4821 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4822 unlock_user_struct(target_ts, target_addr, 0); 4823 return 0; 4824 } 4825 4826 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4827 struct timespec *host_ts) 4828 { 4829 struct target_timespec *target_ts; 4830 4831 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4832 return -TARGET_EFAULT; 4833 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4834 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4835 unlock_user_struct(target_ts, target_addr, 1); 4836 return 0; 4837 } 4838 4839 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4840 static inline abi_long host_to_target_stat64(void *cpu_env, 4841 abi_ulong target_addr, 4842 struct stat *host_st) 4843 { 4844 #ifdef TARGET_ARM 4845 if (((CPUARMState *)cpu_env)->eabi) { 4846 struct target_eabi_stat64 *target_st; 4847 
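/* The ARM EABI variant of stat64 has its own field sizes and padding, so the
 * structure is filled in member by member; __put_user also converts each
 * value to guest byte order as it is stored. */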
4848 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4849 return -TARGET_EFAULT; 4850 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4851 __put_user(host_st->st_dev, &target_st->st_dev); 4852 __put_user(host_st->st_ino, &target_st->st_ino); 4853 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4854 __put_user(host_st->st_ino, &target_st->__st_ino); 4855 #endif 4856 __put_user(host_st->st_mode, &target_st->st_mode); 4857 __put_user(host_st->st_nlink, &target_st->st_nlink); 4858 __put_user(host_st->st_uid, &target_st->st_uid); 4859 __put_user(host_st->st_gid, &target_st->st_gid); 4860 __put_user(host_st->st_rdev, &target_st->st_rdev); 4861 __put_user(host_st->st_size, &target_st->st_size); 4862 __put_user(host_st->st_blksize, &target_st->st_blksize); 4863 __put_user(host_st->st_blocks, &target_st->st_blocks); 4864 __put_user(host_st->st_atime, &target_st->target_st_atime); 4865 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4866 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4867 unlock_user_struct(target_st, target_addr, 1); 4868 } else 4869 #endif 4870 { 4871 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA) 4872 struct target_stat *target_st; 4873 #else 4874 struct target_stat64 *target_st; 4875 #endif 4876 4877 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4878 return -TARGET_EFAULT; 4879 memset(target_st, 0, sizeof(*target_st)); 4880 __put_user(host_st->st_dev, &target_st->st_dev); 4881 __put_user(host_st->st_ino, &target_st->st_ino); 4882 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4883 __put_user(host_st->st_ino, &target_st->__st_ino); 4884 #endif 4885 __put_user(host_st->st_mode, &target_st->st_mode); 4886 __put_user(host_st->st_nlink, &target_st->st_nlink); 4887 __put_user(host_st->st_uid, &target_st->st_uid); 4888 __put_user(host_st->st_gid, &target_st->st_gid); 4889 __put_user(host_st->st_rdev, &target_st->st_rdev); 4890 /* XXX: better use of kernel struct */ 4891 __put_user(host_st->st_size, &target_st->st_size); 4892 __put_user(host_st->st_blksize, &target_st->st_blksize); 4893 __put_user(host_st->st_blocks, &target_st->st_blocks); 4894 __put_user(host_st->st_atime, &target_st->target_st_atime); 4895 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4896 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4897 unlock_user_struct(target_st, target_addr, 1); 4898 } 4899 4900 return 0; 4901 } 4902 #endif 4903 4904 #if defined(CONFIG_USE_NPTL) 4905 /* ??? Using host futex calls even when target atomic operations 4906 are not really atomic probably breaks things. However implementing 4907 futexes locally would make futexes shared between multiple processes 4908 tricky. However they're probably useless because guest atomic 4909 operations won't work either. */ 4910 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4911 target_ulong uaddr2, int val3) 4912 { 4913 struct timespec ts, *pts; 4914 int base_op; 4915 4916 /* ??? We assume FUTEX_* constants are the same on both host 4917 and target. 
*/ 4918 #ifdef FUTEX_CMD_MASK 4919 base_op = op & FUTEX_CMD_MASK; 4920 #else 4921 base_op = op; 4922 #endif 4923 switch (base_op) { 4924 case FUTEX_WAIT: 4925 case FUTEX_WAIT_BITSET: 4926 if (timeout) { 4927 pts = &ts; 4928 target_to_host_timespec(pts, timeout); 4929 } else { 4930 pts = NULL; 4931 } 4932 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4933 pts, NULL, val3)); 4934 case FUTEX_WAKE: 4935 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4936 case FUTEX_FD: 4937 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4938 case FUTEX_REQUEUE: 4939 case FUTEX_CMP_REQUEUE: 4940 case FUTEX_WAKE_OP: 4941 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4942 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4943 But the prototype takes a `struct timespec *'; insert casts 4944 to satisfy the compiler. We do not need to tswap TIMEOUT 4945 since it's not compared to guest memory. */ 4946 pts = (struct timespec *)(uintptr_t) timeout; 4947 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4948 g2h(uaddr2), 4949 (base_op == FUTEX_CMP_REQUEUE 4950 ? tswap32(val3) 4951 : val3))); 4952 default: 4953 return -TARGET_ENOSYS; 4954 } 4955 } 4956 #endif 4957 4958 /* Map host to target signal numbers for the wait family of syscalls. 4959 Assume all other status bits are the same. */ 4960 int host_to_target_waitstatus(int status) 4961 { 4962 if (WIFSIGNALED(status)) { 4963 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4964 } 4965 if (WIFSTOPPED(status)) { 4966 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4967 | (status & 0xff); 4968 } 4969 return status; 4970 } 4971 4972 int get_osversion(void) 4973 { 4974 static int osversion; 4975 struct new_utsname buf; 4976 const char *s; 4977 int i, n, tmp; 4978 if (osversion) 4979 return osversion; 4980 if (qemu_uname_release && *qemu_uname_release) { 4981 s = qemu_uname_release; 4982 } else { 4983 if (sys_uname(&buf)) 4984 return 0; 4985 s = buf.release; 4986 } 4987 tmp = 0; 4988 for (i = 0; i < 3; i++) { 4989 n = 0; 4990 while (*s >= '0' && *s <= '9') { 4991 n *= 10; 4992 n += *s - '0'; 4993 s++; 4994 } 4995 tmp = (tmp << 8) + n; 4996 if (*s == '.') 4997 s++; 4998 } 4999 osversion = tmp; 5000 return osversion; 5001 } 5002 5003 5004 static int open_self_maps(void *cpu_env, int fd) 5005 { 5006 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5007 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5008 #endif 5009 FILE *fp; 5010 char *line = NULL; 5011 size_t len = 0; 5012 ssize_t read; 5013 5014 fp = fopen("/proc/self/maps", "r"); 5015 if (fp == NULL) { 5016 return -EACCES; 5017 } 5018 5019 while ((read = getline(&line, &len, fp)) != -1) { 5020 int fields, dev_maj, dev_min, inode; 5021 uint64_t min, max, offset; 5022 char flag_r, flag_w, flag_x, flag_p; 5023 char path[512] = ""; 5024 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 5025 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 5026 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5027 5028 if ((fields < 10) || (fields > 11)) { 5029 continue; 5030 } 5031 if (!strncmp(path, "[stack]", 7)) { 5032 continue; 5033 } 5034 if (h2g_valid(min) && h2g_valid(max)) { 5035 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5036 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n", 5037 h2g(min), h2g(max), flag_r, flag_w, 5038 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5039 path[0] ? 
" " : "", path); 5040 } 5041 } 5042 5043 free(line); 5044 fclose(fp); 5045 5046 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5047 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 5048 (unsigned long long)ts->info->stack_limit, 5049 (unsigned long long)(ts->info->start_stack + 5050 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 5051 (unsigned long long)0); 5052 #endif 5053 5054 return 0; 5055 } 5056 5057 static int open_self_stat(void *cpu_env, int fd) 5058 { 5059 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5060 abi_ulong start_stack = ts->info->start_stack; 5061 int i; 5062 5063 for (i = 0; i < 44; i++) { 5064 char buf[128]; 5065 int len; 5066 uint64_t val = 0; 5067 5068 if (i == 0) { 5069 /* pid */ 5070 val = getpid(); 5071 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5072 } else if (i == 1) { 5073 /* app name */ 5074 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5075 } else if (i == 27) { 5076 /* stack bottom */ 5077 val = start_stack; 5078 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5079 } else { 5080 /* for the rest, there is MasterCard */ 5081 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 5082 } 5083 5084 len = strlen(buf); 5085 if (write(fd, buf, len) != len) { 5086 return -1; 5087 } 5088 } 5089 5090 return 0; 5091 } 5092 5093 static int open_self_auxv(void *cpu_env, int fd) 5094 { 5095 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5096 abi_ulong auxv = ts->info->saved_auxv; 5097 abi_ulong len = ts->info->auxv_len; 5098 char *ptr; 5099 5100 /* 5101 * Auxiliary vector is stored in target process stack. 5102 * read in whole auxv vector and copy it to file 5103 */ 5104 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5105 if (ptr != NULL) { 5106 while (len > 0) { 5107 ssize_t r; 5108 r = write(fd, ptr, len); 5109 if (r <= 0) { 5110 break; 5111 } 5112 len -= r; 5113 ptr += r; 5114 } 5115 lseek(fd, 0, SEEK_SET); 5116 unlock_user(ptr, auxv, len); 5117 } 5118 5119 return 0; 5120 } 5121 5122 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 5123 { 5124 struct fake_open { 5125 const char *filename; 5126 int (*fill)(void *cpu_env, int fd); 5127 }; 5128 const struct fake_open *fake_open; 5129 static const struct fake_open fakes[] = { 5130 { "/proc/self/maps", open_self_maps }, 5131 { "/proc/self/stat", open_self_stat }, 5132 { "/proc/self/auxv", open_self_auxv }, 5133 { NULL, NULL } 5134 }; 5135 5136 for (fake_open = fakes; fake_open->filename; fake_open++) { 5137 if (!strncmp(pathname, fake_open->filename, 5138 strlen(fake_open->filename))) { 5139 break; 5140 } 5141 } 5142 5143 if (fake_open->filename) { 5144 const char *tmpdir; 5145 char filename[PATH_MAX]; 5146 int fd, r; 5147 5148 /* create temporary file to map stat to */ 5149 tmpdir = getenv("TMPDIR"); 5150 if (!tmpdir) 5151 tmpdir = "/tmp"; 5152 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5153 fd = mkstemp(filename); 5154 if (fd < 0) { 5155 return fd; 5156 } 5157 unlink(filename); 5158 5159 if ((r = fake_open->fill(cpu_env, fd))) { 5160 close(fd); 5161 return r; 5162 } 5163 lseek(fd, 0, SEEK_SET); 5164 5165 return fd; 5166 } 5167 5168 return get_errno(open(path(pathname), flags, mode)); 5169 } 5170 5171 /* do_syscall() should always have a single exit point at the end so 5172 that actions, such as logging of syscall results, can be performed. 5173 All errnos that do_syscall() returns must be -TARGET_<errcode>. 
*/ 5174 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5175 abi_long arg2, abi_long arg3, abi_long arg4, 5176 abi_long arg5, abi_long arg6, abi_long arg7, 5177 abi_long arg8) 5178 { 5179 abi_long ret; 5180 struct stat st; 5181 struct statfs stfs; 5182 void *p; 5183 5184 #ifdef DEBUG 5185 gemu_log("syscall %d", num); 5186 #endif 5187 if(do_strace) 5188 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5189 5190 switch(num) { 5191 case TARGET_NR_exit: 5192 #ifdef CONFIG_USE_NPTL 5193 /* In old applications this may be used to implement _exit(2). 5194 However in threaded applictions it is used for thread termination, 5195 and _exit_group is used for application termination. 5196 Do thread termination if we have more then one thread. */ 5197 /* FIXME: This probably breaks if a signal arrives. We should probably 5198 be disabling signals. */ 5199 if (first_cpu->next_cpu) { 5200 TaskState *ts; 5201 CPUArchState **lastp; 5202 CPUArchState *p; 5203 5204 cpu_list_lock(); 5205 lastp = &first_cpu; 5206 p = first_cpu; 5207 while (p && p != (CPUArchState *)cpu_env) { 5208 lastp = &p->next_cpu; 5209 p = p->next_cpu; 5210 } 5211 /* If we didn't find the CPU for this thread then something is 5212 horribly wrong. */ 5213 if (!p) 5214 abort(); 5215 /* Remove the CPU from the list. */ 5216 *lastp = p->next_cpu; 5217 cpu_list_unlock(); 5218 ts = ((CPUArchState *)cpu_env)->opaque; 5219 if (ts->child_tidptr) { 5220 put_user_u32(0, ts->child_tidptr); 5221 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5222 NULL, NULL, 0); 5223 } 5224 thread_env = NULL; 5225 object_unref(OBJECT(ENV_GET_CPU(cpu_env))); 5226 g_free(ts); 5227 pthread_exit(NULL); 5228 } 5229 #endif 5230 #ifdef TARGET_GPROF 5231 _mcleanup(); 5232 #endif 5233 gdb_exit(cpu_env, arg1); 5234 _exit(arg1); 5235 ret = 0; /* avoid warning */ 5236 break; 5237 case TARGET_NR_read: 5238 if (arg3 == 0) 5239 ret = 0; 5240 else { 5241 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5242 goto efault; 5243 ret = get_errno(read(arg1, p, arg3)); 5244 unlock_user(p, arg2, ret); 5245 } 5246 break; 5247 case TARGET_NR_write: 5248 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5249 goto efault; 5250 ret = get_errno(write(arg1, p, arg3)); 5251 unlock_user(p, arg2, 0); 5252 break; 5253 case TARGET_NR_open: 5254 if (!(p = lock_user_string(arg1))) 5255 goto efault; 5256 ret = get_errno(do_open(cpu_env, p, 5257 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5258 arg3)); 5259 unlock_user(p, arg1, 0); 5260 break; 5261 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5262 case TARGET_NR_openat: 5263 if (!(p = lock_user_string(arg2))) 5264 goto efault; 5265 ret = get_errno(sys_openat(arg1, 5266 path(p), 5267 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5268 arg4)); 5269 unlock_user(p, arg2, 0); 5270 break; 5271 #endif 5272 case TARGET_NR_close: 5273 ret = get_errno(close(arg1)); 5274 break; 5275 case TARGET_NR_brk: 5276 ret = do_brk(arg1); 5277 break; 5278 case TARGET_NR_fork: 5279 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5280 break; 5281 #ifdef TARGET_NR_waitpid 5282 case TARGET_NR_waitpid: 5283 { 5284 int status; 5285 ret = get_errno(waitpid(arg1, &status, arg3)); 5286 if (!is_error(ret) && arg2 && ret 5287 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5288 goto efault; 5289 } 5290 break; 5291 #endif 5292 #ifdef TARGET_NR_waitid 5293 case TARGET_NR_waitid: 5294 { 5295 siginfo_t info; 5296 info.si_pid = 0; 5297 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5298 if (!is_error(ret) && arg3 && info.si_pid != 0) { 
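/* waitid() leaves si_pid at the zero we initialised it to when no child was
 * actually reaped (e.g. WNOHANG with nothing pending), so only copy the
 * siginfo back to guest memory when a child really changed state. */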
5299 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5300 goto efault; 5301 host_to_target_siginfo(p, &info); 5302 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5303 } 5304 } 5305 break; 5306 #endif 5307 #ifdef TARGET_NR_creat /* not on alpha */ 5308 case TARGET_NR_creat: 5309 if (!(p = lock_user_string(arg1))) 5310 goto efault; 5311 ret = get_errno(creat(p, arg2)); 5312 unlock_user(p, arg1, 0); 5313 break; 5314 #endif 5315 case TARGET_NR_link: 5316 { 5317 void * p2; 5318 p = lock_user_string(arg1); 5319 p2 = lock_user_string(arg2); 5320 if (!p || !p2) 5321 ret = -TARGET_EFAULT; 5322 else 5323 ret = get_errno(link(p, p2)); 5324 unlock_user(p2, arg2, 0); 5325 unlock_user(p, arg1, 0); 5326 } 5327 break; 5328 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 5329 case TARGET_NR_linkat: 5330 { 5331 void * p2 = NULL; 5332 if (!arg2 || !arg4) 5333 goto efault; 5334 p = lock_user_string(arg2); 5335 p2 = lock_user_string(arg4); 5336 if (!p || !p2) 5337 ret = -TARGET_EFAULT; 5338 else 5339 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); 5340 unlock_user(p, arg2, 0); 5341 unlock_user(p2, arg4, 0); 5342 } 5343 break; 5344 #endif 5345 case TARGET_NR_unlink: 5346 if (!(p = lock_user_string(arg1))) 5347 goto efault; 5348 ret = get_errno(unlink(p)); 5349 unlock_user(p, arg1, 0); 5350 break; 5351 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 5352 case TARGET_NR_unlinkat: 5353 if (!(p = lock_user_string(arg2))) 5354 goto efault; 5355 ret = get_errno(sys_unlinkat(arg1, p, arg3)); 5356 unlock_user(p, arg2, 0); 5357 break; 5358 #endif 5359 case TARGET_NR_execve: 5360 { 5361 char **argp, **envp; 5362 int argc, envc; 5363 abi_ulong gp; 5364 abi_ulong guest_argp; 5365 abi_ulong guest_envp; 5366 abi_ulong addr; 5367 char **q; 5368 int total_size = 0; 5369 5370 argc = 0; 5371 guest_argp = arg2; 5372 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5373 if (get_user_ual(addr, gp)) 5374 goto efault; 5375 if (!addr) 5376 break; 5377 argc++; 5378 } 5379 envc = 0; 5380 guest_envp = arg3; 5381 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5382 if (get_user_ual(addr, gp)) 5383 goto efault; 5384 if (!addr) 5385 break; 5386 envc++; 5387 } 5388 5389 argp = alloca((argc + 1) * sizeof(void *)); 5390 envp = alloca((envc + 1) * sizeof(void *)); 5391 5392 for (gp = guest_argp, q = argp; gp; 5393 gp += sizeof(abi_ulong), q++) { 5394 if (get_user_ual(addr, gp)) 5395 goto execve_efault; 5396 if (!addr) 5397 break; 5398 if (!(*q = lock_user_string(addr))) 5399 goto execve_efault; 5400 total_size += strlen(*q) + 1; 5401 } 5402 *q = NULL; 5403 5404 for (gp = guest_envp, q = envp; gp; 5405 gp += sizeof(abi_ulong), q++) { 5406 if (get_user_ual(addr, gp)) 5407 goto execve_efault; 5408 if (!addr) 5409 break; 5410 if (!(*q = lock_user_string(addr))) 5411 goto execve_efault; 5412 total_size += strlen(*q) + 1; 5413 } 5414 *q = NULL; 5415 5416 /* This case will not be caught by the host's execve() if its 5417 page size is bigger than the target's. 
*/ 5418 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5419 ret = -TARGET_E2BIG; 5420 goto execve_end; 5421 } 5422 if (!(p = lock_user_string(arg1))) 5423 goto execve_efault; 5424 ret = get_errno(execve(p, argp, envp)); 5425 unlock_user(p, arg1, 0); 5426 5427 goto execve_end; 5428 5429 execve_efault: 5430 ret = -TARGET_EFAULT; 5431 5432 execve_end: 5433 for (gp = guest_argp, q = argp; *q; 5434 gp += sizeof(abi_ulong), q++) { 5435 if (get_user_ual(addr, gp) 5436 || !addr) 5437 break; 5438 unlock_user(*q, addr, 0); 5439 } 5440 for (gp = guest_envp, q = envp; *q; 5441 gp += sizeof(abi_ulong), q++) { 5442 if (get_user_ual(addr, gp) 5443 || !addr) 5444 break; 5445 unlock_user(*q, addr, 0); 5446 } 5447 } 5448 break; 5449 case TARGET_NR_chdir: 5450 if (!(p = lock_user_string(arg1))) 5451 goto efault; 5452 ret = get_errno(chdir(p)); 5453 unlock_user(p, arg1, 0); 5454 break; 5455 #ifdef TARGET_NR_time 5456 case TARGET_NR_time: 5457 { 5458 time_t host_time; 5459 ret = get_errno(time(&host_time)); 5460 if (!is_error(ret) 5461 && arg1 5462 && put_user_sal(host_time, arg1)) 5463 goto efault; 5464 } 5465 break; 5466 #endif 5467 case TARGET_NR_mknod: 5468 if (!(p = lock_user_string(arg1))) 5469 goto efault; 5470 ret = get_errno(mknod(p, arg2, arg3)); 5471 unlock_user(p, arg1, 0); 5472 break; 5473 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 5474 case TARGET_NR_mknodat: 5475 if (!(p = lock_user_string(arg2))) 5476 goto efault; 5477 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); 5478 unlock_user(p, arg2, 0); 5479 break; 5480 #endif 5481 case TARGET_NR_chmod: 5482 if (!(p = lock_user_string(arg1))) 5483 goto efault; 5484 ret = get_errno(chmod(p, arg2)); 5485 unlock_user(p, arg1, 0); 5486 break; 5487 #ifdef TARGET_NR_break 5488 case TARGET_NR_break: 5489 goto unimplemented; 5490 #endif 5491 #ifdef TARGET_NR_oldstat 5492 case TARGET_NR_oldstat: 5493 goto unimplemented; 5494 #endif 5495 case TARGET_NR_lseek: 5496 ret = get_errno(lseek(arg1, arg2, arg3)); 5497 break; 5498 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5499 /* Alpha specific */ 5500 case TARGET_NR_getxpid: 5501 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5502 ret = get_errno(getpid()); 5503 break; 5504 #endif 5505 #ifdef TARGET_NR_getpid 5506 case TARGET_NR_getpid: 5507 ret = get_errno(getpid()); 5508 break; 5509 #endif 5510 case TARGET_NR_mount: 5511 { 5512 /* need to look at the data field */ 5513 void *p2, *p3; 5514 p = lock_user_string(arg1); 5515 p2 = lock_user_string(arg2); 5516 p3 = lock_user_string(arg3); 5517 if (!p || !p2 || !p3) 5518 ret = -TARGET_EFAULT; 5519 else { 5520 /* FIXME - arg5 should be locked, but it isn't clear how to 5521 * do that since it's not guaranteed to be a NULL-terminated 5522 * string. 5523 */ 5524 if ( ! 
arg5 ) 5525 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5526 else 5527 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5528 } 5529 unlock_user(p, arg1, 0); 5530 unlock_user(p2, arg2, 0); 5531 unlock_user(p3, arg3, 0); 5532 break; 5533 } 5534 #ifdef TARGET_NR_umount 5535 case TARGET_NR_umount: 5536 if (!(p = lock_user_string(arg1))) 5537 goto efault; 5538 ret = get_errno(umount(p)); 5539 unlock_user(p, arg1, 0); 5540 break; 5541 #endif 5542 #ifdef TARGET_NR_stime /* not on alpha */ 5543 case TARGET_NR_stime: 5544 { 5545 time_t host_time; 5546 if (get_user_sal(host_time, arg1)) 5547 goto efault; 5548 ret = get_errno(stime(&host_time)); 5549 } 5550 break; 5551 #endif 5552 case TARGET_NR_ptrace: 5553 goto unimplemented; 5554 #ifdef TARGET_NR_alarm /* not on alpha */ 5555 case TARGET_NR_alarm: 5556 ret = alarm(arg1); 5557 break; 5558 #endif 5559 #ifdef TARGET_NR_oldfstat 5560 case TARGET_NR_oldfstat: 5561 goto unimplemented; 5562 #endif 5563 #ifdef TARGET_NR_pause /* not on alpha */ 5564 case TARGET_NR_pause: 5565 ret = get_errno(pause()); 5566 break; 5567 #endif 5568 #ifdef TARGET_NR_utime 5569 case TARGET_NR_utime: 5570 { 5571 struct utimbuf tbuf, *host_tbuf; 5572 struct target_utimbuf *target_tbuf; 5573 if (arg2) { 5574 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5575 goto efault; 5576 tbuf.actime = tswapal(target_tbuf->actime); 5577 tbuf.modtime = tswapal(target_tbuf->modtime); 5578 unlock_user_struct(target_tbuf, arg2, 0); 5579 host_tbuf = &tbuf; 5580 } else { 5581 host_tbuf = NULL; 5582 } 5583 if (!(p = lock_user_string(arg1))) 5584 goto efault; 5585 ret = get_errno(utime(p, host_tbuf)); 5586 unlock_user(p, arg1, 0); 5587 } 5588 break; 5589 #endif 5590 case TARGET_NR_utimes: 5591 { 5592 struct timeval *tvp, tv[2]; 5593 if (arg2) { 5594 if (copy_from_user_timeval(&tv[0], arg2) 5595 || copy_from_user_timeval(&tv[1], 5596 arg2 + sizeof(struct target_timeval))) 5597 goto efault; 5598 tvp = tv; 5599 } else { 5600 tvp = NULL; 5601 } 5602 if (!(p = lock_user_string(arg1))) 5603 goto efault; 5604 ret = get_errno(utimes(p, tvp)); 5605 unlock_user(p, arg1, 0); 5606 } 5607 break; 5608 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 5609 case TARGET_NR_futimesat: 5610 { 5611 struct timeval *tvp, tv[2]; 5612 if (arg3) { 5613 if (copy_from_user_timeval(&tv[0], arg3) 5614 || copy_from_user_timeval(&tv[1], 5615 arg3 + sizeof(struct target_timeval))) 5616 goto efault; 5617 tvp = tv; 5618 } else { 5619 tvp = NULL; 5620 } 5621 if (!(p = lock_user_string(arg2))) 5622 goto efault; 5623 ret = get_errno(sys_futimesat(arg1, path(p), tvp)); 5624 unlock_user(p, arg2, 0); 5625 } 5626 break; 5627 #endif 5628 #ifdef TARGET_NR_stty 5629 case TARGET_NR_stty: 5630 goto unimplemented; 5631 #endif 5632 #ifdef TARGET_NR_gtty 5633 case TARGET_NR_gtty: 5634 goto unimplemented; 5635 #endif 5636 case TARGET_NR_access: 5637 if (!(p = lock_user_string(arg1))) 5638 goto efault; 5639 ret = get_errno(access(path(p), arg2)); 5640 unlock_user(p, arg1, 0); 5641 break; 5642 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5643 case TARGET_NR_faccessat: 5644 if (!(p = lock_user_string(arg2))) 5645 goto efault; 5646 ret = get_errno(sys_faccessat(arg1, p, arg3)); 5647 unlock_user(p, arg2, 0); 5648 break; 5649 #endif 5650 #ifdef TARGET_NR_nice /* not on alpha */ 5651 case TARGET_NR_nice: 5652 ret = get_errno(nice(arg1)); 5653 break; 5654 #endif 5655 #ifdef TARGET_NR_ftime 5656 case TARGET_NR_ftime: 5657 goto unimplemented; 5658 #endif 5659 case TARGET_NR_sync: 5660 
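/* sync() has no failure mode to report, so simply return success. */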
sync(); 5661 ret = 0; 5662 break; 5663 case TARGET_NR_kill: 5664 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5665 break; 5666 case TARGET_NR_rename: 5667 { 5668 void *p2; 5669 p = lock_user_string(arg1); 5670 p2 = lock_user_string(arg2); 5671 if (!p || !p2) 5672 ret = -TARGET_EFAULT; 5673 else 5674 ret = get_errno(rename(p, p2)); 5675 unlock_user(p2, arg2, 0); 5676 unlock_user(p, arg1, 0); 5677 } 5678 break; 5679 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 5680 case TARGET_NR_renameat: 5681 { 5682 void *p2; 5683 p = lock_user_string(arg2); 5684 p2 = lock_user_string(arg4); 5685 if (!p || !p2) 5686 ret = -TARGET_EFAULT; 5687 else 5688 ret = get_errno(sys_renameat(arg1, p, arg3, p2)); 5689 unlock_user(p2, arg4, 0); 5690 unlock_user(p, arg2, 0); 5691 } 5692 break; 5693 #endif 5694 case TARGET_NR_mkdir: 5695 if (!(p = lock_user_string(arg1))) 5696 goto efault; 5697 ret = get_errno(mkdir(p, arg2)); 5698 unlock_user(p, arg1, 0); 5699 break; 5700 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 5701 case TARGET_NR_mkdirat: 5702 if (!(p = lock_user_string(arg2))) 5703 goto efault; 5704 ret = get_errno(sys_mkdirat(arg1, p, arg3)); 5705 unlock_user(p, arg2, 0); 5706 break; 5707 #endif 5708 case TARGET_NR_rmdir: 5709 if (!(p = lock_user_string(arg1))) 5710 goto efault; 5711 ret = get_errno(rmdir(p)); 5712 unlock_user(p, arg1, 0); 5713 break; 5714 case TARGET_NR_dup: 5715 ret = get_errno(dup(arg1)); 5716 break; 5717 case TARGET_NR_pipe: 5718 ret = do_pipe(cpu_env, arg1, 0, 0); 5719 break; 5720 #ifdef TARGET_NR_pipe2 5721 case TARGET_NR_pipe2: 5722 ret = do_pipe(cpu_env, arg1, 5723 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5724 break; 5725 #endif 5726 case TARGET_NR_times: 5727 { 5728 struct target_tms *tmsp; 5729 struct tms tms; 5730 ret = get_errno(times(&tms)); 5731 if (arg1) { 5732 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5733 if (!tmsp) 5734 goto efault; 5735 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5736 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5737 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5738 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5739 } 5740 if (!is_error(ret)) 5741 ret = host_to_target_clock_t(ret); 5742 } 5743 break; 5744 #ifdef TARGET_NR_prof 5745 case TARGET_NR_prof: 5746 goto unimplemented; 5747 #endif 5748 #ifdef TARGET_NR_signal 5749 case TARGET_NR_signal: 5750 goto unimplemented; 5751 #endif 5752 case TARGET_NR_acct: 5753 if (arg1 == 0) { 5754 ret = get_errno(acct(NULL)); 5755 } else { 5756 if (!(p = lock_user_string(arg1))) 5757 goto efault; 5758 ret = get_errno(acct(path(p))); 5759 unlock_user(p, arg1, 0); 5760 } 5761 break; 5762 #ifdef TARGET_NR_umount2 /* not on alpha */ 5763 case TARGET_NR_umount2: 5764 if (!(p = lock_user_string(arg1))) 5765 goto efault; 5766 ret = get_errno(umount2(p, arg2)); 5767 unlock_user(p, arg1, 0); 5768 break; 5769 #endif 5770 #ifdef TARGET_NR_lock 5771 case TARGET_NR_lock: 5772 goto unimplemented; 5773 #endif 5774 case TARGET_NR_ioctl: 5775 ret = do_ioctl(arg1, arg2, arg3); 5776 break; 5777 case TARGET_NR_fcntl: 5778 ret = do_fcntl(arg1, arg2, arg3); 5779 break; 5780 #ifdef TARGET_NR_mpx 5781 case TARGET_NR_mpx: 5782 goto unimplemented; 5783 #endif 5784 case TARGET_NR_setpgid: 5785 ret = get_errno(setpgid(arg1, arg2)); 5786 break; 5787 #ifdef TARGET_NR_ulimit 5788 case TARGET_NR_ulimit: 5789 goto unimplemented; 5790 #endif 5791 #ifdef TARGET_NR_oldolduname 5792 case 
TARGET_NR_oldolduname: 5793 goto unimplemented; 5794 #endif 5795 case TARGET_NR_umask: 5796 ret = get_errno(umask(arg1)); 5797 break; 5798 case TARGET_NR_chroot: 5799 if (!(p = lock_user_string(arg1))) 5800 goto efault; 5801 ret = get_errno(chroot(p)); 5802 unlock_user(p, arg1, 0); 5803 break; 5804 case TARGET_NR_ustat: 5805 goto unimplemented; 5806 case TARGET_NR_dup2: 5807 ret = get_errno(dup2(arg1, arg2)); 5808 break; 5809 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5810 case TARGET_NR_dup3: 5811 ret = get_errno(dup3(arg1, arg2, arg3)); 5812 break; 5813 #endif 5814 #ifdef TARGET_NR_getppid /* not on alpha */ 5815 case TARGET_NR_getppid: 5816 ret = get_errno(getppid()); 5817 break; 5818 #endif 5819 case TARGET_NR_getpgrp: 5820 ret = get_errno(getpgrp()); 5821 break; 5822 case TARGET_NR_setsid: 5823 ret = get_errno(setsid()); 5824 break; 5825 #ifdef TARGET_NR_sigaction 5826 case TARGET_NR_sigaction: 5827 { 5828 #if defined(TARGET_ALPHA) 5829 struct target_sigaction act, oact, *pact = 0; 5830 struct target_old_sigaction *old_act; 5831 if (arg2) { 5832 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5833 goto efault; 5834 act._sa_handler = old_act->_sa_handler; 5835 target_siginitset(&act.sa_mask, old_act->sa_mask); 5836 act.sa_flags = old_act->sa_flags; 5837 act.sa_restorer = 0; 5838 unlock_user_struct(old_act, arg2, 0); 5839 pact = &act; 5840 } 5841 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5842 if (!is_error(ret) && arg3) { 5843 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5844 goto efault; 5845 old_act->_sa_handler = oact._sa_handler; 5846 old_act->sa_mask = oact.sa_mask.sig[0]; 5847 old_act->sa_flags = oact.sa_flags; 5848 unlock_user_struct(old_act, arg3, 1); 5849 } 5850 #elif defined(TARGET_MIPS) 5851 struct target_sigaction act, oact, *pact, *old_act; 5852 5853 if (arg2) { 5854 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5855 goto efault; 5856 act._sa_handler = old_act->_sa_handler; 5857 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5858 act.sa_flags = old_act->sa_flags; 5859 unlock_user_struct(old_act, arg2, 0); 5860 pact = &act; 5861 } else { 5862 pact = NULL; 5863 } 5864 5865 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5866 5867 if (!is_error(ret) && arg3) { 5868 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5869 goto efault; 5870 old_act->_sa_handler = oact._sa_handler; 5871 old_act->sa_flags = oact.sa_flags; 5872 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5873 old_act->sa_mask.sig[1] = 0; 5874 old_act->sa_mask.sig[2] = 0; 5875 old_act->sa_mask.sig[3] = 0; 5876 unlock_user_struct(old_act, arg3, 1); 5877 } 5878 #else 5879 struct target_old_sigaction *old_act; 5880 struct target_sigaction act, oact, *pact; 5881 if (arg2) { 5882 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5883 goto efault; 5884 act._sa_handler = old_act->_sa_handler; 5885 target_siginitset(&act.sa_mask, old_act->sa_mask); 5886 act.sa_flags = old_act->sa_flags; 5887 act.sa_restorer = old_act->sa_restorer; 5888 unlock_user_struct(old_act, arg2, 0); 5889 pact = &act; 5890 } else { 5891 pact = NULL; 5892 } 5893 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5894 if (!is_error(ret) && arg3) { 5895 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5896 goto efault; 5897 old_act->_sa_handler = oact._sa_handler; 5898 old_act->sa_mask = oact.sa_mask.sig[0]; 5899 old_act->sa_flags = oact.sa_flags; 5900 old_act->sa_restorer = oact.sa_restorer; 5901 unlock_user_struct(old_act, arg3, 1); 5902 } 5903 #endif 5904 } 5905 break; 5906 #endif 
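    /* rt_sigaction: unlike the old-style sigaction case above, most targets share
       the target_sigaction layout with the emulator, so the structs are simply
       locked in guest memory and handed to do_sigaction(), which performs the host
       conversion.  Alpha is special-cased because its ABI passes the restorer in
       arg5 and uses a distinct target_rt_sigaction layout. */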
5907 case TARGET_NR_rt_sigaction: 5908 { 5909 #if defined(TARGET_ALPHA) 5910 struct target_sigaction act, oact, *pact = 0; 5911 struct target_rt_sigaction *rt_act; 5912 /* ??? arg4 == sizeof(sigset_t). */ 5913 if (arg2) { 5914 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5915 goto efault; 5916 act._sa_handler = rt_act->_sa_handler; 5917 act.sa_mask = rt_act->sa_mask; 5918 act.sa_flags = rt_act->sa_flags; 5919 act.sa_restorer = arg5; 5920 unlock_user_struct(rt_act, arg2, 0); 5921 pact = &act; 5922 } 5923 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5924 if (!is_error(ret) && arg3) { 5925 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5926 goto efault; 5927 rt_act->_sa_handler = oact._sa_handler; 5928 rt_act->sa_mask = oact.sa_mask; 5929 rt_act->sa_flags = oact.sa_flags; 5930 unlock_user_struct(rt_act, arg3, 1); 5931 } 5932 #else 5933 struct target_sigaction *act; 5934 struct target_sigaction *oact; 5935 5936 if (arg2) { 5937 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5938 goto efault; 5939 } else 5940 act = NULL; 5941 if (arg3) { 5942 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5943 ret = -TARGET_EFAULT; 5944 goto rt_sigaction_fail; 5945 } 5946 } else 5947 oact = NULL; 5948 ret = get_errno(do_sigaction(arg1, act, oact)); 5949 rt_sigaction_fail: 5950 if (act) 5951 unlock_user_struct(act, arg2, 0); 5952 if (oact) 5953 unlock_user_struct(oact, arg3, 1); 5954 #endif 5955 } 5956 break; 5957 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5958 case TARGET_NR_sgetmask: 5959 { 5960 sigset_t cur_set; 5961 abi_ulong target_set; 5962 sigprocmask(0, NULL, &cur_set); 5963 host_to_target_old_sigset(&target_set, &cur_set); 5964 ret = target_set; 5965 } 5966 break; 5967 #endif 5968 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5969 case TARGET_NR_ssetmask: 5970 { 5971 sigset_t set, oset, cur_set; 5972 abi_ulong target_set = arg1; 5973 sigprocmask(0, NULL, &cur_set); 5974 target_to_host_old_sigset(&set, &target_set); 5975 sigorset(&set, &set, &cur_set); 5976 sigprocmask(SIG_SETMASK, &set, &oset); 5977 host_to_target_old_sigset(&target_set, &oset); 5978 ret = target_set; 5979 } 5980 break; 5981 #endif 5982 #ifdef TARGET_NR_sigprocmask 5983 case TARGET_NR_sigprocmask: 5984 { 5985 #if defined(TARGET_ALPHA) 5986 sigset_t set, oldset; 5987 abi_ulong mask; 5988 int how; 5989 5990 switch (arg1) { 5991 case TARGET_SIG_BLOCK: 5992 how = SIG_BLOCK; 5993 break; 5994 case TARGET_SIG_UNBLOCK: 5995 how = SIG_UNBLOCK; 5996 break; 5997 case TARGET_SIG_SETMASK: 5998 how = SIG_SETMASK; 5999 break; 6000 default: 6001 ret = -TARGET_EINVAL; 6002 goto fail; 6003 } 6004 mask = arg2; 6005 target_to_host_old_sigset(&set, &mask); 6006 6007 ret = get_errno(sigprocmask(how, &set, &oldset)); 6008 if (!is_error(ret)) { 6009 host_to_target_old_sigset(&mask, &oldset); 6010 ret = mask; 6011 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6012 } 6013 #else 6014 sigset_t set, oldset, *set_ptr; 6015 int how; 6016 6017 if (arg2) { 6018 switch (arg1) { 6019 case TARGET_SIG_BLOCK: 6020 how = SIG_BLOCK; 6021 break; 6022 case TARGET_SIG_UNBLOCK: 6023 how = SIG_UNBLOCK; 6024 break; 6025 case TARGET_SIG_SETMASK: 6026 how = SIG_SETMASK; 6027 break; 6028 default: 6029 ret = -TARGET_EINVAL; 6030 goto fail; 6031 } 6032 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6033 goto efault; 6034 target_to_host_old_sigset(&set, p); 6035 unlock_user(p, arg2, 0); 6036 set_ptr = &set; 6037 } else { 6038 how = 0; 6039 set_ptr = NULL; 6040 } 6041 ret = get_errno(sigprocmask(how, set_ptr, 
&oldset)); 6042 if (!is_error(ret) && arg3) { 6043 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6044 goto efault; 6045 host_to_target_old_sigset(p, &oldset); 6046 unlock_user(p, arg3, sizeof(target_sigset_t)); 6047 } 6048 #endif 6049 } 6050 break; 6051 #endif 6052 case TARGET_NR_rt_sigprocmask: 6053 { 6054 int how = arg1; 6055 sigset_t set, oldset, *set_ptr; 6056 6057 if (arg2) { 6058 switch(how) { 6059 case TARGET_SIG_BLOCK: 6060 how = SIG_BLOCK; 6061 break; 6062 case TARGET_SIG_UNBLOCK: 6063 how = SIG_UNBLOCK; 6064 break; 6065 case TARGET_SIG_SETMASK: 6066 how = SIG_SETMASK; 6067 break; 6068 default: 6069 ret = -TARGET_EINVAL; 6070 goto fail; 6071 } 6072 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6073 goto efault; 6074 target_to_host_sigset(&set, p); 6075 unlock_user(p, arg2, 0); 6076 set_ptr = &set; 6077 } else { 6078 how = 0; 6079 set_ptr = NULL; 6080 } 6081 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6082 if (!is_error(ret) && arg3) { 6083 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6084 goto efault; 6085 host_to_target_sigset(p, &oldset); 6086 unlock_user(p, arg3, sizeof(target_sigset_t)); 6087 } 6088 } 6089 break; 6090 #ifdef TARGET_NR_sigpending 6091 case TARGET_NR_sigpending: 6092 { 6093 sigset_t set; 6094 ret = get_errno(sigpending(&set)); 6095 if (!is_error(ret)) { 6096 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6097 goto efault; 6098 host_to_target_old_sigset(p, &set); 6099 unlock_user(p, arg1, sizeof(target_sigset_t)); 6100 } 6101 } 6102 break; 6103 #endif 6104 case TARGET_NR_rt_sigpending: 6105 { 6106 sigset_t set; 6107 ret = get_errno(sigpending(&set)); 6108 if (!is_error(ret)) { 6109 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6110 goto efault; 6111 host_to_target_sigset(p, &set); 6112 unlock_user(p, arg1, sizeof(target_sigset_t)); 6113 } 6114 } 6115 break; 6116 #ifdef TARGET_NR_sigsuspend 6117 case TARGET_NR_sigsuspend: 6118 { 6119 sigset_t set; 6120 #if defined(TARGET_ALPHA) 6121 abi_ulong mask = arg1; 6122 target_to_host_old_sigset(&set, &mask); 6123 #else 6124 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6125 goto efault; 6126 target_to_host_old_sigset(&set, p); 6127 unlock_user(p, arg1, 0); 6128 #endif 6129 ret = get_errno(sigsuspend(&set)); 6130 } 6131 break; 6132 #endif 6133 case TARGET_NR_rt_sigsuspend: 6134 { 6135 sigset_t set; 6136 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6137 goto efault; 6138 target_to_host_sigset(&set, p); 6139 unlock_user(p, arg1, 0); 6140 ret = get_errno(sigsuspend(&set)); 6141 } 6142 break; 6143 case TARGET_NR_rt_sigtimedwait: 6144 { 6145 sigset_t set; 6146 struct timespec uts, *puts; 6147 siginfo_t uinfo; 6148 6149 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6150 goto efault; 6151 target_to_host_sigset(&set, p); 6152 unlock_user(p, arg1, 0); 6153 if (arg3) { 6154 puts = &uts; 6155 target_to_host_timespec(puts, arg3); 6156 } else { 6157 puts = NULL; 6158 } 6159 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6160 if (!is_error(ret) && arg2) { 6161 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 6162 goto efault; 6163 host_to_target_siginfo(p, &uinfo); 6164 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6165 } 6166 } 6167 break; 6168 case TARGET_NR_rt_sigqueueinfo: 6169 { 6170 siginfo_t uinfo; 6171 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6172 goto efault; 6173 
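            /* arg3 actually points at a target siginfo; the length locked above is
               sizeof(target_sigset_t), which arguably ought to be
               sizeof(target_siginfo_t), and the buffer is released via arg1 rather
               than arg3.  target_to_host_siginfo() below converts the guest layout
               into the host siginfo_t before it is queued. */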
target_to_host_siginfo(&uinfo, p); 6174 unlock_user(p, arg1, 0); 6175 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6176 } 6177 break; 6178 #ifdef TARGET_NR_sigreturn 6179 case TARGET_NR_sigreturn: 6180 /* NOTE: ret is eax, so not transcoding must be done */ 6181 ret = do_sigreturn(cpu_env); 6182 break; 6183 #endif 6184 case TARGET_NR_rt_sigreturn: 6185 /* NOTE: ret is eax, so not transcoding must be done */ 6186 ret = do_rt_sigreturn(cpu_env); 6187 break; 6188 case TARGET_NR_sethostname: 6189 if (!(p = lock_user_string(arg1))) 6190 goto efault; 6191 ret = get_errno(sethostname(p, arg2)); 6192 unlock_user(p, arg1, 0); 6193 break; 6194 case TARGET_NR_setrlimit: 6195 { 6196 int resource = target_to_host_resource(arg1); 6197 struct target_rlimit *target_rlim; 6198 struct rlimit rlim; 6199 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6200 goto efault; 6201 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6202 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6203 unlock_user_struct(target_rlim, arg2, 0); 6204 ret = get_errno(setrlimit(resource, &rlim)); 6205 } 6206 break; 6207 case TARGET_NR_getrlimit: 6208 { 6209 int resource = target_to_host_resource(arg1); 6210 struct target_rlimit *target_rlim; 6211 struct rlimit rlim; 6212 6213 ret = get_errno(getrlimit(resource, &rlim)); 6214 if (!is_error(ret)) { 6215 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6216 goto efault; 6217 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6218 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6219 unlock_user_struct(target_rlim, arg2, 1); 6220 } 6221 } 6222 break; 6223 case TARGET_NR_getrusage: 6224 { 6225 struct rusage rusage; 6226 ret = get_errno(getrusage(arg1, &rusage)); 6227 if (!is_error(ret)) { 6228 host_to_target_rusage(arg2, &rusage); 6229 } 6230 } 6231 break; 6232 case TARGET_NR_gettimeofday: 6233 { 6234 struct timeval tv; 6235 ret = get_errno(gettimeofday(&tv, NULL)); 6236 if (!is_error(ret)) { 6237 if (copy_to_user_timeval(arg1, &tv)) 6238 goto efault; 6239 } 6240 } 6241 break; 6242 case TARGET_NR_settimeofday: 6243 { 6244 struct timeval tv; 6245 if (copy_from_user_timeval(&tv, arg1)) 6246 goto efault; 6247 ret = get_errno(settimeofday(&tv, NULL)); 6248 } 6249 break; 6250 #if defined(TARGET_NR_select) 6251 case TARGET_NR_select: 6252 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 6253 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6254 #else 6255 { 6256 struct target_sel_arg_struct *sel; 6257 abi_ulong inp, outp, exp, tvp; 6258 long nsel; 6259 6260 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6261 goto efault; 6262 nsel = tswapal(sel->n); 6263 inp = tswapal(sel->inp); 6264 outp = tswapal(sel->outp); 6265 exp = tswapal(sel->exp); 6266 tvp = tswapal(sel->tvp); 6267 unlock_user_struct(sel, arg1, 0); 6268 ret = do_select(nsel, inp, outp, exp, tvp); 6269 } 6270 #endif 6271 break; 6272 #endif 6273 #ifdef TARGET_NR_pselect6 6274 case TARGET_NR_pselect6: 6275 { 6276 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6277 fd_set rfds, wfds, efds; 6278 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6279 struct timespec ts, *ts_ptr; 6280 6281 /* 6282 * The 6th arg is actually two args smashed together, 6283 * so we cannot use the C library. 
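             * The raw pselect6 syscall expects that sixth argument to point
             * at a { const sigset_t *set; size_t size; } pair, which is what
             * the local "sig" structure below rebuilds from guest memory.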
6284 */ 6285 sigset_t set; 6286 struct { 6287 sigset_t *set; 6288 size_t size; 6289 } sig, *sig_ptr; 6290 6291 abi_ulong arg_sigset, arg_sigsize, *arg7; 6292 target_sigset_t *target_sigset; 6293 6294 n = arg1; 6295 rfd_addr = arg2; 6296 wfd_addr = arg3; 6297 efd_addr = arg4; 6298 ts_addr = arg5; 6299 6300 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6301 if (ret) { 6302 goto fail; 6303 } 6304 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6305 if (ret) { 6306 goto fail; 6307 } 6308 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6309 if (ret) { 6310 goto fail; 6311 } 6312 6313 /* 6314 * This takes a timespec, and not a timeval, so we cannot 6315 * use the do_select() helper ... 6316 */ 6317 if (ts_addr) { 6318 if (target_to_host_timespec(&ts, ts_addr)) { 6319 goto efault; 6320 } 6321 ts_ptr = &ts; 6322 } else { 6323 ts_ptr = NULL; 6324 } 6325 6326 /* Extract the two packed args for the sigset */ 6327 if (arg6) { 6328 sig_ptr = &sig; 6329 sig.size = _NSIG / 8; 6330 6331 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6332 if (!arg7) { 6333 goto efault; 6334 } 6335 arg_sigset = tswapal(arg7[0]); 6336 arg_sigsize = tswapal(arg7[1]); 6337 unlock_user(arg7, arg6, 0); 6338 6339 if (arg_sigset) { 6340 sig.set = &set; 6341 if (arg_sigsize != sizeof(*target_sigset)) { 6342 /* Like the kernel, we enforce correct size sigsets */ 6343 ret = -TARGET_EINVAL; 6344 goto fail; 6345 } 6346 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6347 sizeof(*target_sigset), 1); 6348 if (!target_sigset) { 6349 goto efault; 6350 } 6351 target_to_host_sigset(&set, target_sigset); 6352 unlock_user(target_sigset, arg_sigset, 0); 6353 } else { 6354 sig.set = NULL; 6355 } 6356 } else { 6357 sig_ptr = NULL; 6358 } 6359 6360 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6361 ts_ptr, sig_ptr)); 6362 6363 if (!is_error(ret)) { 6364 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6365 goto efault; 6366 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6367 goto efault; 6368 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6369 goto efault; 6370 6371 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6372 goto efault; 6373 } 6374 } 6375 break; 6376 #endif 6377 case TARGET_NR_symlink: 6378 { 6379 void *p2; 6380 p = lock_user_string(arg1); 6381 p2 = lock_user_string(arg2); 6382 if (!p || !p2) 6383 ret = -TARGET_EFAULT; 6384 else 6385 ret = get_errno(symlink(p, p2)); 6386 unlock_user(p2, arg2, 0); 6387 unlock_user(p, arg1, 0); 6388 } 6389 break; 6390 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 6391 case TARGET_NR_symlinkat: 6392 { 6393 void *p2; 6394 p = lock_user_string(arg1); 6395 p2 = lock_user_string(arg3); 6396 if (!p || !p2) 6397 ret = -TARGET_EFAULT; 6398 else 6399 ret = get_errno(sys_symlinkat(p, arg2, p2)); 6400 unlock_user(p2, arg3, 0); 6401 unlock_user(p, arg1, 0); 6402 } 6403 break; 6404 #endif 6405 #ifdef TARGET_NR_oldlstat 6406 case TARGET_NR_oldlstat: 6407 goto unimplemented; 6408 #endif 6409 case TARGET_NR_readlink: 6410 { 6411 void *p2, *temp; 6412 p = lock_user_string(arg1); 6413 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6414 if (!p || !p2) 6415 ret = -TARGET_EFAULT; 6416 else { 6417 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) { 6418 char real[PATH_MAX]; 6419 temp = realpath(exec_path,real); 6420 ret = (temp==NULL) ? 
get_errno(-1) : strlen(real) ; 6421 snprintf((char *)p2, arg3, "%s", real); 6422 } 6423 else 6424 ret = get_errno(readlink(path(p), p2, arg3)); 6425 } 6426 unlock_user(p2, arg2, ret); 6427 unlock_user(p, arg1, 0); 6428 } 6429 break; 6430 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 6431 case TARGET_NR_readlinkat: 6432 { 6433 void *p2; 6434 p = lock_user_string(arg2); 6435 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6436 if (!p || !p2) 6437 ret = -TARGET_EFAULT; 6438 else 6439 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); 6440 unlock_user(p2, arg3, ret); 6441 unlock_user(p, arg2, 0); 6442 } 6443 break; 6444 #endif 6445 #ifdef TARGET_NR_uselib 6446 case TARGET_NR_uselib: 6447 goto unimplemented; 6448 #endif 6449 #ifdef TARGET_NR_swapon 6450 case TARGET_NR_swapon: 6451 if (!(p = lock_user_string(arg1))) 6452 goto efault; 6453 ret = get_errno(swapon(p, arg2)); 6454 unlock_user(p, arg1, 0); 6455 break; 6456 #endif 6457 case TARGET_NR_reboot: 6458 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6459 /* arg4 must be ignored in all other cases */ 6460 p = lock_user_string(arg4); 6461 if (!p) { 6462 goto efault; 6463 } 6464 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6465 unlock_user(p, arg4, 0); 6466 } else { 6467 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6468 } 6469 break; 6470 #ifdef TARGET_NR_readdir 6471 case TARGET_NR_readdir: 6472 goto unimplemented; 6473 #endif 6474 #ifdef TARGET_NR_mmap 6475 case TARGET_NR_mmap: 6476 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \ 6477 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6478 || defined(TARGET_S390X) 6479 { 6480 abi_ulong *v; 6481 abi_ulong v1, v2, v3, v4, v5, v6; 6482 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6483 goto efault; 6484 v1 = tswapal(v[0]); 6485 v2 = tswapal(v[1]); 6486 v3 = tswapal(v[2]); 6487 v4 = tswapal(v[3]); 6488 v5 = tswapal(v[4]); 6489 v6 = tswapal(v[5]); 6490 unlock_user(v, arg1, 0); 6491 ret = get_errno(target_mmap(v1, v2, v3, 6492 target_to_host_bitmask(v4, mmap_flags_tbl), 6493 v5, v6)); 6494 } 6495 #else 6496 ret = get_errno(target_mmap(arg1, arg2, arg3, 6497 target_to_host_bitmask(arg4, mmap_flags_tbl), 6498 arg5, 6499 arg6)); 6500 #endif 6501 break; 6502 #endif 6503 #ifdef TARGET_NR_mmap2 6504 case TARGET_NR_mmap2: 6505 #ifndef MMAP_SHIFT 6506 #define MMAP_SHIFT 12 6507 #endif 6508 ret = get_errno(target_mmap(arg1, arg2, arg3, 6509 target_to_host_bitmask(arg4, mmap_flags_tbl), 6510 arg5, 6511 arg6 << MMAP_SHIFT)); 6512 break; 6513 #endif 6514 case TARGET_NR_munmap: 6515 ret = get_errno(target_munmap(arg1, arg2)); 6516 break; 6517 case TARGET_NR_mprotect: 6518 { 6519 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 6520 /* Special hack to detect libc making the stack executable. */ 6521 if ((arg3 & PROT_GROWSDOWN) 6522 && arg1 >= ts->info->stack_limit 6523 && arg1 <= ts->info->start_stack) { 6524 arg3 &= ~PROT_GROWSDOWN; 6525 arg2 = arg2 + arg1 - ts->info->stack_limit; 6526 arg1 = ts->info->stack_limit; 6527 } 6528 } 6529 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6530 break; 6531 #ifdef TARGET_NR_mremap 6532 case TARGET_NR_mremap: 6533 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6534 break; 6535 #endif 6536 /* ??? msync/mlock/munlock are broken for softmmu. 
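       These handlers pass g2h(guest_addr) straight to the host call, which is
       only valid in linux-user mode where guest memory is directly mapped into
       the emulator's own address space.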
*/ 6537 #ifdef TARGET_NR_msync 6538 case TARGET_NR_msync: 6539 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6540 break; 6541 #endif 6542 #ifdef TARGET_NR_mlock 6543 case TARGET_NR_mlock: 6544 ret = get_errno(mlock(g2h(arg1), arg2)); 6545 break; 6546 #endif 6547 #ifdef TARGET_NR_munlock 6548 case TARGET_NR_munlock: 6549 ret = get_errno(munlock(g2h(arg1), arg2)); 6550 break; 6551 #endif 6552 #ifdef TARGET_NR_mlockall 6553 case TARGET_NR_mlockall: 6554 ret = get_errno(mlockall(arg1)); 6555 break; 6556 #endif 6557 #ifdef TARGET_NR_munlockall 6558 case TARGET_NR_munlockall: 6559 ret = get_errno(munlockall()); 6560 break; 6561 #endif 6562 case TARGET_NR_truncate: 6563 if (!(p = lock_user_string(arg1))) 6564 goto efault; 6565 ret = get_errno(truncate(p, arg2)); 6566 unlock_user(p, arg1, 0); 6567 break; 6568 case TARGET_NR_ftruncate: 6569 ret = get_errno(ftruncate(arg1, arg2)); 6570 break; 6571 case TARGET_NR_fchmod: 6572 ret = get_errno(fchmod(arg1, arg2)); 6573 break; 6574 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 6575 case TARGET_NR_fchmodat: 6576 if (!(p = lock_user_string(arg2))) 6577 goto efault; 6578 ret = get_errno(sys_fchmodat(arg1, p, arg3)); 6579 unlock_user(p, arg2, 0); 6580 break; 6581 #endif 6582 case TARGET_NR_getpriority: 6583 /* Note that negative values are valid for getpriority, so we must 6584 differentiate based on errno settings. */ 6585 errno = 0; 6586 ret = getpriority(arg1, arg2); 6587 if (ret == -1 && errno != 0) { 6588 ret = -host_to_target_errno(errno); 6589 break; 6590 } 6591 #ifdef TARGET_ALPHA 6592 /* Return value is the unbiased priority. Signal no error. */ 6593 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6594 #else 6595 /* Return value is a biased priority to avoid negative numbers. */ 6596 ret = 20 - ret; 6597 #endif 6598 break; 6599 case TARGET_NR_setpriority: 6600 ret = get_errno(setpriority(arg1, arg2, arg3)); 6601 break; 6602 #ifdef TARGET_NR_profil 6603 case TARGET_NR_profil: 6604 goto unimplemented; 6605 #endif 6606 case TARGET_NR_statfs: 6607 if (!(p = lock_user_string(arg1))) 6608 goto efault; 6609 ret = get_errno(statfs(path(p), &stfs)); 6610 unlock_user(p, arg1, 0); 6611 convert_statfs: 6612 if (!is_error(ret)) { 6613 struct target_statfs *target_stfs; 6614 6615 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6616 goto efault; 6617 __put_user(stfs.f_type, &target_stfs->f_type); 6618 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6619 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6620 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6621 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6622 __put_user(stfs.f_files, &target_stfs->f_files); 6623 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6624 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6625 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6626 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6627 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6628 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6629 unlock_user_struct(target_stfs, arg2, 1); 6630 } 6631 break; 6632 case TARGET_NR_fstatfs: 6633 ret = get_errno(fstatfs(arg1, &stfs)); 6634 goto convert_statfs; 6635 #ifdef TARGET_NR_statfs64 6636 case TARGET_NR_statfs64: 6637 if (!(p = lock_user_string(arg1))) 6638 goto efault; 6639 ret = get_errno(statfs(path(p), &stfs)); 6640 unlock_user(p, arg1, 0); 6641 convert_statfs64: 6642 if (!is_error(ret)) { 6643 struct target_statfs64 *target_stfs; 6644 6645 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 
6646 goto efault; 6647 __put_user(stfs.f_type, &target_stfs->f_type); 6648 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6649 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6650 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6651 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6652 __put_user(stfs.f_files, &target_stfs->f_files); 6653 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6654 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6655 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6656 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6657 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6658 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6659 unlock_user_struct(target_stfs, arg3, 1); 6660 } 6661 break; 6662 case TARGET_NR_fstatfs64: 6663 ret = get_errno(fstatfs(arg1, &stfs)); 6664 goto convert_statfs64; 6665 #endif 6666 #ifdef TARGET_NR_ioperm 6667 case TARGET_NR_ioperm: 6668 goto unimplemented; 6669 #endif 6670 #ifdef TARGET_NR_socketcall 6671 case TARGET_NR_socketcall: 6672 ret = do_socketcall(arg1, arg2); 6673 break; 6674 #endif 6675 #ifdef TARGET_NR_accept 6676 case TARGET_NR_accept: 6677 ret = do_accept(arg1, arg2, arg3); 6678 break; 6679 #endif 6680 #ifdef TARGET_NR_bind 6681 case TARGET_NR_bind: 6682 ret = do_bind(arg1, arg2, arg3); 6683 break; 6684 #endif 6685 #ifdef TARGET_NR_connect 6686 case TARGET_NR_connect: 6687 ret = do_connect(arg1, arg2, arg3); 6688 break; 6689 #endif 6690 #ifdef TARGET_NR_getpeername 6691 case TARGET_NR_getpeername: 6692 ret = do_getpeername(arg1, arg2, arg3); 6693 break; 6694 #endif 6695 #ifdef TARGET_NR_getsockname 6696 case TARGET_NR_getsockname: 6697 ret = do_getsockname(arg1, arg2, arg3); 6698 break; 6699 #endif 6700 #ifdef TARGET_NR_getsockopt 6701 case TARGET_NR_getsockopt: 6702 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6703 break; 6704 #endif 6705 #ifdef TARGET_NR_listen 6706 case TARGET_NR_listen: 6707 ret = get_errno(listen(arg1, arg2)); 6708 break; 6709 #endif 6710 #ifdef TARGET_NR_recv 6711 case TARGET_NR_recv: 6712 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6713 break; 6714 #endif 6715 #ifdef TARGET_NR_recvfrom 6716 case TARGET_NR_recvfrom: 6717 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6718 break; 6719 #endif 6720 #ifdef TARGET_NR_recvmsg 6721 case TARGET_NR_recvmsg: 6722 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6723 break; 6724 #endif 6725 #ifdef TARGET_NR_send 6726 case TARGET_NR_send: 6727 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6728 break; 6729 #endif 6730 #ifdef TARGET_NR_sendmsg 6731 case TARGET_NR_sendmsg: 6732 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6733 break; 6734 #endif 6735 #ifdef TARGET_NR_sendto 6736 case TARGET_NR_sendto: 6737 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6738 break; 6739 #endif 6740 #ifdef TARGET_NR_shutdown 6741 case TARGET_NR_shutdown: 6742 ret = get_errno(shutdown(arg1, arg2)); 6743 break; 6744 #endif 6745 #ifdef TARGET_NR_socket 6746 case TARGET_NR_socket: 6747 ret = do_socket(arg1, arg2, arg3); 6748 break; 6749 #endif 6750 #ifdef TARGET_NR_socketpair 6751 case TARGET_NR_socketpair: 6752 ret = do_socketpair(arg1, arg2, arg3, arg4); 6753 break; 6754 #endif 6755 #ifdef TARGET_NR_setsockopt 6756 case TARGET_NR_setsockopt: 6757 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6758 break; 6759 #endif 6760 6761 case TARGET_NR_syslog: 6762 if (!(p = lock_user_string(arg2))) 6763 goto efault; 6764 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6765 unlock_user(p, arg2, 0); 6766 
break; 6767 6768 case TARGET_NR_setitimer: 6769 { 6770 struct itimerval value, ovalue, *pvalue; 6771 6772 if (arg2) { 6773 pvalue = &value; 6774 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6775 || copy_from_user_timeval(&pvalue->it_value, 6776 arg2 + sizeof(struct target_timeval))) 6777 goto efault; 6778 } else { 6779 pvalue = NULL; 6780 } 6781 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6782 if (!is_error(ret) && arg3) { 6783 if (copy_to_user_timeval(arg3, 6784 &ovalue.it_interval) 6785 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6786 &ovalue.it_value)) 6787 goto efault; 6788 } 6789 } 6790 break; 6791 case TARGET_NR_getitimer: 6792 { 6793 struct itimerval value; 6794 6795 ret = get_errno(getitimer(arg1, &value)); 6796 if (!is_error(ret) && arg2) { 6797 if (copy_to_user_timeval(arg2, 6798 &value.it_interval) 6799 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6800 &value.it_value)) 6801 goto efault; 6802 } 6803 } 6804 break; 6805 case TARGET_NR_stat: 6806 if (!(p = lock_user_string(arg1))) 6807 goto efault; 6808 ret = get_errno(stat(path(p), &st)); 6809 unlock_user(p, arg1, 0); 6810 goto do_stat; 6811 case TARGET_NR_lstat: 6812 if (!(p = lock_user_string(arg1))) 6813 goto efault; 6814 ret = get_errno(lstat(path(p), &st)); 6815 unlock_user(p, arg1, 0); 6816 goto do_stat; 6817 case TARGET_NR_fstat: 6818 { 6819 ret = get_errno(fstat(arg1, &st)); 6820 do_stat: 6821 if (!is_error(ret)) { 6822 struct target_stat *target_st; 6823 6824 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6825 goto efault; 6826 memset(target_st, 0, sizeof(*target_st)); 6827 __put_user(st.st_dev, &target_st->st_dev); 6828 __put_user(st.st_ino, &target_st->st_ino); 6829 __put_user(st.st_mode, &target_st->st_mode); 6830 __put_user(st.st_uid, &target_st->st_uid); 6831 __put_user(st.st_gid, &target_st->st_gid); 6832 __put_user(st.st_nlink, &target_st->st_nlink); 6833 __put_user(st.st_rdev, &target_st->st_rdev); 6834 __put_user(st.st_size, &target_st->st_size); 6835 __put_user(st.st_blksize, &target_st->st_blksize); 6836 __put_user(st.st_blocks, &target_st->st_blocks); 6837 __put_user(st.st_atime, &target_st->target_st_atime); 6838 __put_user(st.st_mtime, &target_st->target_st_mtime); 6839 __put_user(st.st_ctime, &target_st->target_st_ctime); 6840 unlock_user_struct(target_st, arg2, 1); 6841 } 6842 } 6843 break; 6844 #ifdef TARGET_NR_olduname 6845 case TARGET_NR_olduname: 6846 goto unimplemented; 6847 #endif 6848 #ifdef TARGET_NR_iopl 6849 case TARGET_NR_iopl: 6850 goto unimplemented; 6851 #endif 6852 case TARGET_NR_vhangup: 6853 ret = get_errno(vhangup()); 6854 break; 6855 #ifdef TARGET_NR_idle 6856 case TARGET_NR_idle: 6857 goto unimplemented; 6858 #endif 6859 #ifdef TARGET_NR_syscall 6860 case TARGET_NR_syscall: 6861 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6862 arg6, arg7, arg8, 0); 6863 break; 6864 #endif 6865 case TARGET_NR_wait4: 6866 { 6867 int status; 6868 abi_long status_ptr = arg2; 6869 struct rusage rusage, *rusage_ptr; 6870 abi_ulong target_rusage = arg4; 6871 if (target_rusage) 6872 rusage_ptr = &rusage; 6873 else 6874 rusage_ptr = NULL; 6875 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6876 if (!is_error(ret)) { 6877 if (status_ptr && ret) { 6878 status = host_to_target_waitstatus(status); 6879 if (put_user_s32(status, status_ptr)) 6880 goto efault; 6881 } 6882 if (target_rusage) 6883 host_to_target_rusage(target_rusage, &rusage); 6884 } 6885 } 6886 break; 6887 #ifdef TARGET_NR_swapoff 6888 case TARGET_NR_swapoff: 
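        /* swapoff follows the pattern used by most path-taking syscalls in this
           switch: lock_user_string() pins a copy of the NUL-terminated guest path,
           get_errno() turns a failing host call into a negated target errno, and
           unlock_user() releases the pinned string without writing anything back. */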
6889 if (!(p = lock_user_string(arg1))) 6890 goto efault; 6891 ret = get_errno(swapoff(p)); 6892 unlock_user(p, arg1, 0); 6893 break; 6894 #endif 6895 case TARGET_NR_sysinfo: 6896 { 6897 struct target_sysinfo *target_value; 6898 struct sysinfo value; 6899 ret = get_errno(sysinfo(&value)); 6900 if (!is_error(ret) && arg1) 6901 { 6902 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6903 goto efault; 6904 __put_user(value.uptime, &target_value->uptime); 6905 __put_user(value.loads[0], &target_value->loads[0]); 6906 __put_user(value.loads[1], &target_value->loads[1]); 6907 __put_user(value.loads[2], &target_value->loads[2]); 6908 __put_user(value.totalram, &target_value->totalram); 6909 __put_user(value.freeram, &target_value->freeram); 6910 __put_user(value.sharedram, &target_value->sharedram); 6911 __put_user(value.bufferram, &target_value->bufferram); 6912 __put_user(value.totalswap, &target_value->totalswap); 6913 __put_user(value.freeswap, &target_value->freeswap); 6914 __put_user(value.procs, &target_value->procs); 6915 __put_user(value.totalhigh, &target_value->totalhigh); 6916 __put_user(value.freehigh, &target_value->freehigh); 6917 __put_user(value.mem_unit, &target_value->mem_unit); 6918 unlock_user_struct(target_value, arg1, 1); 6919 } 6920 } 6921 break; 6922 #ifdef TARGET_NR_ipc 6923 case TARGET_NR_ipc: 6924 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6925 break; 6926 #endif 6927 #ifdef TARGET_NR_semget 6928 case TARGET_NR_semget: 6929 ret = get_errno(semget(arg1, arg2, arg3)); 6930 break; 6931 #endif 6932 #ifdef TARGET_NR_semop 6933 case TARGET_NR_semop: 6934 ret = get_errno(do_semop(arg1, arg2, arg3)); 6935 break; 6936 #endif 6937 #ifdef TARGET_NR_semctl 6938 case TARGET_NR_semctl: 6939 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 6940 break; 6941 #endif 6942 #ifdef TARGET_NR_msgctl 6943 case TARGET_NR_msgctl: 6944 ret = do_msgctl(arg1, arg2, arg3); 6945 break; 6946 #endif 6947 #ifdef TARGET_NR_msgget 6948 case TARGET_NR_msgget: 6949 ret = get_errno(msgget(arg1, arg2)); 6950 break; 6951 #endif 6952 #ifdef TARGET_NR_msgrcv 6953 case TARGET_NR_msgrcv: 6954 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 6955 break; 6956 #endif 6957 #ifdef TARGET_NR_msgsnd 6958 case TARGET_NR_msgsnd: 6959 ret = do_msgsnd(arg1, arg2, arg3, arg4); 6960 break; 6961 #endif 6962 #ifdef TARGET_NR_shmget 6963 case TARGET_NR_shmget: 6964 ret = get_errno(shmget(arg1, arg2, arg3)); 6965 break; 6966 #endif 6967 #ifdef TARGET_NR_shmctl 6968 case TARGET_NR_shmctl: 6969 ret = do_shmctl(arg1, arg2, arg3); 6970 break; 6971 #endif 6972 #ifdef TARGET_NR_shmat 6973 case TARGET_NR_shmat: 6974 ret = do_shmat(arg1, arg2, arg3); 6975 break; 6976 #endif 6977 #ifdef TARGET_NR_shmdt 6978 case TARGET_NR_shmdt: 6979 ret = do_shmdt(arg1); 6980 break; 6981 #endif 6982 case TARGET_NR_fsync: 6983 ret = get_errno(fsync(arg1)); 6984 break; 6985 case TARGET_NR_clone: 6986 #if defined(TARGET_SH4) || defined(TARGET_ALPHA) 6987 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 6988 #elif defined(TARGET_CRIS) 6989 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); 6990 #elif defined(TARGET_MICROBLAZE) 6991 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 6992 #elif defined(TARGET_S390X) 6993 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 6994 #else 6995 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 6996 #endif 6997 break; 6998 #ifdef __NR_exit_group 6999 /* new thread calls */ 7000 case TARGET_NR_exit_group: 7001 
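        /* exit_group() terminates the whole guest process (all threads), so the
           gprof profile is flushed and the gdbstub is told about the exit status
           before the host call -- nothing after it is expected to run. */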
#ifdef TARGET_GPROF 7002 _mcleanup(); 7003 #endif 7004 gdb_exit(cpu_env, arg1); 7005 ret = get_errno(exit_group(arg1)); 7006 break; 7007 #endif 7008 case TARGET_NR_setdomainname: 7009 if (!(p = lock_user_string(arg1))) 7010 goto efault; 7011 ret = get_errno(setdomainname(p, arg2)); 7012 unlock_user(p, arg1, 0); 7013 break; 7014 case TARGET_NR_uname: 7015 /* no need to transcode because we use the linux syscall */ 7016 { 7017 struct new_utsname * buf; 7018 7019 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 7020 goto efault; 7021 ret = get_errno(sys_uname(buf)); 7022 if (!is_error(ret)) { 7023 /* Overrite the native machine name with whatever is being 7024 emulated. */ 7025 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 7026 /* Allow the user to override the reported release. */ 7027 if (qemu_uname_release && *qemu_uname_release) 7028 strcpy (buf->release, qemu_uname_release); 7029 } 7030 unlock_user_struct(buf, arg1, 1); 7031 } 7032 break; 7033 #ifdef TARGET_I386 7034 case TARGET_NR_modify_ldt: 7035 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7036 break; 7037 #if !defined(TARGET_X86_64) 7038 case TARGET_NR_vm86old: 7039 goto unimplemented; 7040 case TARGET_NR_vm86: 7041 ret = do_vm86(cpu_env, arg1, arg2); 7042 break; 7043 #endif 7044 #endif 7045 case TARGET_NR_adjtimex: 7046 goto unimplemented; 7047 #ifdef TARGET_NR_create_module 7048 case TARGET_NR_create_module: 7049 #endif 7050 case TARGET_NR_init_module: 7051 case TARGET_NR_delete_module: 7052 #ifdef TARGET_NR_get_kernel_syms 7053 case TARGET_NR_get_kernel_syms: 7054 #endif 7055 goto unimplemented; 7056 case TARGET_NR_quotactl: 7057 goto unimplemented; 7058 case TARGET_NR_getpgid: 7059 ret = get_errno(getpgid(arg1)); 7060 break; 7061 case TARGET_NR_fchdir: 7062 ret = get_errno(fchdir(arg1)); 7063 break; 7064 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7065 case TARGET_NR_bdflush: 7066 goto unimplemented; 7067 #endif 7068 #ifdef TARGET_NR_sysfs 7069 case TARGET_NR_sysfs: 7070 goto unimplemented; 7071 #endif 7072 case TARGET_NR_personality: 7073 ret = get_errno(personality(arg1)); 7074 break; 7075 #ifdef TARGET_NR_afs_syscall 7076 case TARGET_NR_afs_syscall: 7077 goto unimplemented; 7078 #endif 7079 #ifdef TARGET_NR__llseek /* Not on alpha */ 7080 case TARGET_NR__llseek: 7081 { 7082 int64_t res; 7083 #if !defined(__NR_llseek) 7084 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7085 if (res == -1) { 7086 ret = get_errno(res); 7087 } else { 7088 ret = 0; 7089 } 7090 #else 7091 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7092 #endif 7093 if ((ret == 0) && put_user_s64(res, arg4)) { 7094 goto efault; 7095 } 7096 } 7097 break; 7098 #endif 7099 case TARGET_NR_getdents: 7100 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7101 { 7102 struct target_dirent *target_dirp; 7103 struct linux_dirent *dirp; 7104 abi_long count = arg3; 7105 7106 dirp = malloc(count); 7107 if (!dirp) { 7108 ret = -TARGET_ENOMEM; 7109 goto fail; 7110 } 7111 7112 ret = get_errno(sys_getdents(arg1, dirp, count)); 7113 if (!is_error(ret)) { 7114 struct linux_dirent *de; 7115 struct target_dirent *tde; 7116 int len = ret; 7117 int reclen, treclen; 7118 int count1, tnamelen; 7119 7120 count1 = 0; 7121 de = dirp; 7122 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7123 goto efault; 7124 tde = target_dirp; 7125 while (len > 0) { 7126 reclen = de->d_reclen; 7127 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7128 assert(tnamelen >= 0); 7129 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7130 
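                /* Because the target dirent layout here is narrower than the
                   host's (32-bit d_ino/d_off vs. the host's long fields), treclen
                   can never exceed reclen, so the repacked records are guaranteed
                   to fit in the guest buffer of 'count' bytes -- which is what the
                   assert checks. */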
assert(count1 + treclen <= count); 7131 tde->d_reclen = tswap16(treclen); 7132 tde->d_ino = tswapal(de->d_ino); 7133 tde->d_off = tswapal(de->d_off); 7134 memcpy(tde->d_name, de->d_name, tnamelen); 7135 de = (struct linux_dirent *)((char *)de + reclen); 7136 len -= reclen; 7137 tde = (struct target_dirent *)((char *)tde + treclen); 7138 count1 += treclen; 7139 } 7140 ret = count1; 7141 unlock_user(target_dirp, arg2, ret); 7142 } 7143 free(dirp); 7144 } 7145 #else 7146 { 7147 struct linux_dirent *dirp; 7148 abi_long count = arg3; 7149 7150 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7151 goto efault; 7152 ret = get_errno(sys_getdents(arg1, dirp, count)); 7153 if (!is_error(ret)) { 7154 struct linux_dirent *de; 7155 int len = ret; 7156 int reclen; 7157 de = dirp; 7158 while (len > 0) { 7159 reclen = de->d_reclen; 7160 if (reclen > len) 7161 break; 7162 de->d_reclen = tswap16(reclen); 7163 tswapls(&de->d_ino); 7164 tswapls(&de->d_off); 7165 de = (struct linux_dirent *)((char *)de + reclen); 7166 len -= reclen; 7167 } 7168 } 7169 unlock_user(dirp, arg2, ret); 7170 } 7171 #endif 7172 break; 7173 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7174 case TARGET_NR_getdents64: 7175 { 7176 struct linux_dirent64 *dirp; 7177 abi_long count = arg3; 7178 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7179 goto efault; 7180 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7181 if (!is_error(ret)) { 7182 struct linux_dirent64 *de; 7183 int len = ret; 7184 int reclen; 7185 de = dirp; 7186 while (len > 0) { 7187 reclen = de->d_reclen; 7188 if (reclen > len) 7189 break; 7190 de->d_reclen = tswap16(reclen); 7191 tswap64s((uint64_t *)&de->d_ino); 7192 tswap64s((uint64_t *)&de->d_off); 7193 de = (struct linux_dirent64 *)((char *)de + reclen); 7194 len -= reclen; 7195 } 7196 } 7197 unlock_user(dirp, arg2, ret); 7198 } 7199 break; 7200 #endif /* TARGET_NR_getdents64 */ 7201 #if defined(TARGET_NR__newselect) 7202 case TARGET_NR__newselect: 7203 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7204 break; 7205 #endif 7206 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7207 # ifdef TARGET_NR_poll 7208 case TARGET_NR_poll: 7209 # endif 7210 # ifdef TARGET_NR_ppoll 7211 case TARGET_NR_ppoll: 7212 # endif 7213 { 7214 struct target_pollfd *target_pfd; 7215 unsigned int nfds = arg2; 7216 int timeout = arg3; 7217 struct pollfd *pfd; 7218 unsigned int i; 7219 7220 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7221 if (!target_pfd) 7222 goto efault; 7223 7224 pfd = alloca(sizeof(struct pollfd) * nfds); 7225 for(i = 0; i < nfds; i++) { 7226 pfd[i].fd = tswap32(target_pfd[i].fd); 7227 pfd[i].events = tswap16(target_pfd[i].events); 7228 } 7229 7230 # ifdef TARGET_NR_ppoll 7231 if (num == TARGET_NR_ppoll) { 7232 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7233 target_sigset_t *target_set; 7234 sigset_t _set, *set = &_set; 7235 7236 if (arg3) { 7237 if (target_to_host_timespec(timeout_ts, arg3)) { 7238 unlock_user(target_pfd, arg1, 0); 7239 goto efault; 7240 } 7241 } else { 7242 timeout_ts = NULL; 7243 } 7244 7245 if (arg4) { 7246 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7247 if (!target_set) { 7248 unlock_user(target_pfd, arg1, 0); 7249 goto efault; 7250 } 7251 target_to_host_sigset(set, target_set); 7252 } else { 7253 set = NULL; 7254 } 7255 7256 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7257 7258 if (!is_error(ret) && arg3) { 7259 host_to_target_timespec(arg3, timeout_ts); 7260 } 
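            /* On return, any remaining timeout has been written back to the guest
               timespec above; the sigmask locked from arg4 still has to be
               released.  Note that sys_ppoll() was invoked with _NSIG / 8 as the
               sigsetsize, the size the kernel expects for a full host sigset. */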
7261 if (arg4) { 7262 unlock_user(target_set, arg4, 0); 7263 } 7264 } else 7265 # endif 7266 ret = get_errno(poll(pfd, nfds, timeout)); 7267 7268 if (!is_error(ret)) { 7269 for(i = 0; i < nfds; i++) { 7270 target_pfd[i].revents = tswap16(pfd[i].revents); 7271 } 7272 } 7273 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7274 } 7275 break; 7276 #endif 7277 case TARGET_NR_flock: 7278 /* NOTE: the flock constant seems to be the same for every 7279 Linux platform */ 7280 ret = get_errno(flock(arg1, arg2)); 7281 break; 7282 case TARGET_NR_readv: 7283 { 7284 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7285 if (vec != NULL) { 7286 ret = get_errno(readv(arg1, vec, arg3)); 7287 unlock_iovec(vec, arg2, arg3, 1); 7288 } else { 7289 ret = -host_to_target_errno(errno); 7290 } 7291 } 7292 break; 7293 case TARGET_NR_writev: 7294 { 7295 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7296 if (vec != NULL) { 7297 ret = get_errno(writev(arg1, vec, arg3)); 7298 unlock_iovec(vec, arg2, arg3, 0); 7299 } else { 7300 ret = -host_to_target_errno(errno); 7301 } 7302 } 7303 break; 7304 case TARGET_NR_getsid: 7305 ret = get_errno(getsid(arg1)); 7306 break; 7307 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7308 case TARGET_NR_fdatasync: 7309 ret = get_errno(fdatasync(arg1)); 7310 break; 7311 #endif 7312 case TARGET_NR__sysctl: 7313 /* We don't implement this, but ENOTDIR is always a safe 7314 return value. */ 7315 ret = -TARGET_ENOTDIR; 7316 break; 7317 case TARGET_NR_sched_getaffinity: 7318 { 7319 unsigned int mask_size; 7320 unsigned long *mask; 7321 7322 /* 7323 * sched_getaffinity needs multiples of ulong, so need to take 7324 * care of mismatches between target ulong and host ulong sizes. 7325 */ 7326 if (arg2 & (sizeof(abi_ulong) - 1)) { 7327 ret = -TARGET_EINVAL; 7328 break; 7329 } 7330 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7331 7332 mask = alloca(mask_size); 7333 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7334 7335 if (!is_error(ret)) { 7336 if (copy_to_user(arg3, mask, ret)) { 7337 goto efault; 7338 } 7339 } 7340 } 7341 break; 7342 case TARGET_NR_sched_setaffinity: 7343 { 7344 unsigned int mask_size; 7345 unsigned long *mask; 7346 7347 /* 7348 * sched_setaffinity needs multiples of ulong, so need to take 7349 * care of mismatches between target ulong and host ulong sizes. 
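             * The requested length is therefore rounded up to a whole number
             * of host longs below, and only the arg2 bytes supplied by the
             * guest are copied into the host mask.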
7350 */ 7351 if (arg2 & (sizeof(abi_ulong) - 1)) { 7352 ret = -TARGET_EINVAL; 7353 break; 7354 } 7355 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7356 7357 mask = alloca(mask_size); 7358 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7359 goto efault; 7360 } 7361 memcpy(mask, p, arg2); 7362 unlock_user_struct(p, arg2, 0); 7363 7364 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7365 } 7366 break; 7367 case TARGET_NR_sched_setparam: 7368 { 7369 struct sched_param *target_schp; 7370 struct sched_param schp; 7371 7372 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7373 goto efault; 7374 schp.sched_priority = tswap32(target_schp->sched_priority); 7375 unlock_user_struct(target_schp, arg2, 0); 7376 ret = get_errno(sched_setparam(arg1, &schp)); 7377 } 7378 break; 7379 case TARGET_NR_sched_getparam: 7380 { 7381 struct sched_param *target_schp; 7382 struct sched_param schp; 7383 ret = get_errno(sched_getparam(arg1, &schp)); 7384 if (!is_error(ret)) { 7385 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7386 goto efault; 7387 target_schp->sched_priority = tswap32(schp.sched_priority); 7388 unlock_user_struct(target_schp, arg2, 1); 7389 } 7390 } 7391 break; 7392 case TARGET_NR_sched_setscheduler: 7393 { 7394 struct sched_param *target_schp; 7395 struct sched_param schp; 7396 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7397 goto efault; 7398 schp.sched_priority = tswap32(target_schp->sched_priority); 7399 unlock_user_struct(target_schp, arg3, 0); 7400 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7401 } 7402 break; 7403 case TARGET_NR_sched_getscheduler: 7404 ret = get_errno(sched_getscheduler(arg1)); 7405 break; 7406 case TARGET_NR_sched_yield: 7407 ret = get_errno(sched_yield()); 7408 break; 7409 case TARGET_NR_sched_get_priority_max: 7410 ret = get_errno(sched_get_priority_max(arg1)); 7411 break; 7412 case TARGET_NR_sched_get_priority_min: 7413 ret = get_errno(sched_get_priority_min(arg1)); 7414 break; 7415 case TARGET_NR_sched_rr_get_interval: 7416 { 7417 struct timespec ts; 7418 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7419 if (!is_error(ret)) { 7420 host_to_target_timespec(arg2, &ts); 7421 } 7422 } 7423 break; 7424 case TARGET_NR_nanosleep: 7425 { 7426 struct timespec req, rem; 7427 target_to_host_timespec(&req, arg1); 7428 ret = get_errno(nanosleep(&req, &rem)); 7429 if (is_error(ret) && arg2) { 7430 host_to_target_timespec(arg2, &rem); 7431 } 7432 } 7433 break; 7434 #ifdef TARGET_NR_query_module 7435 case TARGET_NR_query_module: 7436 goto unimplemented; 7437 #endif 7438 #ifdef TARGET_NR_nfsservctl 7439 case TARGET_NR_nfsservctl: 7440 goto unimplemented; 7441 #endif 7442 case TARGET_NR_prctl: 7443 switch (arg1) { 7444 case PR_GET_PDEATHSIG: 7445 { 7446 int deathsig; 7447 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7448 if (!is_error(ret) && arg2 7449 && put_user_ual(deathsig, arg2)) { 7450 goto efault; 7451 } 7452 break; 7453 } 7454 #ifdef PR_GET_NAME 7455 case PR_GET_NAME: 7456 { 7457 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7458 if (!name) { 7459 goto efault; 7460 } 7461 ret = get_errno(prctl(arg1, (unsigned long)name, 7462 arg3, arg4, arg5)); 7463 unlock_user(name, arg2, 16); 7464 break; 7465 } 7466 case PR_SET_NAME: 7467 { 7468 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7469 if (!name) { 7470 goto efault; 7471 } 7472 ret = get_errno(prctl(arg1, (unsigned long)name, 7473 arg3, arg4, arg5)); 7474 unlock_user(name, arg2, 0); 7475 break; 7476 } 7477 #endif 7478 
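        /* The PR_GET_NAME / PR_SET_NAME cases above use a fixed 16-byte buffer,
           matching the kernel's TASK_COMM_LEN; any prctl option that takes a
           pointer needs this kind of guest-buffer translation before the host
           call, whereas integer-only options can fall through to the default. */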
default: 7479 /* Most prctl options have no pointer arguments */ 7480 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7481 break; 7482 } 7483 break; 7484 #ifdef TARGET_NR_arch_prctl 7485 case TARGET_NR_arch_prctl: 7486 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7487 ret = do_arch_prctl(cpu_env, arg1, arg2); 7488 break; 7489 #else 7490 goto unimplemented; 7491 #endif 7492 #endif 7493 #ifdef TARGET_NR_pread64 7494 case TARGET_NR_pread64: 7495 if (regpairs_aligned(cpu_env)) { 7496 arg4 = arg5; 7497 arg5 = arg6; 7498 } 7499 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7500 goto efault; 7501 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7502 unlock_user(p, arg2, ret); 7503 break; 7504 case TARGET_NR_pwrite64: 7505 if (regpairs_aligned(cpu_env)) { 7506 arg4 = arg5; 7507 arg5 = arg6; 7508 } 7509 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7510 goto efault; 7511 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7512 unlock_user(p, arg2, 0); 7513 break; 7514 #endif 7515 case TARGET_NR_getcwd: 7516 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7517 goto efault; 7518 ret = get_errno(sys_getcwd1(p, arg2)); 7519 unlock_user(p, arg1, ret); 7520 break; 7521 case TARGET_NR_capget: 7522 goto unimplemented; 7523 case TARGET_NR_capset: 7524 goto unimplemented; 7525 case TARGET_NR_sigaltstack: 7526 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7527 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7528 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7529 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7530 break; 7531 #else 7532 goto unimplemented; 7533 #endif 7534 case TARGET_NR_sendfile: 7535 goto unimplemented; 7536 #ifdef TARGET_NR_getpmsg 7537 case TARGET_NR_getpmsg: 7538 goto unimplemented; 7539 #endif 7540 #ifdef TARGET_NR_putpmsg 7541 case TARGET_NR_putpmsg: 7542 goto unimplemented; 7543 #endif 7544 #ifdef TARGET_NR_vfork 7545 case TARGET_NR_vfork: 7546 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7547 0, 0, 0, 0)); 7548 break; 7549 #endif 7550 #ifdef TARGET_NR_ugetrlimit 7551 case TARGET_NR_ugetrlimit: 7552 { 7553 struct rlimit rlim; 7554 int resource = target_to_host_resource(arg1); 7555 ret = get_errno(getrlimit(resource, &rlim)); 7556 if (!is_error(ret)) { 7557 struct target_rlimit *target_rlim; 7558 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7559 goto efault; 7560 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7561 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7562 unlock_user_struct(target_rlim, arg2, 1); 7563 } 7564 break; 7565 } 7566 #endif 7567 #ifdef TARGET_NR_truncate64 7568 case TARGET_NR_truncate64: 7569 if (!(p = lock_user_string(arg1))) 7570 goto efault; 7571 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7572 unlock_user(p, arg1, 0); 7573 break; 7574 #endif 7575 #ifdef TARGET_NR_ftruncate64 7576 case TARGET_NR_ftruncate64: 7577 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7578 break; 7579 #endif 7580 #ifdef TARGET_NR_stat64 7581 case TARGET_NR_stat64: 7582 if (!(p = lock_user_string(arg1))) 7583 goto efault; 7584 ret = get_errno(stat(path(p), &st)); 7585 unlock_user(p, arg1, 0); 7586 if (!is_error(ret)) 7587 ret = host_to_target_stat64(cpu_env, arg2, &st); 7588 break; 7589 #endif 7590 #ifdef TARGET_NR_lstat64 7591 case TARGET_NR_lstat64: 7592 if (!(p = lock_user_string(arg1))) 7593 goto efault; 7594 ret = 
get_errno(lstat(path(p), &st)); 7595 unlock_user(p, arg1, 0); 7596 if (!is_error(ret)) 7597 ret = host_to_target_stat64(cpu_env, arg2, &st); 7598 break; 7599 #endif 7600 #ifdef TARGET_NR_fstat64 7601 case TARGET_NR_fstat64: 7602 ret = get_errno(fstat(arg1, &st)); 7603 if (!is_error(ret)) 7604 ret = host_to_target_stat64(cpu_env, arg2, &st); 7605 break; 7606 #endif 7607 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 7608 (defined(__NR_fstatat64) || defined(__NR_newfstatat)) 7609 #ifdef TARGET_NR_fstatat64 7610 case TARGET_NR_fstatat64: 7611 #endif 7612 #ifdef TARGET_NR_newfstatat 7613 case TARGET_NR_newfstatat: 7614 #endif 7615 if (!(p = lock_user_string(arg2))) 7616 goto efault; 7617 #ifdef __NR_fstatat64 7618 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); 7619 #else 7620 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); 7621 #endif 7622 if (!is_error(ret)) 7623 ret = host_to_target_stat64(cpu_env, arg3, &st); 7624 break; 7625 #endif 7626 case TARGET_NR_lchown: 7627 if (!(p = lock_user_string(arg1))) 7628 goto efault; 7629 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7630 unlock_user(p, arg1, 0); 7631 break; 7632 #ifdef TARGET_NR_getuid 7633 case TARGET_NR_getuid: 7634 ret = get_errno(high2lowuid(getuid())); 7635 break; 7636 #endif 7637 #ifdef TARGET_NR_getgid 7638 case TARGET_NR_getgid: 7639 ret = get_errno(high2lowgid(getgid())); 7640 break; 7641 #endif 7642 #ifdef TARGET_NR_geteuid 7643 case TARGET_NR_geteuid: 7644 ret = get_errno(high2lowuid(geteuid())); 7645 break; 7646 #endif 7647 #ifdef TARGET_NR_getegid 7648 case TARGET_NR_getegid: 7649 ret = get_errno(high2lowgid(getegid())); 7650 break; 7651 #endif 7652 case TARGET_NR_setreuid: 7653 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7654 break; 7655 case TARGET_NR_setregid: 7656 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7657 break; 7658 case TARGET_NR_getgroups: 7659 { 7660 int gidsetsize = arg1; 7661 target_id *target_grouplist; 7662 gid_t *grouplist; 7663 int i; 7664 7665 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7666 ret = get_errno(getgroups(gidsetsize, grouplist)); 7667 if (gidsetsize == 0) 7668 break; 7669 if (!is_error(ret)) { 7670 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0); 7671 if (!target_grouplist) 7672 goto efault; 7673 for(i = 0;i < ret; i++) 7674 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7675 unlock_user(target_grouplist, arg2, gidsetsize * 2); 7676 } 7677 } 7678 break; 7679 case TARGET_NR_setgroups: 7680 { 7681 int gidsetsize = arg1; 7682 target_id *target_grouplist; 7683 gid_t *grouplist = NULL; 7684 int i; 7685 if (gidsetsize) { 7686 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7687 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1); 7688 if (!target_grouplist) { 7689 ret = -TARGET_EFAULT; 7690 goto fail; 7691 } 7692 for (i = 0; i < gidsetsize; i++) { 7693 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7694 } 7695 unlock_user(target_grouplist, arg2, 0); 7696 } 7697 ret = get_errno(setgroups(gidsetsize, grouplist)); 7698 } 7699 break; 7700 case TARGET_NR_fchown: 7701 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7702 break; 7703 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 7704 case TARGET_NR_fchownat: 7705 if (!(p = lock_user_string(arg2))) 7706 goto efault; 7707 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); 7708 unlock_user(p, arg2, 0); 7709 break; 7710 #endif 
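    /* The legacy uid/gid syscalls above and below operate on 16-bit ids on
       targets that have them: guest values go through low2highuid()/low2highgid()
       on the way in and are narrowed again with high2lowuid()/put_user_u16() on
       the way out.  The *32 variants further down pass the full 32-bit values
       through unchanged. */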
7711 #ifdef TARGET_NR_setresuid 7712 case TARGET_NR_setresuid: 7713 ret = get_errno(setresuid(low2highuid(arg1), 7714 low2highuid(arg2), 7715 low2highuid(arg3))); 7716 break; 7717 #endif 7718 #ifdef TARGET_NR_getresuid 7719 case TARGET_NR_getresuid: 7720 { 7721 uid_t ruid, euid, suid; 7722 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7723 if (!is_error(ret)) { 7724 if (put_user_u16(high2lowuid(ruid), arg1) 7725 || put_user_u16(high2lowuid(euid), arg2) 7726 || put_user_u16(high2lowuid(suid), arg3)) 7727 goto efault; 7728 } 7729 } 7730 break; 7731 #endif 7732 #ifdef TARGET_NR_getresgid 7733 case TARGET_NR_setresgid: 7734 ret = get_errno(setresgid(low2highgid(arg1), 7735 low2highgid(arg2), 7736 low2highgid(arg3))); 7737 break; 7738 #endif 7739 #ifdef TARGET_NR_getresgid 7740 case TARGET_NR_getresgid: 7741 { 7742 gid_t rgid, egid, sgid; 7743 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7744 if (!is_error(ret)) { 7745 if (put_user_u16(high2lowgid(rgid), arg1) 7746 || put_user_u16(high2lowgid(egid), arg2) 7747 || put_user_u16(high2lowgid(sgid), arg3)) 7748 goto efault; 7749 } 7750 } 7751 break; 7752 #endif 7753 case TARGET_NR_chown: 7754 if (!(p = lock_user_string(arg1))) 7755 goto efault; 7756 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7757 unlock_user(p, arg1, 0); 7758 break; 7759 case TARGET_NR_setuid: 7760 ret = get_errno(setuid(low2highuid(arg1))); 7761 break; 7762 case TARGET_NR_setgid: 7763 ret = get_errno(setgid(low2highgid(arg1))); 7764 break; 7765 case TARGET_NR_setfsuid: 7766 ret = get_errno(setfsuid(arg1)); 7767 break; 7768 case TARGET_NR_setfsgid: 7769 ret = get_errno(setfsgid(arg1)); 7770 break; 7771 7772 #ifdef TARGET_NR_lchown32 7773 case TARGET_NR_lchown32: 7774 if (!(p = lock_user_string(arg1))) 7775 goto efault; 7776 ret = get_errno(lchown(p, arg2, arg3)); 7777 unlock_user(p, arg1, 0); 7778 break; 7779 #endif 7780 #ifdef TARGET_NR_getuid32 7781 case TARGET_NR_getuid32: 7782 ret = get_errno(getuid()); 7783 break; 7784 #endif 7785 7786 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 7787 /* Alpha specific */ 7788 case TARGET_NR_getxuid: 7789 { 7790 uid_t euid; 7791 euid=geteuid(); 7792 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 7793 } 7794 ret = get_errno(getuid()); 7795 break; 7796 #endif 7797 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 7798 /* Alpha specific */ 7799 case TARGET_NR_getxgid: 7800 { 7801 uid_t egid; 7802 egid=getegid(); 7803 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 7804 } 7805 ret = get_errno(getgid()); 7806 break; 7807 #endif 7808 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 7809 /* Alpha specific */ 7810 case TARGET_NR_osf_getsysinfo: 7811 ret = -TARGET_EOPNOTSUPP; 7812 switch (arg1) { 7813 case TARGET_GSI_IEEE_FP_CONTROL: 7814 { 7815 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 7816 7817 /* Copied from linux ieee_fpcr_to_swcr. */ 7818 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 7819 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 7820 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 7821 | SWCR_TRAP_ENABLE_DZE 7822 | SWCR_TRAP_ENABLE_OVF); 7823 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 7824 | SWCR_TRAP_ENABLE_INE); 7825 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 7826 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 7827 7828 if (put_user_u64 (swcr, arg2)) 7829 goto efault; 7830 ret = 0; 7831 } 7832 break; 7833 7834 /* case GSI_IEEE_STATE_AT_SIGNAL: 7835 -- Not implemented in linux kernel. 7836 case GSI_UACPROC: 7837 -- Retrieves current unaligned access state; not much used. 
7838 case GSI_PROC_TYPE: 7839 -- Retrieves implver information; surely not used. 7840 case GSI_GET_HWRPB: 7841 -- Grabs a copy of the HWRPB; surely not used. 7842 */ 7843 } 7844 break; 7845 #endif 7846 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 7847 /* Alpha specific */ 7848 case TARGET_NR_osf_setsysinfo: 7849 ret = -TARGET_EOPNOTSUPP; 7850 switch (arg1) { 7851 case TARGET_SSI_IEEE_FP_CONTROL: 7852 { 7853 uint64_t swcr, fpcr, orig_fpcr; 7854 7855 if (get_user_u64 (swcr, arg2)) { 7856 goto efault; 7857 } 7858 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 7859 fpcr = orig_fpcr & FPCR_DYN_MASK; 7860 7861 /* Copied from linux ieee_swcr_to_fpcr. */ 7862 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 7863 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 7864 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 7865 | SWCR_TRAP_ENABLE_DZE 7866 | SWCR_TRAP_ENABLE_OVF)) << 48; 7867 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 7868 | SWCR_TRAP_ENABLE_INE)) << 57; 7869 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 7870 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 7871 7872 cpu_alpha_store_fpcr(cpu_env, fpcr); 7873 ret = 0; 7874 } 7875 break; 7876 7877 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 7878 { 7879 uint64_t exc, fpcr, orig_fpcr; 7880 int si_code; 7881 7882 if (get_user_u64(exc, arg2)) { 7883 goto efault; 7884 } 7885 7886 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 7887 7888 /* We only add to the exception status here. */ 7889 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 7890 7891 cpu_alpha_store_fpcr(cpu_env, fpcr); 7892 ret = 0; 7893 7894 /* Old exceptions are not signaled. */ 7895 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 7896 7897 /* If any exceptions set by this call, 7898 and are unmasked, send a signal. */ 7899 si_code = 0; 7900 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 7901 si_code = TARGET_FPE_FLTRES; 7902 } 7903 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 7904 si_code = TARGET_FPE_FLTUND; 7905 } 7906 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 7907 si_code = TARGET_FPE_FLTOVF; 7908 } 7909 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 7910 si_code = TARGET_FPE_FLTDIV; 7911 } 7912 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 7913 si_code = TARGET_FPE_FLTINV; 7914 } 7915 if (si_code != 0) { 7916 target_siginfo_t info; 7917 info.si_signo = SIGFPE; 7918 info.si_errno = 0; 7919 info.si_code = si_code; 7920 info._sifields._sigfault._addr 7921 = ((CPUArchState *)cpu_env)->pc; 7922 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 7923 } 7924 } 7925 break; 7926 7927 /* case SSI_NVPAIRS: 7928 -- Used with SSIN_UACPROC to enable unaligned accesses. 7929 case SSI_IEEE_STATE_AT_SIGNAL: 7930 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 7931 -- Not implemented in linux kernel 7932 */ 7933 } 7934 break; 7935 #endif 7936 #ifdef TARGET_NR_osf_sigprocmask 7937 /* Alpha specific. 
*/ 7938 case TARGET_NR_osf_sigprocmask: 7939 { 7940 abi_ulong mask; 7941 int how; 7942 sigset_t set, oldset; 7943 7944 switch(arg1) { 7945 case TARGET_SIG_BLOCK: 7946 how = SIG_BLOCK; 7947 break; 7948 case TARGET_SIG_UNBLOCK: 7949 how = SIG_UNBLOCK; 7950 break; 7951 case TARGET_SIG_SETMASK: 7952 how = SIG_SETMASK; 7953 break; 7954 default: 7955 ret = -TARGET_EINVAL; 7956 goto fail; 7957 } 7958 mask = arg2; 7959 target_to_host_old_sigset(&set, &mask); 7960 sigprocmask(how, &set, &oldset); 7961 host_to_target_old_sigset(&mask, &oldset); 7962 ret = mask; 7963 } 7964 break; 7965 #endif 7966 7967 #ifdef TARGET_NR_getgid32 7968 case TARGET_NR_getgid32: 7969 ret = get_errno(getgid()); 7970 break; 7971 #endif 7972 #ifdef TARGET_NR_geteuid32 7973 case TARGET_NR_geteuid32: 7974 ret = get_errno(geteuid()); 7975 break; 7976 #endif 7977 #ifdef TARGET_NR_getegid32 7978 case TARGET_NR_getegid32: 7979 ret = get_errno(getegid()); 7980 break; 7981 #endif 7982 #ifdef TARGET_NR_setreuid32 7983 case TARGET_NR_setreuid32: 7984 ret = get_errno(setreuid(arg1, arg2)); 7985 break; 7986 #endif 7987 #ifdef TARGET_NR_setregid32 7988 case TARGET_NR_setregid32: 7989 ret = get_errno(setregid(arg1, arg2)); 7990 break; 7991 #endif 7992 #ifdef TARGET_NR_getgroups32 7993 case TARGET_NR_getgroups32: 7994 { 7995 int gidsetsize = arg1; 7996 uint32_t *target_grouplist; 7997 gid_t *grouplist; 7998 int i; 7999 8000 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8001 ret = get_errno(getgroups(gidsetsize, grouplist)); 8002 if (gidsetsize == 0) 8003 break; 8004 if (!is_error(ret)) { 8005 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8006 if (!target_grouplist) { 8007 ret = -TARGET_EFAULT; 8008 goto fail; 8009 } 8010 for(i = 0;i < ret; i++) 8011 target_grouplist[i] = tswap32(grouplist[i]); 8012 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8013 } 8014 } 8015 break; 8016 #endif 8017 #ifdef TARGET_NR_setgroups32 8018 case TARGET_NR_setgroups32: 8019 { 8020 int gidsetsize = arg1; 8021 uint32_t *target_grouplist; 8022 gid_t *grouplist; 8023 int i; 8024 8025 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8026 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8027 if (!target_grouplist) { 8028 ret = -TARGET_EFAULT; 8029 goto fail; 8030 } 8031 for(i = 0;i < gidsetsize; i++) 8032 grouplist[i] = tswap32(target_grouplist[i]); 8033 unlock_user(target_grouplist, arg2, 0); 8034 ret = get_errno(setgroups(gidsetsize, grouplist)); 8035 } 8036 break; 8037 #endif 8038 #ifdef TARGET_NR_fchown32 8039 case TARGET_NR_fchown32: 8040 ret = get_errno(fchown(arg1, arg2, arg3)); 8041 break; 8042 #endif 8043 #ifdef TARGET_NR_setresuid32 8044 case TARGET_NR_setresuid32: 8045 ret = get_errno(setresuid(arg1, arg2, arg3)); 8046 break; 8047 #endif 8048 #ifdef TARGET_NR_getresuid32 8049 case TARGET_NR_getresuid32: 8050 { 8051 uid_t ruid, euid, suid; 8052 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8053 if (!is_error(ret)) { 8054 if (put_user_u32(ruid, arg1) 8055 || put_user_u32(euid, arg2) 8056 || put_user_u32(suid, arg3)) 8057 goto efault; 8058 } 8059 } 8060 break; 8061 #endif 8062 #ifdef TARGET_NR_setresgid32 8063 case TARGET_NR_setresgid32: 8064 ret = get_errno(setresgid(arg1, arg2, arg3)); 8065 break; 8066 #endif 8067 #ifdef TARGET_NR_getresgid32 8068 case TARGET_NR_getresgid32: 8069 { 8070 gid_t rgid, egid, sgid; 8071 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8072 if (!is_error(ret)) { 8073 if (put_user_u32(rgid, arg1) 8074 || put_user_u32(egid, arg2) 8075 || put_user_u32(sgid, arg3)) 8076 goto efault; 
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif

    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.
*/ 8166 ret = get_errno(0); 8167 break; 8168 #endif 8169 #if TARGET_ABI_BITS == 32 8170 case TARGET_NR_fcntl64: 8171 { 8172 int cmd; 8173 struct flock64 fl; 8174 struct target_flock64 *target_fl; 8175 #ifdef TARGET_ARM 8176 struct target_eabi_flock64 *target_efl; 8177 #endif 8178 8179 cmd = target_to_host_fcntl_cmd(arg2); 8180 if (cmd == -TARGET_EINVAL) { 8181 ret = cmd; 8182 break; 8183 } 8184 8185 switch(arg2) { 8186 case TARGET_F_GETLK64: 8187 #ifdef TARGET_ARM 8188 if (((CPUARMState *)cpu_env)->eabi) { 8189 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8190 goto efault; 8191 fl.l_type = tswap16(target_efl->l_type); 8192 fl.l_whence = tswap16(target_efl->l_whence); 8193 fl.l_start = tswap64(target_efl->l_start); 8194 fl.l_len = tswap64(target_efl->l_len); 8195 fl.l_pid = tswap32(target_efl->l_pid); 8196 unlock_user_struct(target_efl, arg3, 0); 8197 } else 8198 #endif 8199 { 8200 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8201 goto efault; 8202 fl.l_type = tswap16(target_fl->l_type); 8203 fl.l_whence = tswap16(target_fl->l_whence); 8204 fl.l_start = tswap64(target_fl->l_start); 8205 fl.l_len = tswap64(target_fl->l_len); 8206 fl.l_pid = tswap32(target_fl->l_pid); 8207 unlock_user_struct(target_fl, arg3, 0); 8208 } 8209 ret = get_errno(fcntl(arg1, cmd, &fl)); 8210 if (ret == 0) { 8211 #ifdef TARGET_ARM 8212 if (((CPUARMState *)cpu_env)->eabi) { 8213 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8214 goto efault; 8215 target_efl->l_type = tswap16(fl.l_type); 8216 target_efl->l_whence = tswap16(fl.l_whence); 8217 target_efl->l_start = tswap64(fl.l_start); 8218 target_efl->l_len = tswap64(fl.l_len); 8219 target_efl->l_pid = tswap32(fl.l_pid); 8220 unlock_user_struct(target_efl, arg3, 1); 8221 } else 8222 #endif 8223 { 8224 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8225 goto efault; 8226 target_fl->l_type = tswap16(fl.l_type); 8227 target_fl->l_whence = tswap16(fl.l_whence); 8228 target_fl->l_start = tswap64(fl.l_start); 8229 target_fl->l_len = tswap64(fl.l_len); 8230 target_fl->l_pid = tswap32(fl.l_pid); 8231 unlock_user_struct(target_fl, arg3, 1); 8232 } 8233 } 8234 break; 8235 8236 case TARGET_F_SETLK64: 8237 case TARGET_F_SETLKW64: 8238 #ifdef TARGET_ARM 8239 if (((CPUARMState *)cpu_env)->eabi) { 8240 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8241 goto efault; 8242 fl.l_type = tswap16(target_efl->l_type); 8243 fl.l_whence = tswap16(target_efl->l_whence); 8244 fl.l_start = tswap64(target_efl->l_start); 8245 fl.l_len = tswap64(target_efl->l_len); 8246 fl.l_pid = tswap32(target_efl->l_pid); 8247 unlock_user_struct(target_efl, arg3, 0); 8248 } else 8249 #endif 8250 { 8251 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8252 goto efault; 8253 fl.l_type = tswap16(target_fl->l_type); 8254 fl.l_whence = tswap16(target_fl->l_whence); 8255 fl.l_start = tswap64(target_fl->l_start); 8256 fl.l_len = tswap64(target_fl->l_len); 8257 fl.l_pid = tswap32(target_fl->l_pid); 8258 unlock_user_struct(target_fl, arg3, 0); 8259 } 8260 ret = get_errno(fcntl(arg1, cmd, &fl)); 8261 break; 8262 default: 8263 ret = do_fcntl(arg1, arg2, arg3); 8264 break; 8265 } 8266 break; 8267 } 8268 #endif 8269 #ifdef TARGET_NR_cacheflush 8270 case TARGET_NR_cacheflush: 8271 /* self-modifying code is handled automatically, so nothing needed */ 8272 ret = 0; 8273 break; 8274 #endif 8275 #ifdef TARGET_NR_security 8276 case TARGET_NR_security: 8277 goto unimplemented; 8278 #endif 8279 #ifdef TARGET_NR_getpagesize 8280 case TARGET_NR_getpagesize: 8281 ret = 
TARGET_PAGE_SIZE; 8282 break; 8283 #endif 8284 case TARGET_NR_gettid: 8285 ret = get_errno(gettid()); 8286 break; 8287 #ifdef TARGET_NR_readahead 8288 case TARGET_NR_readahead: 8289 #if TARGET_ABI_BITS == 32 8290 if (regpairs_aligned(cpu_env)) { 8291 arg2 = arg3; 8292 arg3 = arg4; 8293 arg4 = arg5; 8294 } 8295 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 8296 #else 8297 ret = get_errno(readahead(arg1, arg2, arg3)); 8298 #endif 8299 break; 8300 #endif 8301 #ifdef CONFIG_ATTR 8302 #ifdef TARGET_NR_setxattr 8303 case TARGET_NR_listxattr: 8304 case TARGET_NR_llistxattr: 8305 { 8306 void *p, *b = 0; 8307 if (arg2) { 8308 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8309 if (!b) { 8310 ret = -TARGET_EFAULT; 8311 break; 8312 } 8313 } 8314 p = lock_user_string(arg1); 8315 if (p) { 8316 if (num == TARGET_NR_listxattr) { 8317 ret = get_errno(listxattr(p, b, arg3)); 8318 } else { 8319 ret = get_errno(llistxattr(p, b, arg3)); 8320 } 8321 } else { 8322 ret = -TARGET_EFAULT; 8323 } 8324 unlock_user(p, arg1, 0); 8325 unlock_user(b, arg2, arg3); 8326 break; 8327 } 8328 case TARGET_NR_flistxattr: 8329 { 8330 void *b = 0; 8331 if (arg2) { 8332 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8333 if (!b) { 8334 ret = -TARGET_EFAULT; 8335 break; 8336 } 8337 } 8338 ret = get_errno(flistxattr(arg1, b, arg3)); 8339 unlock_user(b, arg2, arg3); 8340 break; 8341 } 8342 case TARGET_NR_setxattr: 8343 case TARGET_NR_lsetxattr: 8344 { 8345 void *p, *n, *v = 0; 8346 if (arg3) { 8347 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8348 if (!v) { 8349 ret = -TARGET_EFAULT; 8350 break; 8351 } 8352 } 8353 p = lock_user_string(arg1); 8354 n = lock_user_string(arg2); 8355 if (p && n) { 8356 if (num == TARGET_NR_setxattr) { 8357 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8358 } else { 8359 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8360 } 8361 } else { 8362 ret = -TARGET_EFAULT; 8363 } 8364 unlock_user(p, arg1, 0); 8365 unlock_user(n, arg2, 0); 8366 unlock_user(v, arg3, 0); 8367 } 8368 break; 8369 case TARGET_NR_fsetxattr: 8370 { 8371 void *n, *v = 0; 8372 if (arg3) { 8373 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8374 if (!v) { 8375 ret = -TARGET_EFAULT; 8376 break; 8377 } 8378 } 8379 n = lock_user_string(arg2); 8380 if (n) { 8381 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8382 } else { 8383 ret = -TARGET_EFAULT; 8384 } 8385 unlock_user(n, arg2, 0); 8386 unlock_user(v, arg3, 0); 8387 } 8388 break; 8389 case TARGET_NR_getxattr: 8390 case TARGET_NR_lgetxattr: 8391 { 8392 void *p, *n, *v = 0; 8393 if (arg3) { 8394 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8395 if (!v) { 8396 ret = -TARGET_EFAULT; 8397 break; 8398 } 8399 } 8400 p = lock_user_string(arg1); 8401 n = lock_user_string(arg2); 8402 if (p && n) { 8403 if (num == TARGET_NR_getxattr) { 8404 ret = get_errno(getxattr(p, n, v, arg4)); 8405 } else { 8406 ret = get_errno(lgetxattr(p, n, v, arg4)); 8407 } 8408 } else { 8409 ret = -TARGET_EFAULT; 8410 } 8411 unlock_user(p, arg1, 0); 8412 unlock_user(n, arg2, 0); 8413 unlock_user(v, arg3, arg4); 8414 } 8415 break; 8416 case TARGET_NR_fgetxattr: 8417 { 8418 void *n, *v = 0; 8419 if (arg3) { 8420 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8421 if (!v) { 8422 ret = -TARGET_EFAULT; 8423 break; 8424 } 8425 } 8426 n = lock_user_string(arg2); 8427 if (n) { 8428 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8429 } else { 8430 ret = -TARGET_EFAULT; 8431 } 8432 unlock_user(n, arg2, 0); 8433 unlock_user(v, arg3, arg4); 8434 } 8435 break; 8436 case TARGET_NR_removexattr: 8437 case 
TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        break;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        break;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif

#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ?
&ts : NULL)); 8531 if (arg4) 8532 host_to_target_timespec(arg4, &ts); 8533 break; 8534 } 8535 #endif 8536 8537 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 8538 case TARGET_NR_set_tid_address: 8539 ret = get_errno(set_tid_address((int *)g2h(arg1))); 8540 break; 8541 #endif 8542 8543 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 8544 case TARGET_NR_tkill: 8545 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 8546 break; 8547 #endif 8548 8549 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 8550 case TARGET_NR_tgkill: 8551 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 8552 target_to_host_signal(arg3))); 8553 break; 8554 #endif 8555 8556 #ifdef TARGET_NR_set_robust_list 8557 case TARGET_NR_set_robust_list: 8558 goto unimplemented_nowarn; 8559 #endif 8560 8561 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat) 8562 case TARGET_NR_utimensat: 8563 { 8564 struct timespec *tsp, ts[2]; 8565 if (!arg3) { 8566 tsp = NULL; 8567 } else { 8568 target_to_host_timespec(ts, arg3); 8569 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 8570 tsp = ts; 8571 } 8572 if (!arg2) 8573 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 8574 else { 8575 if (!(p = lock_user_string(arg2))) { 8576 ret = -TARGET_EFAULT; 8577 goto fail; 8578 } 8579 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 8580 unlock_user(p, arg2, 0); 8581 } 8582 } 8583 break; 8584 #endif 8585 #if defined(CONFIG_USE_NPTL) 8586 case TARGET_NR_futex: 8587 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 8588 break; 8589 #endif 8590 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 8591 case TARGET_NR_inotify_init: 8592 ret = get_errno(sys_inotify_init()); 8593 break; 8594 #endif 8595 #ifdef CONFIG_INOTIFY1 8596 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 8597 case TARGET_NR_inotify_init1: 8598 ret = get_errno(sys_inotify_init1(arg1)); 8599 break; 8600 #endif 8601 #endif 8602 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 8603 case TARGET_NR_inotify_add_watch: 8604 p = lock_user_string(arg2); 8605 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 8606 unlock_user(p, arg2, 0); 8607 break; 8608 #endif 8609 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 8610 case TARGET_NR_inotify_rm_watch: 8611 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 8612 break; 8613 #endif 8614 8615 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 8616 case TARGET_NR_mq_open: 8617 { 8618 struct mq_attr posix_mq_attr; 8619 8620 p = lock_user_string(arg1 - 1); 8621 if (arg4 != 0) 8622 copy_from_user_mq_attr (&posix_mq_attr, arg4); 8623 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 8624 unlock_user (p, arg1, 0); 8625 } 8626 break; 8627 8628 case TARGET_NR_mq_unlink: 8629 p = lock_user_string(arg1 - 1); 8630 ret = get_errno(mq_unlink(p)); 8631 unlock_user (p, arg1, 0); 8632 break; 8633 8634 case TARGET_NR_mq_timedsend: 8635 { 8636 struct timespec ts; 8637 8638 p = lock_user (VERIFY_READ, arg2, arg3, 1); 8639 if (arg5 != 0) { 8640 target_to_host_timespec(&ts, arg5); 8641 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 8642 host_to_target_timespec(arg5, &ts); 8643 } 8644 else 8645 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 8646 unlock_user (p, arg2, arg3); 8647 } 8648 break; 8649 8650 case TARGET_NR_mq_timedreceive: 8651 { 8652 struct timespec ts; 8653 unsigned int prio; 8654 8655 p = lock_user (VERIFY_READ, arg2, arg3, 1); 8656 if (arg5 != 
0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }

        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
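    /* The size argument of epoll_create() has been ignored by the kernel
     * since Linux 2.6.8 (it only needs to be non-zero), so the guest value
     * can be passed straight through to the host call below. */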
case TARGET_NR_epoll_create: 8782 ret = get_errno(epoll_create(arg1)); 8783 break; 8784 #endif 8785 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 8786 case TARGET_NR_epoll_create1: 8787 ret = get_errno(epoll_create1(arg1)); 8788 break; 8789 #endif 8790 #if defined(TARGET_NR_epoll_ctl) 8791 case TARGET_NR_epoll_ctl: 8792 { 8793 struct epoll_event ep; 8794 struct epoll_event *epp = 0; 8795 if (arg4) { 8796 struct target_epoll_event *target_ep; 8797 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 8798 goto efault; 8799 } 8800 ep.events = tswap32(target_ep->events); 8801 /* The epoll_data_t union is just opaque data to the kernel, 8802 * so we transfer all 64 bits across and need not worry what 8803 * actual data type it is. 8804 */ 8805 ep.data.u64 = tswap64(target_ep->data.u64); 8806 unlock_user_struct(target_ep, arg4, 0); 8807 epp = &ep; 8808 } 8809 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 8810 break; 8811 } 8812 #endif 8813 8814 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 8815 #define IMPLEMENT_EPOLL_PWAIT 8816 #endif 8817 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 8818 #if defined(TARGET_NR_epoll_wait) 8819 case TARGET_NR_epoll_wait: 8820 #endif 8821 #if defined(IMPLEMENT_EPOLL_PWAIT) 8822 case TARGET_NR_epoll_pwait: 8823 #endif 8824 { 8825 struct target_epoll_event *target_ep; 8826 struct epoll_event *ep; 8827 int epfd = arg1; 8828 int maxevents = arg3; 8829 int timeout = arg4; 8830 8831 target_ep = lock_user(VERIFY_WRITE, arg2, 8832 maxevents * sizeof(struct target_epoll_event), 1); 8833 if (!target_ep) { 8834 goto efault; 8835 } 8836 8837 ep = alloca(maxevents * sizeof(struct epoll_event)); 8838 8839 switch (num) { 8840 #if defined(IMPLEMENT_EPOLL_PWAIT) 8841 case TARGET_NR_epoll_pwait: 8842 { 8843 target_sigset_t *target_set; 8844 sigset_t _set, *set = &_set; 8845 8846 if (arg5) { 8847 target_set = lock_user(VERIFY_READ, arg5, 8848 sizeof(target_sigset_t), 1); 8849 if (!target_set) { 8850 unlock_user(target_ep, arg2, 0); 8851 goto efault; 8852 } 8853 target_to_host_sigset(set, target_set); 8854 unlock_user(target_set, arg5, 0); 8855 } else { 8856 set = NULL; 8857 } 8858 8859 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 8860 break; 8861 } 8862 #endif 8863 #if defined(TARGET_NR_epoll_wait) 8864 case TARGET_NR_epoll_wait: 8865 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 8866 break; 8867 #endif 8868 default: 8869 ret = -TARGET_ENOSYS; 8870 } 8871 if (!is_error(ret)) { 8872 int i; 8873 for (i = 0; i < ret; i++) { 8874 target_ep[i].events = tswap32(ep[i].events); 8875 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 8876 } 8877 } 8878 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 8879 break; 8880 } 8881 #endif 8882 #endif 8883 #ifdef TARGET_NR_prlimit64 8884 case TARGET_NR_prlimit64: 8885 { 8886 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 8887 struct target_rlimit64 *target_rnew, *target_rold; 8888 struct host_rlimit64 rnew, rold, *rnewp = 0; 8889 if (arg3) { 8890 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 8891 goto efault; 8892 } 8893 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 8894 rnew.rlim_max = tswap64(target_rnew->rlim_max); 8895 unlock_user_struct(target_rnew, arg3, 0); 8896 rnewp = &rnew; 8897 } 8898 8899 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? 
&rold : 0)); 8900 if (!is_error(ret) && arg4) { 8901 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 8902 goto efault; 8903 } 8904 target_rold->rlim_cur = tswap64(rold.rlim_cur); 8905 target_rold->rlim_max = tswap64(rold.rlim_max); 8906 unlock_user_struct(target_rold, arg4, 1); 8907 } 8908 break; 8909 } 8910 #endif 8911 #ifdef TARGET_NR_gethostname 8912 case TARGET_NR_gethostname: 8913 { 8914 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 8915 if (name) { 8916 ret = get_errno(gethostname(name, arg2)); 8917 unlock_user(name, arg1, arg2); 8918 } else { 8919 ret = -TARGET_EFAULT; 8920 } 8921 break; 8922 } 8923 #endif 8924 default: 8925 unimplemented: 8926 gemu_log("qemu: Unsupported syscall: %d\n", num); 8927 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 8928 unimplemented_nowarn: 8929 #endif 8930 ret = -TARGET_ENOSYS; 8931 break; 8932 } 8933 fail: 8934 #ifdef DEBUG 8935 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 8936 #endif 8937 if(do_strace) 8938 print_syscall_ret(num, ret); 8939 return ret; 8940 efault: 8941 ret = -TARGET_EFAULT; 8942 goto fail; 8943 } 8944