/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"

#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])


#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}


#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

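/* The wrappers below invoke host system calls directly through syscall(2)
 * (via the _syscall* macros above), since the host libc may not provide
 * wrappers for all of them.  Like the libc wrappers, they return -1 on
 * failure with the error code left in the host errno.
 */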
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);

static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
    { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
    { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
    { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
    { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
    { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
    { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
    { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
    { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
    { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
    { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};

#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct linux kernel uses).
     */

    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}

#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
411 */ 412 if ((flags & O_CREAT) != 0) { 413 return (openat(dirfd, pathname, flags, mode)); 414 } 415 return (openat(dirfd, pathname, flags)); 416 } 417 #endif 418 #ifdef TARGET_NR_readlinkat 419 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz) 420 { 421 return (readlinkat(dirfd, pathname, buf, bufsiz)); 422 } 423 #endif 424 #ifdef TARGET_NR_renameat 425 static int sys_renameat(int olddirfd, const char *oldpath, 426 int newdirfd, const char *newpath) 427 { 428 return (renameat(olddirfd, oldpath, newdirfd, newpath)); 429 } 430 #endif 431 #ifdef TARGET_NR_symlinkat 432 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath) 433 { 434 return (symlinkat(oldpath, newdirfd, newpath)); 435 } 436 #endif 437 #ifdef TARGET_NR_unlinkat 438 static int sys_unlinkat(int dirfd, const char *pathname, int flags) 439 { 440 return (unlinkat(dirfd, pathname, flags)); 441 } 442 #endif 443 #else /* !CONFIG_ATFILE */ 444 445 /* 446 * Try direct syscalls instead 447 */ 448 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 449 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode) 450 #endif 451 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 452 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode) 453 #endif 454 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 455 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname, 456 uid_t,owner,gid_t,group,int,flags) 457 #endif 458 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 459 defined(__NR_fstatat64) 460 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname, 461 struct stat *,buf,int,flags) 462 #endif 463 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 464 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname, 465 const struct timeval *,times) 466 #endif 467 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \ 468 defined(__NR_newfstatat) 469 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname, 470 struct stat *,buf,int,flags) 471 #endif 472 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 473 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath, 474 int,newdirfd,const char *,newpath,int,flags) 475 #endif 476 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 477 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode) 478 #endif 479 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 480 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname, 481 mode_t,mode,dev_t,dev) 482 #endif 483 #if defined(TARGET_NR_openat) && defined(__NR_openat) 484 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode) 485 #endif 486 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 487 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname, 488 char *,buf,size_t,bufsize) 489 #endif 490 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 491 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath, 492 int,newdirfd,const char *,newpath) 493 #endif 494 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 495 _syscall3(int,sys_symlinkat,const char *,oldpath, 496 int,newdirfd,const char *,newpath) 497 #endif 498 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 499 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags) 500 #endif 501 502 #endif /* CONFIG_ATFILE */ 503 504 #ifdef CONFIG_UTIMENSAT 505 static int sys_utimensat(int dirfd, const char *pathname, 506 const 
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif

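/* Errno values are translated between host and target via the two tables
 * below; entries left at zero fall back to the identity mapping in
 * host_to_target_errno()/target_to_host_errno().
 */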
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

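/* POSIX message queue attributes are copied field by field between the
 * target's struct layout and the host's, with each field byte-swapped to
 * the guest's endianness as needed.
 */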
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
1243 */ 1244 1245 if (sa_family == AF_UNIX) { 1246 if (len < unix_maxlen && len > 0) { 1247 char *cp = (char*)target_saddr; 1248 1249 if ( cp[len-1] && !cp[len] ) 1250 len++; 1251 } 1252 if (len > unix_maxlen) 1253 len = unix_maxlen; 1254 } 1255 1256 memcpy(addr, target_saddr, len); 1257 addr->sa_family = sa_family; 1258 unlock_user(target_saddr, target_addr, 0); 1259 1260 return 0; 1261 } 1262 1263 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1264 struct sockaddr *addr, 1265 socklen_t len) 1266 { 1267 struct target_sockaddr *target_saddr; 1268 1269 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1270 if (!target_saddr) 1271 return -TARGET_EFAULT; 1272 memcpy(target_saddr, addr, len); 1273 target_saddr->sa_family = tswap16(addr->sa_family); 1274 unlock_user(target_saddr, target_addr, len); 1275 1276 return 0; 1277 } 1278 1279 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1280 struct target_msghdr *target_msgh) 1281 { 1282 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1283 abi_long msg_controllen; 1284 abi_ulong target_cmsg_addr; 1285 struct target_cmsghdr *target_cmsg; 1286 socklen_t space = 0; 1287 1288 msg_controllen = tswapal(target_msgh->msg_controllen); 1289 if (msg_controllen < sizeof (struct target_cmsghdr)) 1290 goto the_end; 1291 target_cmsg_addr = tswapal(target_msgh->msg_control); 1292 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1293 if (!target_cmsg) 1294 return -TARGET_EFAULT; 1295 1296 while (cmsg && target_cmsg) { 1297 void *data = CMSG_DATA(cmsg); 1298 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1299 1300 int len = tswapal(target_cmsg->cmsg_len) 1301 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr)); 1302 1303 space += CMSG_SPACE(len); 1304 if (space > msgh->msg_controllen) { 1305 space -= CMSG_SPACE(len); 1306 gemu_log("Host cmsg overflow\n"); 1307 break; 1308 } 1309 1310 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1311 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1312 cmsg->cmsg_len = CMSG_LEN(len); 1313 1314 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1315 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1316 memcpy(data, target_data, len); 1317 } else { 1318 int *fd = (int *)data; 1319 int *target_fd = (int *)target_data; 1320 int i, numfds = len / sizeof(int); 1321 1322 for (i = 0; i < numfds; i++) 1323 fd[i] = tswap32(target_fd[i]); 1324 } 1325 1326 cmsg = CMSG_NXTHDR(msgh, cmsg); 1327 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1328 } 1329 unlock_user(target_cmsg, target_cmsg_addr, 0); 1330 the_end: 1331 msgh->msg_controllen = space; 1332 return 0; 1333 } 1334 1335 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1336 struct msghdr *msgh) 1337 { 1338 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1339 abi_long msg_controllen; 1340 abi_ulong target_cmsg_addr; 1341 struct target_cmsghdr *target_cmsg; 1342 socklen_t space = 0; 1343 1344 msg_controllen = tswapal(target_msgh->msg_controllen); 1345 if (msg_controllen < sizeof (struct target_cmsghdr)) 1346 goto the_end; 1347 target_cmsg_addr = tswapal(target_msgh->msg_control); 1348 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1349 if (!target_cmsg) 1350 return -TARGET_EFAULT; 1351 1352 while (cmsg && target_cmsg) { 1353 void *data = CMSG_DATA(cmsg); 1354 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1355 1356 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr)); 
        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
            (cmsg->cmsg_type == SCM_RIGHTS)) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
                   (cmsg->cmsg_type == SO_TIMESTAMP) &&
                   (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

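/* The socket option helpers below translate the option level and name from
 * target constants to host constants and copy the option value in or out of
 * guest memory around the corresponding host setsockopt()/getsockopt() call.
 */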
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

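/* lock_iovec() builds a host struct iovec array from the target's iovec
 * list, locking each referenced guest buffer into host memory;
 * unlock_iovec() releases those buffers again, copying data back to the
 * guest for vectors that were locked for writing.
 */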
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        errno = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            errno = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                errno = EFAULT;
                goto fail;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    free(vec);
 fail2:
    unlock_user(target_vec, target_addr, 0);
    return NULL;
}

static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -TARGET_EAFNOSUPPORT; /* NETLINK socket connections are not supported */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

*/ 1913 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1914 socklen_t addrlen) 1915 { 1916 void *addr; 1917 abi_long ret; 1918 1919 if ((int)addrlen < 0) { 1920 return -TARGET_EINVAL; 1921 } 1922 1923 addr = alloca(addrlen); 1924 1925 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1926 if (ret) 1927 return ret; 1928 1929 return get_errno(connect(sockfd, addr, addrlen)); 1930 } 1931 1932 /* do_sendrecvmsg() Must return target values and target errnos. */ 1933 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1934 int flags, int send) 1935 { 1936 abi_long ret, len; 1937 struct target_msghdr *msgp; 1938 struct msghdr msg; 1939 int count; 1940 struct iovec *vec; 1941 abi_ulong target_vec; 1942 1943 /* FIXME */ 1944 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1945 msgp, 1946 target_msg, 1947 send ? 1 : 0)) 1948 return -TARGET_EFAULT; 1949 if (msgp->msg_name) { 1950 msg.msg_namelen = tswap32(msgp->msg_namelen); 1951 msg.msg_name = alloca(msg.msg_namelen); 1952 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1953 msg.msg_namelen); 1954 if (ret) { 1955 goto out2; 1956 } 1957 } else { 1958 msg.msg_name = NULL; 1959 msg.msg_namelen = 0; 1960 } 1961 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1962 msg.msg_control = alloca(msg.msg_controllen); 1963 msg.msg_flags = tswap32(msgp->msg_flags); 1964 1965 count = tswapal(msgp->msg_iovlen); 1966 target_vec = tswapal(msgp->msg_iov); 1967 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 1968 target_vec, count, send); 1969 if (vec == NULL) { 1970 ret = -host_to_target_errno(errno); 1971 goto out2; 1972 } 1973 msg.msg_iovlen = count; 1974 msg.msg_iov = vec; 1975 1976 if (send) { 1977 ret = target_to_host_cmsg(&msg, msgp); 1978 if (ret == 0) 1979 ret = get_errno(sendmsg(fd, &msg, flags)); 1980 } else { 1981 ret = get_errno(recvmsg(fd, &msg, flags)); 1982 if (!is_error(ret)) { 1983 len = ret; 1984 ret = host_to_target_cmsg(msgp, &msg); 1985 if (!is_error(ret)) { 1986 msgp->msg_namelen = tswap32(msg.msg_namelen); 1987 if (msg.msg_name != NULL) { 1988 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 1989 msg.msg_name, msg.msg_namelen); 1990 if (ret) { 1991 goto out; 1992 } 1993 } 1994 1995 ret = len; 1996 } 1997 } 1998 } 1999 2000 out: 2001 unlock_iovec(vec, target_vec, count, !send); 2002 out2: 2003 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 2004 return ret; 2005 } 2006 2007 /* If we don't have a system accept4() then just call accept. 2008 * The callsites to do_accept4() will ensure that they don't 2009 * pass a non-zero flags argument in this config. 2010 */ 2011 #ifndef CONFIG_ACCEPT4 2012 static inline int accept4(int sockfd, struct sockaddr *addr, 2013 socklen_t *addrlen, int flags) 2014 { 2015 assert(flags == 0); 2016 return accept(sockfd, addr, addrlen); 2017 } 2018 #endif 2019 2020 /* do_accept4() Must return target values and target errnos. 
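 When target_addr is zero the guest does not want the peer address and accept4() is called with NULL addr/addrlen. Otherwise the guest-supplied addrlen is read, the accepted peer address is converted back with host_to_target_sockaddr(), and the updated length is written back to the guest. The flags argument is passed through unchanged; when the host has no accept4(), the fallback wrapper above asserts that flags is zero.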
*/ 2021 static abi_long do_accept4(int fd, abi_ulong target_addr, 2022 abi_ulong target_addrlen_addr, int flags) 2023 { 2024 socklen_t addrlen; 2025 void *addr; 2026 abi_long ret; 2027 2028 if (target_addr == 0) { 2029 return get_errno(accept4(fd, NULL, NULL, flags)); 2030 } 2031 2032 /* linux returns EINVAL if addrlen pointer is invalid */ 2033 if (get_user_u32(addrlen, target_addrlen_addr)) 2034 return -TARGET_EINVAL; 2035 2036 if ((int)addrlen < 0) { 2037 return -TARGET_EINVAL; 2038 } 2039 2040 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2041 return -TARGET_EINVAL; 2042 2043 addr = alloca(addrlen); 2044 2045 ret = get_errno(accept4(fd, addr, &addrlen, flags)); 2046 if (!is_error(ret)) { 2047 host_to_target_sockaddr(target_addr, addr, addrlen); 2048 if (put_user_u32(addrlen, target_addrlen_addr)) 2049 ret = -TARGET_EFAULT; 2050 } 2051 return ret; 2052 } 2053 2054 /* do_getpeername() Must return target values and target errnos. */ 2055 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2056 abi_ulong target_addrlen_addr) 2057 { 2058 socklen_t addrlen; 2059 void *addr; 2060 abi_long ret; 2061 2062 if (get_user_u32(addrlen, target_addrlen_addr)) 2063 return -TARGET_EFAULT; 2064 2065 if ((int)addrlen < 0) { 2066 return -TARGET_EINVAL; 2067 } 2068 2069 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2070 return -TARGET_EFAULT; 2071 2072 addr = alloca(addrlen); 2073 2074 ret = get_errno(getpeername(fd, addr, &addrlen)); 2075 if (!is_error(ret)) { 2076 host_to_target_sockaddr(target_addr, addr, addrlen); 2077 if (put_user_u32(addrlen, target_addrlen_addr)) 2078 ret = -TARGET_EFAULT; 2079 } 2080 return ret; 2081 } 2082 2083 /* do_getsockname() Must return target values and target errnos. */ 2084 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2085 abi_ulong target_addrlen_addr) 2086 { 2087 socklen_t addrlen; 2088 void *addr; 2089 abi_long ret; 2090 2091 if (get_user_u32(addrlen, target_addrlen_addr)) 2092 return -TARGET_EFAULT; 2093 2094 if ((int)addrlen < 0) { 2095 return -TARGET_EINVAL; 2096 } 2097 2098 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2099 return -TARGET_EFAULT; 2100 2101 addr = alloca(addrlen); 2102 2103 ret = get_errno(getsockname(fd, addr, &addrlen)); 2104 if (!is_error(ret)) { 2105 host_to_target_sockaddr(target_addr, addr, addrlen); 2106 if (put_user_u32(addrlen, target_addrlen_addr)) 2107 ret = -TARGET_EFAULT; 2108 } 2109 return ret; 2110 } 2111 2112 /* do_socketpair() Must return target values and target errnos. */ 2113 static abi_long do_socketpair(int domain, int type, int protocol, 2114 abi_ulong target_tab_addr) 2115 { 2116 int tab[2]; 2117 abi_long ret; 2118 2119 ret = get_errno(socketpair(domain, type, protocol, tab)); 2120 if (!is_error(ret)) { 2121 if (put_user_s32(tab[0], target_tab_addr) 2122 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2123 ret = -TARGET_EFAULT; 2124 } 2125 return ret; 2126 } 2127 2128 /* do_sendto() Must return target values and target errnos. 
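 A zero target_addr means no destination address was supplied and the call degenerates to a plain send(); otherwise the destination sockaddr is converted with target_to_host_sockaddr() and sendto() is used. The message buffer is locked for reading for the duration of the host call.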
*/ 2129 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2130 abi_ulong target_addr, socklen_t addrlen) 2131 { 2132 void *addr; 2133 void *host_msg; 2134 abi_long ret; 2135 2136 if ((int)addrlen < 0) { 2137 return -TARGET_EINVAL; 2138 } 2139 2140 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2141 if (!host_msg) 2142 return -TARGET_EFAULT; 2143 if (target_addr) { 2144 addr = alloca(addrlen); 2145 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2146 if (ret) { 2147 unlock_user(host_msg, msg, 0); 2148 return ret; 2149 } 2150 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2151 } else { 2152 ret = get_errno(send(fd, host_msg, len, flags)); 2153 } 2154 unlock_user(host_msg, msg, 0); 2155 return ret; 2156 } 2157 2158 /* do_recvfrom() Must return target values and target errnos. */ 2159 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2160 abi_ulong target_addr, 2161 abi_ulong target_addrlen) 2162 { 2163 socklen_t addrlen; 2164 void *addr; 2165 void *host_msg; 2166 abi_long ret; 2167 2168 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2169 if (!host_msg) 2170 return -TARGET_EFAULT; 2171 if (target_addr) { 2172 if (get_user_u32(addrlen, target_addrlen)) { 2173 ret = -TARGET_EFAULT; 2174 goto fail; 2175 } 2176 if ((int)addrlen < 0) { 2177 ret = -TARGET_EINVAL; 2178 goto fail; 2179 } 2180 addr = alloca(addrlen); 2181 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2182 } else { 2183 addr = NULL; /* To keep compiler quiet. */ 2184 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2185 } 2186 if (!is_error(ret)) { 2187 if (target_addr) { 2188 host_to_target_sockaddr(target_addr, addr, addrlen); 2189 if (put_user_u32(addrlen, target_addrlen)) { 2190 ret = -TARGET_EFAULT; 2191 goto fail; 2192 } 2193 } 2194 unlock_user(host_msg, msg, len); 2195 } else { 2196 fail: 2197 unlock_user(host_msg, msg, 0); 2198 } 2199 return ret; 2200 } 2201 2202 #ifdef TARGET_NR_socketcall 2203 /* do_socketcall() Must return target values and target errnos. 
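 On targets that funnel the socket API through sys_socketcall, vptr points to an array of abi_ulong arguments in guest memory; each case below fetches the expected number of words with get_user_ual() and dispatches to the matching do_*() helper. As an illustrative sketch (not part of this file): a guest issuing socket(AF_INET, SOCK_STREAM, 0) via socketcall arrives here with num == SOCKOP_socket and vptr pointing at the three consecutive words { AF_INET, SOCK_STREAM, 0 }, which are read back below as domain, type and protocol.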
*/ 2204 static abi_long do_socketcall(int num, abi_ulong vptr) 2205 { 2206 abi_long ret; 2207 const int n = sizeof(abi_ulong); 2208 2209 switch(num) { 2210 case SOCKOP_socket: 2211 { 2212 abi_ulong domain, type, protocol; 2213 2214 if (get_user_ual(domain, vptr) 2215 || get_user_ual(type, vptr + n) 2216 || get_user_ual(protocol, vptr + 2 * n)) 2217 return -TARGET_EFAULT; 2218 2219 ret = do_socket(domain, type, protocol); 2220 } 2221 break; 2222 case SOCKOP_bind: 2223 { 2224 abi_ulong sockfd; 2225 abi_ulong target_addr; 2226 socklen_t addrlen; 2227 2228 if (get_user_ual(sockfd, vptr) 2229 || get_user_ual(target_addr, vptr + n) 2230 || get_user_ual(addrlen, vptr + 2 * n)) 2231 return -TARGET_EFAULT; 2232 2233 ret = do_bind(sockfd, target_addr, addrlen); 2234 } 2235 break; 2236 case SOCKOP_connect: 2237 { 2238 abi_ulong sockfd; 2239 abi_ulong target_addr; 2240 socklen_t addrlen; 2241 2242 if (get_user_ual(sockfd, vptr) 2243 || get_user_ual(target_addr, vptr + n) 2244 || get_user_ual(addrlen, vptr + 2 * n)) 2245 return -TARGET_EFAULT; 2246 2247 ret = do_connect(sockfd, target_addr, addrlen); 2248 } 2249 break; 2250 case SOCKOP_listen: 2251 { 2252 abi_ulong sockfd, backlog; 2253 2254 if (get_user_ual(sockfd, vptr) 2255 || get_user_ual(backlog, vptr + n)) 2256 return -TARGET_EFAULT; 2257 2258 ret = get_errno(listen(sockfd, backlog)); 2259 } 2260 break; 2261 case SOCKOP_accept: 2262 { 2263 abi_ulong sockfd; 2264 abi_ulong target_addr, target_addrlen; 2265 2266 if (get_user_ual(sockfd, vptr) 2267 || get_user_ual(target_addr, vptr + n) 2268 || get_user_ual(target_addrlen, vptr + 2 * n)) 2269 return -TARGET_EFAULT; 2270 2271 ret = do_accept4(sockfd, target_addr, target_addrlen, 0); 2272 } 2273 break; 2274 case SOCKOP_getsockname: 2275 { 2276 abi_ulong sockfd; 2277 abi_ulong target_addr, target_addrlen; 2278 2279 if (get_user_ual(sockfd, vptr) 2280 || get_user_ual(target_addr, vptr + n) 2281 || get_user_ual(target_addrlen, vptr + 2 * n)) 2282 return -TARGET_EFAULT; 2283 2284 ret = do_getsockname(sockfd, target_addr, target_addrlen); 2285 } 2286 break; 2287 case SOCKOP_getpeername: 2288 { 2289 abi_ulong sockfd; 2290 abi_ulong target_addr, target_addrlen; 2291 2292 if (get_user_ual(sockfd, vptr) 2293 || get_user_ual(target_addr, vptr + n) 2294 || get_user_ual(target_addrlen, vptr + 2 * n)) 2295 return -TARGET_EFAULT; 2296 2297 ret = do_getpeername(sockfd, target_addr, target_addrlen); 2298 } 2299 break; 2300 case SOCKOP_socketpair: 2301 { 2302 abi_ulong domain, type, protocol; 2303 abi_ulong tab; 2304 2305 if (get_user_ual(domain, vptr) 2306 || get_user_ual(type, vptr + n) 2307 || get_user_ual(protocol, vptr + 2 * n) 2308 || get_user_ual(tab, vptr + 3 * n)) 2309 return -TARGET_EFAULT; 2310 2311 ret = do_socketpair(domain, type, protocol, tab); 2312 } 2313 break; 2314 case SOCKOP_send: 2315 { 2316 abi_ulong sockfd; 2317 abi_ulong msg; 2318 size_t len; 2319 abi_ulong flags; 2320 2321 if (get_user_ual(sockfd, vptr) 2322 || get_user_ual(msg, vptr + n) 2323 || get_user_ual(len, vptr + 2 * n) 2324 || get_user_ual(flags, vptr + 3 * n)) 2325 return -TARGET_EFAULT; 2326 2327 ret = do_sendto(sockfd, msg, len, flags, 0, 0); 2328 } 2329 break; 2330 case SOCKOP_recv: 2331 { 2332 abi_ulong sockfd; 2333 abi_ulong msg; 2334 size_t len; 2335 abi_ulong flags; 2336 2337 if (get_user_ual(sockfd, vptr) 2338 || get_user_ual(msg, vptr + n) 2339 || get_user_ual(len, vptr + 2 * n) 2340 || get_user_ual(flags, vptr + 3 * n)) 2341 return -TARGET_EFAULT; 2342 2343 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0); 2344 } 2345 
break; 2346 case SOCKOP_sendto: 2347 { 2348 abi_ulong sockfd; 2349 abi_ulong msg; 2350 size_t len; 2351 abi_ulong flags; 2352 abi_ulong addr; 2353 socklen_t addrlen; 2354 2355 if (get_user_ual(sockfd, vptr) 2356 || get_user_ual(msg, vptr + n) 2357 || get_user_ual(len, vptr + 2 * n) 2358 || get_user_ual(flags, vptr + 3 * n) 2359 || get_user_ual(addr, vptr + 4 * n) 2360 || get_user_ual(addrlen, vptr + 5 * n)) 2361 return -TARGET_EFAULT; 2362 2363 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen); 2364 } 2365 break; 2366 case SOCKOP_recvfrom: 2367 { 2368 abi_ulong sockfd; 2369 abi_ulong msg; 2370 size_t len; 2371 abi_ulong flags; 2372 abi_ulong addr; 2373 socklen_t addrlen; 2374 2375 if (get_user_ual(sockfd, vptr) 2376 || get_user_ual(msg, vptr + n) 2377 || get_user_ual(len, vptr + 2 * n) 2378 || get_user_ual(flags, vptr + 3 * n) 2379 || get_user_ual(addr, vptr + 4 * n) 2380 || get_user_ual(addrlen, vptr + 5 * n)) 2381 return -TARGET_EFAULT; 2382 2383 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen); 2384 } 2385 break; 2386 case SOCKOP_shutdown: 2387 { 2388 abi_ulong sockfd, how; 2389 2390 if (get_user_ual(sockfd, vptr) 2391 || get_user_ual(how, vptr + n)) 2392 return -TARGET_EFAULT; 2393 2394 ret = get_errno(shutdown(sockfd, how)); 2395 } 2396 break; 2397 case SOCKOP_sendmsg: 2398 case SOCKOP_recvmsg: 2399 { 2400 abi_ulong fd; 2401 abi_ulong target_msg; 2402 abi_ulong flags; 2403 2404 if (get_user_ual(fd, vptr) 2405 || get_user_ual(target_msg, vptr + n) 2406 || get_user_ual(flags, vptr + 2 * n)) 2407 return -TARGET_EFAULT; 2408 2409 ret = do_sendrecvmsg(fd, target_msg, flags, 2410 (num == SOCKOP_sendmsg)); 2411 } 2412 break; 2413 case SOCKOP_setsockopt: 2414 { 2415 abi_ulong sockfd; 2416 abi_ulong level; 2417 abi_ulong optname; 2418 abi_ulong optval; 2419 socklen_t optlen; 2420 2421 if (get_user_ual(sockfd, vptr) 2422 || get_user_ual(level, vptr + n) 2423 || get_user_ual(optname, vptr + 2 * n) 2424 || get_user_ual(optval, vptr + 3 * n) 2425 || get_user_ual(optlen, vptr + 4 * n)) 2426 return -TARGET_EFAULT; 2427 2428 ret = do_setsockopt(sockfd, level, optname, optval, optlen); 2429 } 2430 break; 2431 case SOCKOP_getsockopt: 2432 { 2433 abi_ulong sockfd; 2434 abi_ulong level; 2435 abi_ulong optname; 2436 abi_ulong optval; 2437 socklen_t optlen; 2438 2439 if (get_user_ual(sockfd, vptr) 2440 || get_user_ual(level, vptr + n) 2441 || get_user_ual(optname, vptr + 2 * n) 2442 || get_user_ual(optval, vptr + 3 * n) 2443 || get_user_ual(optlen, vptr + 4 * n)) 2444 return -TARGET_EFAULT; 2445 2446 ret = do_getsockopt(sockfd, level, optname, optval, optlen); 2447 } 2448 break; 2449 default: 2450 gemu_log("Unsupported socketcall: %d\n", num); 2451 ret = -TARGET_ENOSYS; 2452 break; 2453 } 2454 return ret; 2455 } 2456 #endif 2457 2458 #define N_SHM_REGIONS 32 2459 2460 static struct shm_region { 2461 abi_ulong start; 2462 abi_ulong size; 2463 } shm_regions[N_SHM_REGIONS]; 2464 2465 struct target_ipc_perm 2466 { 2467 abi_long __key; 2468 abi_ulong uid; 2469 abi_ulong gid; 2470 abi_ulong cuid; 2471 abi_ulong cgid; 2472 unsigned short int mode; 2473 unsigned short int __pad1; 2474 unsigned short int __seq; 2475 unsigned short int __pad2; 2476 abi_ulong __unused1; 2477 abi_ulong __unused2; 2478 }; 2479 2480 struct target_semid_ds 2481 { 2482 struct target_ipc_perm sem_perm; 2483 abi_ulong sem_otime; 2484 abi_ulong __unused1; 2485 abi_ulong sem_ctime; 2486 abi_ulong __unused2; 2487 abi_ulong sem_nsems; 2488 abi_ulong __unused3; 2489 abi_ulong __unused4; 2490 }; 2491 2492 static inline abi_long 
target_to_host_ipc_perm(struct ipc_perm *host_ip, 2493 abi_ulong target_addr) 2494 { 2495 struct target_ipc_perm *target_ip; 2496 struct target_semid_ds *target_sd; 2497 2498 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2499 return -TARGET_EFAULT; 2500 target_ip = &(target_sd->sem_perm); 2501 host_ip->__key = tswapal(target_ip->__key); 2502 host_ip->uid = tswapal(target_ip->uid); 2503 host_ip->gid = tswapal(target_ip->gid); 2504 host_ip->cuid = tswapal(target_ip->cuid); 2505 host_ip->cgid = tswapal(target_ip->cgid); 2506 host_ip->mode = tswap16(target_ip->mode); 2507 unlock_user_struct(target_sd, target_addr, 0); 2508 return 0; 2509 } 2510 2511 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2512 struct ipc_perm *host_ip) 2513 { 2514 struct target_ipc_perm *target_ip; 2515 struct target_semid_ds *target_sd; 2516 2517 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2518 return -TARGET_EFAULT; 2519 target_ip = &(target_sd->sem_perm); 2520 target_ip->__key = tswapal(host_ip->__key); 2521 target_ip->uid = tswapal(host_ip->uid); 2522 target_ip->gid = tswapal(host_ip->gid); 2523 target_ip->cuid = tswapal(host_ip->cuid); 2524 target_ip->cgid = tswapal(host_ip->cgid); 2525 target_ip->mode = tswap16(host_ip->mode); 2526 unlock_user_struct(target_sd, target_addr, 1); 2527 return 0; 2528 } 2529 2530 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2531 abi_ulong target_addr) 2532 { 2533 struct target_semid_ds *target_sd; 2534 2535 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2536 return -TARGET_EFAULT; 2537 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2538 return -TARGET_EFAULT; 2539 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2540 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2541 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2542 unlock_user_struct(target_sd, target_addr, 0); 2543 return 0; 2544 } 2545 2546 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2547 struct semid_ds *host_sd) 2548 { 2549 struct target_semid_ds *target_sd; 2550 2551 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2552 return -TARGET_EFAULT; 2553 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2554 return -TARGET_EFAULT; 2555 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2556 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2557 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2558 unlock_user_struct(target_sd, target_addr, 1); 2559 return 0; 2560 } 2561 2562 struct target_seminfo { 2563 int semmap; 2564 int semmni; 2565 int semmns; 2566 int semmnu; 2567 int semmsl; 2568 int semopm; 2569 int semume; 2570 int semusz; 2571 int semvmx; 2572 int semaem; 2573 }; 2574 2575 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2576 struct seminfo *host_seminfo) 2577 { 2578 struct target_seminfo *target_seminfo; 2579 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2580 return -TARGET_EFAULT; 2581 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2582 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2583 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2584 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2585 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2586 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2587 __put_user(host_seminfo->semume, &target_seminfo->semume); 2588 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2589 
__put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2590 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2591 unlock_user_struct(target_seminfo, target_addr, 1); 2592 return 0; 2593 } 2594 2595 union semun { 2596 int val; 2597 struct semid_ds *buf; 2598 unsigned short *array; 2599 struct seminfo *__buf; 2600 }; 2601 2602 union target_semun { 2603 int val; 2604 abi_ulong buf; 2605 abi_ulong array; 2606 abi_ulong __buf; 2607 }; 2608 2609 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2610 abi_ulong target_addr) 2611 { 2612 int nsems; 2613 unsigned short *array; 2614 union semun semun; 2615 struct semid_ds semid_ds; 2616 int i, ret; 2617 2618 semun.buf = &semid_ds; 2619 2620 ret = semctl(semid, 0, IPC_STAT, semun); 2621 if (ret == -1) 2622 return get_errno(ret); 2623 2624 nsems = semid_ds.sem_nsems; 2625 2626 *host_array = malloc(nsems*sizeof(unsigned short)); 2627 array = lock_user(VERIFY_READ, target_addr, 2628 nsems*sizeof(unsigned short), 1); 2629 if (!array) 2630 return -TARGET_EFAULT; 2631 2632 for(i=0; i<nsems; i++) { 2633 __get_user((*host_array)[i], &array[i]); 2634 } 2635 unlock_user(array, target_addr, 0); 2636 2637 return 0; 2638 } 2639 2640 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2641 unsigned short **host_array) 2642 { 2643 int nsems; 2644 unsigned short *array; 2645 union semun semun; 2646 struct semid_ds semid_ds; 2647 int i, ret; 2648 2649 semun.buf = &semid_ds; 2650 2651 ret = semctl(semid, 0, IPC_STAT, semun); 2652 if (ret == -1) 2653 return get_errno(ret); 2654 2655 nsems = semid_ds.sem_nsems; 2656 2657 array = lock_user(VERIFY_WRITE, target_addr, 2658 nsems*sizeof(unsigned short), 0); 2659 if (!array) 2660 return -TARGET_EFAULT; 2661 2662 for(i=0; i<nsems; i++) { 2663 __put_user((*host_array)[i], &array[i]); 2664 } 2665 free(*host_array); 2666 unlock_user(array, target_addr, 1); 2667 2668 return 0; 2669 } 2670 2671 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2672 union target_semun target_su) 2673 { 2674 union semun arg; 2675 struct semid_ds dsarg; 2676 unsigned short *array = NULL; 2677 struct seminfo seminfo; 2678 abi_long ret = -TARGET_EINVAL; 2679 abi_long err; 2680 cmd &= 0xff; 2681 2682 switch( cmd ) { 2683 case GETVAL: 2684 case SETVAL: 2685 arg.val = tswap32(target_su.val); 2686 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2687 target_su.val = tswap32(arg.val); 2688 break; 2689 case GETALL: 2690 case SETALL: 2691 err = target_to_host_semarray(semid, &array, target_su.array); 2692 if (err) 2693 return err; 2694 arg.array = array; 2695 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2696 err = host_to_target_semarray(semid, target_su.array, &array); 2697 if (err) 2698 return err; 2699 break; 2700 case IPC_STAT: 2701 case IPC_SET: 2702 case SEM_STAT: 2703 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2704 if (err) 2705 return err; 2706 arg.buf = &dsarg; 2707 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2708 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2709 if (err) 2710 return err; 2711 break; 2712 case IPC_INFO: 2713 case SEM_INFO: 2714 arg.__buf = &seminfo; 2715 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2716 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2717 if (err) 2718 return err; 2719 break; 2720 case IPC_RMID: 2721 case GETPID: 2722 case GETNCNT: 2723 case GETZCNT: 2724 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2725 break; 2726 } 2727 2728 return ret; 2729 } 2730 2731 struct 
target_sembuf { 2732 unsigned short sem_num; 2733 short sem_op; 2734 short sem_flg; 2735 }; 2736 2737 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2738 abi_ulong target_addr, 2739 unsigned nsops) 2740 { 2741 struct target_sembuf *target_sembuf; 2742 int i; 2743 2744 target_sembuf = lock_user(VERIFY_READ, target_addr, 2745 nsops*sizeof(struct target_sembuf), 1); 2746 if (!target_sembuf) 2747 return -TARGET_EFAULT; 2748 2749 for(i=0; i<nsops; i++) { 2750 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2751 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2752 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2753 } 2754 2755 unlock_user(target_sembuf, target_addr, 0); 2756 2757 return 0; 2758 } 2759 2760 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2761 { 2762 struct sembuf sops[nsops]; 2763 2764 if (target_to_host_sembuf(sops, ptr, nsops)) 2765 return -TARGET_EFAULT; 2766 2767 return get_errno(semop(semid, sops, nsops)); 2768 } 2769 2770 struct target_msqid_ds 2771 { 2772 struct target_ipc_perm msg_perm; 2773 abi_ulong msg_stime; 2774 #if TARGET_ABI_BITS == 32 2775 abi_ulong __unused1; 2776 #endif 2777 abi_ulong msg_rtime; 2778 #if TARGET_ABI_BITS == 32 2779 abi_ulong __unused2; 2780 #endif 2781 abi_ulong msg_ctime; 2782 #if TARGET_ABI_BITS == 32 2783 abi_ulong __unused3; 2784 #endif 2785 abi_ulong __msg_cbytes; 2786 abi_ulong msg_qnum; 2787 abi_ulong msg_qbytes; 2788 abi_ulong msg_lspid; 2789 abi_ulong msg_lrpid; 2790 abi_ulong __unused4; 2791 abi_ulong __unused5; 2792 }; 2793 2794 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2795 abi_ulong target_addr) 2796 { 2797 struct target_msqid_ds *target_md; 2798 2799 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2800 return -TARGET_EFAULT; 2801 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2802 return -TARGET_EFAULT; 2803 host_md->msg_stime = tswapal(target_md->msg_stime); 2804 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2805 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2806 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2807 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2808 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2809 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2810 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2811 unlock_user_struct(target_md, target_addr, 0); 2812 return 0; 2813 } 2814 2815 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2816 struct msqid_ds *host_md) 2817 { 2818 struct target_msqid_ds *target_md; 2819 2820 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2821 return -TARGET_EFAULT; 2822 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2823 return -TARGET_EFAULT; 2824 target_md->msg_stime = tswapal(host_md->msg_stime); 2825 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2826 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2827 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2828 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2829 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2830 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2831 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2832 unlock_user_struct(target_md, target_addr, 1); 2833 return 0; 2834 } 2835 2836 struct target_msginfo { 2837 int msgpool; 2838 int msgmap; 2839 int msgmax; 2840 int msgmnb; 2841 int msgmni; 2842 int msgssz; 2843 int msgtql; 2844 unsigned short int 
msgseg; 2845 }; 2846 2847 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2848 struct msginfo *host_msginfo) 2849 { 2850 struct target_msginfo *target_msginfo; 2851 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2852 return -TARGET_EFAULT; 2853 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2854 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2855 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2856 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2857 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2858 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2859 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2860 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2861 unlock_user_struct(target_msginfo, target_addr, 1); 2862 return 0; 2863 } 2864 2865 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2866 { 2867 struct msqid_ds dsarg; 2868 struct msginfo msginfo; 2869 abi_long ret = -TARGET_EINVAL; 2870 2871 cmd &= 0xff; 2872 2873 switch (cmd) { 2874 case IPC_STAT: 2875 case IPC_SET: 2876 case MSG_STAT: 2877 if (target_to_host_msqid_ds(&dsarg,ptr)) 2878 return -TARGET_EFAULT; 2879 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2880 if (host_to_target_msqid_ds(ptr,&dsarg)) 2881 return -TARGET_EFAULT; 2882 break; 2883 case IPC_RMID: 2884 ret = get_errno(msgctl(msgid, cmd, NULL)); 2885 break; 2886 case IPC_INFO: 2887 case MSG_INFO: 2888 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2889 if (host_to_target_msginfo(ptr, &msginfo)) 2890 return -TARGET_EFAULT; 2891 break; 2892 } 2893 2894 return ret; 2895 } 2896 2897 struct target_msgbuf { 2898 abi_long mtype; 2899 char mtext[1]; 2900 }; 2901 2902 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2903 unsigned int msgsz, int msgflg) 2904 { 2905 struct target_msgbuf *target_mb; 2906 struct msgbuf *host_mb; 2907 abi_long ret = 0; 2908 2909 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2910 return -TARGET_EFAULT; 2911 host_mb = malloc(msgsz+sizeof(long)); 2912 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2913 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2914 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2915 free(host_mb); 2916 unlock_user_struct(target_mb, msgp, 0); 2917 2918 return ret; 2919 } 2920 2921 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2922 unsigned int msgsz, abi_long msgtyp, 2923 int msgflg) 2924 { 2925 struct target_msgbuf *target_mb; 2926 char *target_mtext; 2927 struct msgbuf *host_mb; 2928 abi_long ret = 0; 2929 2930 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2931 return -TARGET_EFAULT; 2932 2933 host_mb = g_malloc(msgsz+sizeof(long)); 2934 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2935 2936 if (ret > 0) { 2937 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2938 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2939 if (!target_mtext) { 2940 ret = -TARGET_EFAULT; 2941 goto end; 2942 } 2943 memcpy(target_mb->mtext, host_mb->mtext, ret); 2944 unlock_user(target_mtext, target_mtext_addr, ret); 2945 } 2946 2947 target_mb->mtype = tswapal(host_mb->mtype); 2948 2949 end: 2950 if (target_mb) 2951 unlock_user_struct(target_mb, msgp, 1); 2952 g_free(host_mb); 2953 return ret; 2954 } 2955 2956 struct target_shmid_ds 2957 { 2958 struct target_ipc_perm shm_perm; 2959 abi_ulong shm_segsz; 2960 abi_ulong shm_atime; 2961 #if TARGET_ABI_BITS == 32 2962 abi_ulong __unused1; 
2963 #endif 2964 abi_ulong shm_dtime; 2965 #if TARGET_ABI_BITS == 32 2966 abi_ulong __unused2; 2967 #endif 2968 abi_ulong shm_ctime; 2969 #if TARGET_ABI_BITS == 32 2970 abi_ulong __unused3; 2971 #endif 2972 int shm_cpid; 2973 int shm_lpid; 2974 abi_ulong shm_nattch; 2975 unsigned long int __unused4; 2976 unsigned long int __unused5; 2977 }; 2978 2979 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2980 abi_ulong target_addr) 2981 { 2982 struct target_shmid_ds *target_sd; 2983 2984 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2985 return -TARGET_EFAULT; 2986 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2987 return -TARGET_EFAULT; 2988 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2989 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2990 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2991 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2992 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2993 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2994 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2995 unlock_user_struct(target_sd, target_addr, 0); 2996 return 0; 2997 } 2998 2999 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 3000 struct shmid_ds *host_sd) 3001 { 3002 struct target_shmid_ds *target_sd; 3003 3004 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 3005 return -TARGET_EFAULT; 3006 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 3007 return -TARGET_EFAULT; 3008 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 3009 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 3010 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 3011 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 3012 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 3013 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 3014 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 3015 unlock_user_struct(target_sd, target_addr, 1); 3016 return 0; 3017 } 3018 3019 struct target_shminfo { 3020 abi_ulong shmmax; 3021 abi_ulong shmmin; 3022 abi_ulong shmmni; 3023 abi_ulong shmseg; 3024 abi_ulong shmall; 3025 }; 3026 3027 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 3028 struct shminfo *host_shminfo) 3029 { 3030 struct target_shminfo *target_shminfo; 3031 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 3032 return -TARGET_EFAULT; 3033 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 3034 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 3035 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 3036 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 3037 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 3038 unlock_user_struct(target_shminfo, target_addr, 1); 3039 return 0; 3040 } 3041 3042 struct target_shm_info { 3043 int used_ids; 3044 abi_ulong shm_tot; 3045 abi_ulong shm_rss; 3046 abi_ulong shm_swp; 3047 abi_ulong swap_attempts; 3048 abi_ulong swap_successes; 3049 }; 3050 3051 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 3052 struct shm_info *host_shm_info) 3053 { 3054 struct target_shm_info *target_shm_info; 3055 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 3056 return -TARGET_EFAULT; 3057 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 3058 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 3059 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 3060 
__put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 3061 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 3062 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 3063 unlock_user_struct(target_shm_info, target_addr, 1); 3064 return 0; 3065 } 3066 3067 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 3068 { 3069 struct shmid_ds dsarg; 3070 struct shminfo shminfo; 3071 struct shm_info shm_info; 3072 abi_long ret = -TARGET_EINVAL; 3073 3074 cmd &= 0xff; 3075 3076 switch(cmd) { 3077 case IPC_STAT: 3078 case IPC_SET: 3079 case SHM_STAT: 3080 if (target_to_host_shmid_ds(&dsarg, buf)) 3081 return -TARGET_EFAULT; 3082 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 3083 if (host_to_target_shmid_ds(buf, &dsarg)) 3084 return -TARGET_EFAULT; 3085 break; 3086 case IPC_INFO: 3087 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 3088 if (host_to_target_shminfo(buf, &shminfo)) 3089 return -TARGET_EFAULT; 3090 break; 3091 case SHM_INFO: 3092 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 3093 if (host_to_target_shm_info(buf, &shm_info)) 3094 return -TARGET_EFAULT; 3095 break; 3096 case IPC_RMID: 3097 case SHM_LOCK: 3098 case SHM_UNLOCK: 3099 ret = get_errno(shmctl(shmid, cmd, NULL)); 3100 break; 3101 } 3102 3103 return ret; 3104 } 3105 3106 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 3107 { 3108 abi_long raddr; 3109 void *host_raddr; 3110 struct shmid_ds shm_info; 3111 int i,ret; 3112 3113 /* find out the length of the shared memory segment */ 3114 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 3115 if (is_error(ret)) { 3116 /* can't get length, bail out */ 3117 return ret; 3118 } 3119 3120 mmap_lock(); 3121 3122 if (shmaddr) 3123 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 3124 else { 3125 abi_ulong mmap_start; 3126 3127 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 3128 3129 if (mmap_start == -1) { 3130 errno = ENOMEM; 3131 host_raddr = (void *)-1; 3132 } else 3133 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 3134 } 3135 3136 if (host_raddr == (void *)-1) { 3137 mmap_unlock(); 3138 return get_errno((long)host_raddr); 3139 } 3140 raddr=h2g((unsigned long)host_raddr); 3141 3142 page_set_flags(raddr, raddr + shm_info.shm_segsz, 3143 PAGE_VALID | PAGE_READ | 3144 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3145 3146 for (i = 0; i < N_SHM_REGIONS; i++) { 3147 if (shm_regions[i].start == 0) { 3148 shm_regions[i].start = raddr; 3149 shm_regions[i].size = shm_info.shm_segsz; 3150 break; 3151 } 3152 } 3153 3154 mmap_unlock(); 3155 return raddr; 3156 3157 } 3158 3159 static inline abi_long do_shmdt(abi_ulong shmaddr) 3160 { 3161 int i; 3162 3163 for (i = 0; i < N_SHM_REGIONS; ++i) { 3164 if (shm_regions[i].start == shmaddr) { 3165 shm_regions[i].start = 0; 3166 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3167 break; 3168 } 3169 } 3170 3171 return get_errno(shmdt(g2h(shmaddr))); 3172 } 3173 3174 #ifdef TARGET_NR_ipc 3175 /* ??? This only works with linear mappings. */ 3176 /* do_ipc() must return target values and target errnos. 
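 The low 16 bits of 'call' select the operation (IPCOP_semop, IPCOP_shmat, ...) and the high 16 bits carry the interface version, which only affects the IPCOP_msgrcv and IPCOP_shmat sub-calls below; everything else is forwarded to the dedicated do_*() helpers or directly to the host syscall.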
*/ 3177 static abi_long do_ipc(unsigned int call, int first, 3178 int second, int third, 3179 abi_long ptr, abi_long fifth) 3180 { 3181 int version; 3182 abi_long ret = 0; 3183 3184 version = call >> 16; 3185 call &= 0xffff; 3186 3187 switch (call) { 3188 case IPCOP_semop: 3189 ret = do_semop(first, ptr, second); 3190 break; 3191 3192 case IPCOP_semget: 3193 ret = get_errno(semget(first, second, third)); 3194 break; 3195 3196 case IPCOP_semctl: 3197 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3198 break; 3199 3200 case IPCOP_msgget: 3201 ret = get_errno(msgget(first, second)); 3202 break; 3203 3204 case IPCOP_msgsnd: 3205 ret = do_msgsnd(first, ptr, second, third); 3206 break; 3207 3208 case IPCOP_msgctl: 3209 ret = do_msgctl(first, second, ptr); 3210 break; 3211 3212 case IPCOP_msgrcv: 3213 switch (version) { 3214 case 0: 3215 { 3216 struct target_ipc_kludge { 3217 abi_long msgp; 3218 abi_long msgtyp; 3219 } *tmp; 3220 3221 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3222 ret = -TARGET_EFAULT; 3223 break; 3224 } 3225 3226 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3227 3228 unlock_user_struct(tmp, ptr, 0); 3229 break; 3230 } 3231 default: 3232 ret = do_msgrcv(first, ptr, second, fifth, third); 3233 } 3234 break; 3235 3236 case IPCOP_shmat: 3237 switch (version) { 3238 default: 3239 { 3240 abi_ulong raddr; 3241 raddr = do_shmat(first, ptr, second); 3242 if (is_error(raddr)) 3243 return get_errno(raddr); 3244 if (put_user_ual(raddr, third)) 3245 return -TARGET_EFAULT; 3246 break; 3247 } 3248 case 1: 3249 ret = -TARGET_EINVAL; 3250 break; 3251 } 3252 break; 3253 case IPCOP_shmdt: 3254 ret = do_shmdt(ptr); 3255 break; 3256 3257 case IPCOP_shmget: 3258 /* IPC_* flag values are the same on all linux platforms */ 3259 ret = get_errno(shmget(first, second, third)); 3260 break; 3261 3262 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3263 case IPCOP_shmctl: 3264 ret = do_shmctl(first, second, third); 3265 break; 3266 default: 3267 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3268 ret = -TARGET_ENOSYS; 3269 break; 3270 } 3271 return ret; 3272 } 3273 #endif 3274 3275 /* kernel structure types definitions */ 3276 3277 #define STRUCT(name, ...) STRUCT_ ## name, 3278 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3279 enum { 3280 #include "syscall_types.h" 3281 }; 3282 #undef STRUCT 3283 #undef STRUCT_SPECIAL 3284 3285 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3286 #define STRUCT_SPECIAL(name) 3287 #include "syscall_types.h" 3288 #undef STRUCT 3289 #undef STRUCT_SPECIAL 3290 3291 typedef struct IOCTLEntry IOCTLEntry; 3292 3293 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3294 int fd, abi_long cmd, abi_long arg); 3295 3296 struct IOCTLEntry { 3297 unsigned int target_cmd; 3298 unsigned int host_cmd; 3299 const char *name; 3300 int access; 3301 do_ioctl_fn *do_ioctl; 3302 const argtype arg_type[5]; 3303 }; 3304 3305 #define IOC_R 0x0001 3306 #define IOC_W 0x0002 3307 #define IOC_RW (IOC_R | IOC_W) 3308 3309 #define MAX_STRUCT_SIZE 4096 3310 3311 #ifdef CONFIG_FIEMAP 3312 /* So fiemap access checks don't overflow on 32 bit systems. 3313 * This is very slightly smaller than the limit imposed by 3314 * the underlying kernel. 
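 * In other words, fm_extent_count values for which sizeof(struct fiemap) plus the extent array would exceed UINT_MAX bytes are rejected with -TARGET_EINVAL in do_ioctl_fs_ioc_fiemap() below.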
3315 */ 3316 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3317 / sizeof(struct fiemap_extent)) 3318 3319 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3320 int fd, abi_long cmd, abi_long arg) 3321 { 3322 /* The parameter for this ioctl is a struct fiemap followed 3323 * by an array of struct fiemap_extent whose size is set 3324 * in fiemap->fm_extent_count. The array is filled in by the 3325 * ioctl. 3326 */ 3327 int target_size_in, target_size_out; 3328 struct fiemap *fm; 3329 const argtype *arg_type = ie->arg_type; 3330 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3331 void *argptr, *p; 3332 abi_long ret; 3333 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3334 uint32_t outbufsz; 3335 int free_fm = 0; 3336 3337 assert(arg_type[0] == TYPE_PTR); 3338 assert(ie->access == IOC_RW); 3339 arg_type++; 3340 target_size_in = thunk_type_size(arg_type, 0); 3341 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3342 if (!argptr) { 3343 return -TARGET_EFAULT; 3344 } 3345 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3346 unlock_user(argptr, arg, 0); 3347 fm = (struct fiemap *)buf_temp; 3348 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3349 return -TARGET_EINVAL; 3350 } 3351 3352 outbufsz = sizeof (*fm) + 3353 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3354 3355 if (outbufsz > MAX_STRUCT_SIZE) { 3356 /* We can't fit all the extents into the fixed size buffer. 3357 * Allocate one that is large enough and use it instead. 3358 */ 3359 fm = malloc(outbufsz); 3360 if (!fm) { 3361 return -TARGET_ENOMEM; 3362 } 3363 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3364 free_fm = 1; 3365 } 3366 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3367 if (!is_error(ret)) { 3368 target_size_out = target_size_in; 3369 /* An extent_count of 0 means we were only counting the extents 3370 * so there are no structs to copy 3371 */ 3372 if (fm->fm_extent_count != 0) { 3373 target_size_out += fm->fm_mapped_extents * extent_size; 3374 } 3375 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3376 if (!argptr) { 3377 ret = -TARGET_EFAULT; 3378 } else { 3379 /* Convert the struct fiemap */ 3380 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3381 if (fm->fm_extent_count != 0) { 3382 p = argptr + target_size_in; 3383 /* ...and then all the struct fiemap_extents */ 3384 for (i = 0; i < fm->fm_mapped_extents; i++) { 3385 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3386 THUNK_TARGET); 3387 p += extent_size; 3388 } 3389 } 3390 unlock_user(argptr, arg, target_size_out); 3391 } 3392 } 3393 if (free_fm) { 3394 free(fm); 3395 } 3396 return ret; 3397 } 3398 #endif 3399 3400 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3401 int fd, abi_long cmd, abi_long arg) 3402 { 3403 const argtype *arg_type = ie->arg_type; 3404 int target_size; 3405 void *argptr; 3406 int ret; 3407 struct ifconf *host_ifconf; 3408 uint32_t outbufsz; 3409 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3410 int target_ifreq_size; 3411 int nb_ifreq; 3412 int free_buf = 0; 3413 int i; 3414 int target_ifc_len; 3415 abi_long target_ifc_buf; 3416 int host_ifc_len; 3417 char *host_ifc_buf; 3418 3419 assert(arg_type[0] == TYPE_PTR); 3420 assert(ie->access == IOC_RW); 3421 3422 arg_type++; 3423 target_size = thunk_type_size(arg_type, 0); 3424 3425 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3426 if (!argptr) 3427 return -TARGET_EFAULT; 3428 thunk_convert(buf_temp, argptr, 
arg_type, THUNK_HOST); 3429 unlock_user(argptr, arg, 0); 3430 3431 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3432 target_ifc_len = host_ifconf->ifc_len; 3433 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3434 3435 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3436 nb_ifreq = target_ifc_len / target_ifreq_size; 3437 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3438 3439 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3440 if (outbufsz > MAX_STRUCT_SIZE) { 3441 /* We can't fit all the extents into the fixed size buffer. 3442 * Allocate one that is large enough and use it instead. 3443 */ 3444 host_ifconf = malloc(outbufsz); 3445 if (!host_ifconf) { 3446 return -TARGET_ENOMEM; 3447 } 3448 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3449 free_buf = 1; 3450 } 3451 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3452 3453 host_ifconf->ifc_len = host_ifc_len; 3454 host_ifconf->ifc_buf = host_ifc_buf; 3455 3456 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3457 if (!is_error(ret)) { 3458 /* convert host ifc_len to target ifc_len */ 3459 3460 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3461 target_ifc_len = nb_ifreq * target_ifreq_size; 3462 host_ifconf->ifc_len = target_ifc_len; 3463 3464 /* restore target ifc_buf */ 3465 3466 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3467 3468 /* copy struct ifconf to target user */ 3469 3470 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3471 if (!argptr) 3472 return -TARGET_EFAULT; 3473 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3474 unlock_user(argptr, arg, target_size); 3475 3476 /* copy ifreq[] to target user */ 3477 3478 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3479 for (i = 0; i < nb_ifreq ; i++) { 3480 thunk_convert(argptr + i * target_ifreq_size, 3481 host_ifc_buf + i * sizeof(struct ifreq), 3482 ifreq_arg_type, THUNK_TARGET); 3483 } 3484 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3485 } 3486 3487 if (free_buf) { 3488 free(host_ifconf); 3489 } 3490 3491 return ret; 3492 } 3493 3494 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3495 abi_long cmd, abi_long arg) 3496 { 3497 void *argptr; 3498 struct dm_ioctl *host_dm; 3499 abi_long guest_data; 3500 uint32_t guest_data_size; 3501 int target_size; 3502 const argtype *arg_type = ie->arg_type; 3503 abi_long ret; 3504 void *big_buf = NULL; 3505 char *host_data; 3506 3507 arg_type++; 3508 target_size = thunk_type_size(arg_type, 0); 3509 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3510 if (!argptr) { 3511 ret = -TARGET_EFAULT; 3512 goto out; 3513 } 3514 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3515 unlock_user(argptr, arg, 0); 3516 3517 /* buf_temp is too small, so fetch things into a bigger buffer */ 3518 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3519 memcpy(big_buf, buf_temp, target_size); 3520 buf_temp = big_buf; 3521 host_dm = big_buf; 3522 3523 guest_data = arg + host_dm->data_start; 3524 if ((guest_data - arg) < 0) { 3525 ret = -EINVAL; 3526 goto out; 3527 } 3528 guest_data_size = host_dm->data_size - host_dm->data_start; 3529 host_data = (char*)host_dm + host_dm->data_start; 3530 3531 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3532 switch (ie->host_cmd) { 3533 case DM_REMOVE_ALL: 3534 case DM_LIST_DEVICES: 3535 case DM_DEV_CREATE: 3536 case DM_DEV_REMOVE: 3537 case DM_DEV_SUSPEND: 3538 case DM_DEV_STATUS: 3539 case DM_DEV_WAIT: 3540 case 
DM_TABLE_STATUS: 3541 case DM_TABLE_CLEAR: 3542 case DM_TABLE_DEPS: 3543 case DM_LIST_VERSIONS: 3544 /* no input data */ 3545 break; 3546 case DM_DEV_RENAME: 3547 case DM_DEV_SET_GEOMETRY: 3548 /* data contains only strings */ 3549 memcpy(host_data, argptr, guest_data_size); 3550 break; 3551 case DM_TARGET_MSG: 3552 memcpy(host_data, argptr, guest_data_size); 3553 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3554 break; 3555 case DM_TABLE_LOAD: 3556 { 3557 void *gspec = argptr; 3558 void *cur_data = host_data; 3559 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3560 int spec_size = thunk_type_size(arg_type, 0); 3561 int i; 3562 3563 for (i = 0; i < host_dm->target_count; i++) { 3564 struct dm_target_spec *spec = cur_data; 3565 uint32_t next; 3566 int slen; 3567 3568 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3569 slen = strlen((char*)gspec + spec_size) + 1; 3570 next = spec->next; 3571 spec->next = sizeof(*spec) + slen; 3572 strcpy((char*)&spec[1], gspec + spec_size); 3573 gspec += next; 3574 cur_data += spec->next; 3575 } 3576 break; 3577 } 3578 default: 3579 ret = -TARGET_EINVAL; 3580 goto out; 3581 } 3582 unlock_user(argptr, guest_data, 0); 3583 3584 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3585 if (!is_error(ret)) { 3586 guest_data = arg + host_dm->data_start; 3587 guest_data_size = host_dm->data_size - host_dm->data_start; 3588 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3589 switch (ie->host_cmd) { 3590 case DM_REMOVE_ALL: 3591 case DM_DEV_CREATE: 3592 case DM_DEV_REMOVE: 3593 case DM_DEV_RENAME: 3594 case DM_DEV_SUSPEND: 3595 case DM_DEV_STATUS: 3596 case DM_TABLE_LOAD: 3597 case DM_TABLE_CLEAR: 3598 case DM_TARGET_MSG: 3599 case DM_DEV_SET_GEOMETRY: 3600 /* no return data */ 3601 break; 3602 case DM_LIST_DEVICES: 3603 { 3604 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3605 uint32_t remaining_data = guest_data_size; 3606 void *cur_data = argptr; 3607 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3608 int nl_size = 12; /* can't use thunk_size due to alignment */ 3609 3610 while (1) { 3611 uint32_t next = nl->next; 3612 if (next) { 3613 nl->next = nl_size + (strlen(nl->name) + 1); 3614 } 3615 if (remaining_data < nl->next) { 3616 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3617 break; 3618 } 3619 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3620 strcpy(cur_data + nl_size, nl->name); 3621 cur_data += nl->next; 3622 remaining_data -= nl->next; 3623 if (!next) { 3624 break; 3625 } 3626 nl = (void*)nl + next; 3627 } 3628 break; 3629 } 3630 case DM_DEV_WAIT: 3631 case DM_TABLE_STATUS: 3632 { 3633 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3634 void *cur_data = argptr; 3635 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3636 int spec_size = thunk_type_size(arg_type, 0); 3637 int i; 3638 3639 for (i = 0; i < host_dm->target_count; i++) { 3640 uint32_t next = spec->next; 3641 int slen = strlen((char*)&spec[1]) + 1; 3642 spec->next = (cur_data - argptr) + spec_size + slen; 3643 if (guest_data_size < spec->next) { 3644 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3645 break; 3646 } 3647 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3648 strcpy(cur_data + spec_size, (char*)&spec[1]); 3649 cur_data = argptr + spec->next; 3650 spec = (void*)host_dm + host_dm->data_start + next; 3651 } 3652 break; 3653 } 3654 case DM_TABLE_DEPS: 3655 { 3656 void *hdata = (void*)host_dm + host_dm->data_start; 3657 int count = *(uint32_t*)hdata; 3658 uint64_t *hdev = 
hdata + 8; 3659 uint64_t *gdev = argptr + 8; 3660 int i; 3661 3662 *(uint32_t*)argptr = tswap32(count); 3663 for (i = 0; i < count; i++) { 3664 *gdev = tswap64(*hdev); 3665 gdev++; 3666 hdev++; 3667 } 3668 break; 3669 } 3670 case DM_LIST_VERSIONS: 3671 { 3672 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3673 uint32_t remaining_data = guest_data_size; 3674 void *cur_data = argptr; 3675 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3676 int vers_size = thunk_type_size(arg_type, 0); 3677 3678 while (1) { 3679 uint32_t next = vers->next; 3680 if (next) { 3681 vers->next = vers_size + (strlen(vers->name) + 1); 3682 } 3683 if (remaining_data < vers->next) { 3684 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3685 break; 3686 } 3687 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3688 strcpy(cur_data + vers_size, vers->name); 3689 cur_data += vers->next; 3690 remaining_data -= vers->next; 3691 if (!next) { 3692 break; 3693 } 3694 vers = (void*)vers + next; 3695 } 3696 break; 3697 } 3698 default: 3699 ret = -TARGET_EINVAL; 3700 goto out; 3701 } 3702 unlock_user(argptr, guest_data, guest_data_size); 3703 3704 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3705 if (!argptr) { 3706 ret = -TARGET_EFAULT; 3707 goto out; 3708 } 3709 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3710 unlock_user(argptr, arg, target_size); 3711 } 3712 out: 3713 g_free(big_buf); 3714 return ret; 3715 } 3716 3717 static IOCTLEntry ioctl_entries[] = { 3718 #define IOCTL(cmd, access, ...) \ 3719 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3720 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3721 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3722 #include "ioctls.h" 3723 { 0, 0, }, 3724 }; 3725 3726 /* ??? Implement proper locking for ioctls. */ 3727 /* do_ioctl() Must return target values and target errnos. 
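 The command is looked up in ioctl_entries[]; entries that provide a do_ioctl callback (the fiemap, ifconf and dm-ioctl handlers above) are special-cased, while the generic path uses the thunk machinery to convert the ioctl argument between target and host layouts according to ie->access (IOC_R, IOC_W or IOC_RW).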
*/ 3728 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3729 { 3730 const IOCTLEntry *ie; 3731 const argtype *arg_type; 3732 abi_long ret; 3733 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3734 int target_size; 3735 void *argptr; 3736 3737 ie = ioctl_entries; 3738 for(;;) { 3739 if (ie->target_cmd == 0) { 3740 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3741 return -TARGET_ENOSYS; 3742 } 3743 if (ie->target_cmd == cmd) 3744 break; 3745 ie++; 3746 } 3747 arg_type = ie->arg_type; 3748 #if defined(DEBUG) 3749 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3750 #endif 3751 if (ie->do_ioctl) { 3752 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3753 } 3754 3755 switch(arg_type[0]) { 3756 case TYPE_NULL: 3757 /* no argument */ 3758 ret = get_errno(ioctl(fd, ie->host_cmd)); 3759 break; 3760 case TYPE_PTRVOID: 3761 case TYPE_INT: 3762 /* int argument */ 3763 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3764 break; 3765 case TYPE_PTR: 3766 arg_type++; 3767 target_size = thunk_type_size(arg_type, 0); 3768 switch(ie->access) { 3769 case IOC_R: 3770 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3771 if (!is_error(ret)) { 3772 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3773 if (!argptr) 3774 return -TARGET_EFAULT; 3775 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3776 unlock_user(argptr, arg, target_size); 3777 } 3778 break; 3779 case IOC_W: 3780 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3781 if (!argptr) 3782 return -TARGET_EFAULT; 3783 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3784 unlock_user(argptr, arg, 0); 3785 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3786 break; 3787 default: 3788 case IOC_RW: 3789 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3790 if (!argptr) 3791 return -TARGET_EFAULT; 3792 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3793 unlock_user(argptr, arg, 0); 3794 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3795 if (!is_error(ret)) { 3796 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3797 if (!argptr) 3798 return -TARGET_EFAULT; 3799 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3800 unlock_user(argptr, arg, target_size); 3801 } 3802 break; 3803 } 3804 break; 3805 default: 3806 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3807 (long)cmd, arg_type[0]); 3808 ret = -TARGET_ENOSYS; 3809 break; 3810 } 3811 return ret; 3812 } 3813 3814 static const bitmask_transtbl iflag_tbl[] = { 3815 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3816 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3817 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3818 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3819 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3820 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3821 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3822 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3823 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3824 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3825 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3826 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3827 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3828 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3829 { 0, 0, 0, 0 } 3830 }; 3831 3832 static const bitmask_transtbl oflag_tbl[] = { 3833 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3834 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3835 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3836 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3837 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR
}, 3838 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3839 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3840 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3841 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3842 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3843 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3844 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3845 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3846 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3847 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3848 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3849 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3850 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3851 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3852 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3853 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3854 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3855 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3856 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3857 { 0, 0, 0, 0 } 3858 }; 3859 3860 static const bitmask_transtbl cflag_tbl[] = { 3861 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3862 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3863 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3864 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3865 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3866 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3867 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3868 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3869 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3870 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3871 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3872 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3873 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3874 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3875 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3876 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3877 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3878 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3879 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3880 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3881 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3882 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3883 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3884 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3885 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3886 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3887 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3888 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3889 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3890 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3891 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3892 { 0, 0, 0, 0 } 3893 }; 3894 3895 static const bitmask_transtbl lflag_tbl[] = { 3896 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3897 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3898 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3899 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3900 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3901 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3902 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3903 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3904 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3905 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3906 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3907 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3908 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3909 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3910 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3911 { 0, 0, 0, 0 } 3912 }; 3913 3914 static void target_to_host_termios 
(void *dst, const void *src) 3915 { 3916 struct host_termios *host = dst; 3917 const struct target_termios *target = src; 3918 3919 host->c_iflag = 3920 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3921 host->c_oflag = 3922 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3923 host->c_cflag = 3924 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3925 host->c_lflag = 3926 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3927 host->c_line = target->c_line; 3928 3929 memset(host->c_cc, 0, sizeof(host->c_cc)); 3930 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3931 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3932 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3933 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3934 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3935 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3936 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3937 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3938 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3939 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3940 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3941 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3942 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3943 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3944 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3945 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3946 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3947 } 3948 3949 static void host_to_target_termios (void *dst, const void *src) 3950 { 3951 struct target_termios *target = dst; 3952 const struct host_termios *host = src; 3953 3954 target->c_iflag = 3955 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3956 target->c_oflag = 3957 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3958 target->c_cflag = 3959 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3960 target->c_lflag = 3961 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3962 target->c_line = host->c_line; 3963 3964 memset(target->c_cc, 0, sizeof(target->c_cc)); 3965 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3966 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3967 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3968 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3969 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3970 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3971 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3972 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3973 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3974 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3975 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3976 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3977 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3978 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3979 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3980 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3981 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3982 } 3983 3984 static const StructEntry struct_termios_def = { 3985 .convert = { host_to_target_termios, target_to_host_termios }, 3986 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3987 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3988 }; 3989 3990 static bitmask_transtbl mmap_flags_tbl[] = { 3991 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3992 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3993 { TARGET_MAP_FIXED, 
TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3994 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3995 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3996 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3997 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3998 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3999 { 0, 0, 0, 0 }
4000 };
4001
4002 #if defined(TARGET_I386)
4003
4004 /* NOTE: there is really one LDT for all the threads */
4005 static uint8_t *ldt_table;
4006
4007 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4008 {
4009 int size;
4010 void *p;
4011
4012 if (!ldt_table)
4013 return 0;
4014 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4015 if (size > bytecount)
4016 size = bytecount;
4017 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4018 if (!p)
4019 return -TARGET_EFAULT;
4020 /* ??? Should this be byteswapped? */
4021 memcpy(p, ldt_table, size);
4022 unlock_user(p, ptr, size);
4023 return size;
4024 }
4025
4026 /* XXX: add locking support */
4027 static abi_long write_ldt(CPUX86State *env,
4028 abi_ulong ptr, unsigned long bytecount, int oldmode)
4029 {
4030 struct target_modify_ldt_ldt_s ldt_info;
4031 struct target_modify_ldt_ldt_s *target_ldt_info;
4032 int seg_32bit, contents, read_exec_only, limit_in_pages;
4033 int seg_not_present, useable, lm;
4034 uint32_t *lp, entry_1, entry_2;
4035
4036 if (bytecount != sizeof(ldt_info))
4037 return -TARGET_EINVAL;
4038 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4039 return -TARGET_EFAULT;
4040 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4041 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4042 ldt_info.limit = tswap32(target_ldt_info->limit);
4043 ldt_info.flags = tswap32(target_ldt_info->flags);
4044 unlock_user_struct(target_ldt_info, ptr, 0);
4045
4046 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4047 return -TARGET_EINVAL;
4048 seg_32bit = ldt_info.flags & 1;
4049 contents = (ldt_info.flags >> 1) & 3;
4050 read_exec_only = (ldt_info.flags >> 3) & 1;
4051 limit_in_pages = (ldt_info.flags >> 4) & 1;
4052 seg_not_present = (ldt_info.flags >> 5) & 1;
4053 useable = (ldt_info.flags >> 6) & 1;
4054 #ifdef TARGET_ABI32
4055 lm = 0;
4056 #else
4057 lm = (ldt_info.flags >> 7) & 1;
4058 #endif
4059 if (contents == 3) {
4060 if (oldmode)
4061 return -TARGET_EINVAL;
4062 if (seg_not_present == 0)
4063 return -TARGET_EINVAL;
4064 }
4065 /* allocate the LDT */
4066 if (!ldt_table) {
4067 env->ldt.base = target_mmap(0,
4068 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4069 PROT_READ|PROT_WRITE,
4070 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4071 if (env->ldt.base == -1)
4072 return -TARGET_ENOMEM;
4073 memset(g2h(env->ldt.base), 0,
4074 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4075 env->ldt.limit = 0xffff;
4076 ldt_table = g2h(env->ldt.base);
4077 }
4078
4079 /* NOTE: same code as Linux kernel */
4080 /* Allow LDTs to be cleared by the user.
*/ 4081 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4082 if (oldmode || 4083 (contents == 0 && 4084 read_exec_only == 1 && 4085 seg_32bit == 0 && 4086 limit_in_pages == 0 && 4087 seg_not_present == 1 && 4088 useable == 0 )) { 4089 entry_1 = 0; 4090 entry_2 = 0; 4091 goto install; 4092 } 4093 } 4094 4095 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4096 (ldt_info.limit & 0x0ffff); 4097 entry_2 = (ldt_info.base_addr & 0xff000000) | 4098 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4099 (ldt_info.limit & 0xf0000) | 4100 ((read_exec_only ^ 1) << 9) | 4101 (contents << 10) | 4102 ((seg_not_present ^ 1) << 15) | 4103 (seg_32bit << 22) | 4104 (limit_in_pages << 23) | 4105 (lm << 21) | 4106 0x7000; 4107 if (!oldmode) 4108 entry_2 |= (useable << 20); 4109 4110 /* Install the new entry ... */ 4111 install: 4112 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 4113 lp[0] = tswap32(entry_1); 4114 lp[1] = tswap32(entry_2); 4115 return 0; 4116 } 4117 4118 /* specific and weird i386 syscalls */ 4119 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 4120 unsigned long bytecount) 4121 { 4122 abi_long ret; 4123 4124 switch (func) { 4125 case 0: 4126 ret = read_ldt(ptr, bytecount); 4127 break; 4128 case 1: 4129 ret = write_ldt(env, ptr, bytecount, 1); 4130 break; 4131 case 0x11: 4132 ret = write_ldt(env, ptr, bytecount, 0); 4133 break; 4134 default: 4135 ret = -TARGET_ENOSYS; 4136 break; 4137 } 4138 return ret; 4139 } 4140 4141 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4142 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4143 { 4144 uint64_t *gdt_table = g2h(env->gdt.base); 4145 struct target_modify_ldt_ldt_s ldt_info; 4146 struct target_modify_ldt_ldt_s *target_ldt_info; 4147 int seg_32bit, contents, read_exec_only, limit_in_pages; 4148 int seg_not_present, useable, lm; 4149 uint32_t *lp, entry_1, entry_2; 4150 int i; 4151 4152 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4153 if (!target_ldt_info) 4154 return -TARGET_EFAULT; 4155 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4156 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4157 ldt_info.limit = tswap32(target_ldt_info->limit); 4158 ldt_info.flags = tswap32(target_ldt_info->flags); 4159 if (ldt_info.entry_number == -1) { 4160 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4161 if (gdt_table[i] == 0) { 4162 ldt_info.entry_number = i; 4163 target_ldt_info->entry_number = tswap32(i); 4164 break; 4165 } 4166 } 4167 } 4168 unlock_user_struct(target_ldt_info, ptr, 1); 4169 4170 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4171 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4172 return -TARGET_EINVAL; 4173 seg_32bit = ldt_info.flags & 1; 4174 contents = (ldt_info.flags >> 1) & 3; 4175 read_exec_only = (ldt_info.flags >> 3) & 1; 4176 limit_in_pages = (ldt_info.flags >> 4) & 1; 4177 seg_not_present = (ldt_info.flags >> 5) & 1; 4178 useable = (ldt_info.flags >> 6) & 1; 4179 #ifdef TARGET_ABI32 4180 lm = 0; 4181 #else 4182 lm = (ldt_info.flags >> 7) & 1; 4183 #endif 4184 4185 if (contents == 3) { 4186 if (seg_not_present == 0) 4187 return -TARGET_EINVAL; 4188 } 4189 4190 /* NOTE: same code as Linux kernel */ 4191 /* Allow LDTs to be cleared by the user. 
*/ 4192 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4193 if ((contents == 0 && 4194 read_exec_only == 1 && 4195 seg_32bit == 0 && 4196 limit_in_pages == 0 && 4197 seg_not_present == 1 && 4198 useable == 0 )) { 4199 entry_1 = 0; 4200 entry_2 = 0; 4201 goto install; 4202 } 4203 } 4204 4205 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4206 (ldt_info.limit & 0x0ffff); 4207 entry_2 = (ldt_info.base_addr & 0xff000000) | 4208 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4209 (ldt_info.limit & 0xf0000) | 4210 ((read_exec_only ^ 1) << 9) | 4211 (contents << 10) | 4212 ((seg_not_present ^ 1) << 15) | 4213 (seg_32bit << 22) | 4214 (limit_in_pages << 23) | 4215 (useable << 20) | 4216 (lm << 21) | 4217 0x7000; 4218 4219 /* Install the new entry ... */ 4220 install: 4221 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4222 lp[0] = tswap32(entry_1); 4223 lp[1] = tswap32(entry_2); 4224 return 0; 4225 } 4226 4227 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4228 { 4229 struct target_modify_ldt_ldt_s *target_ldt_info; 4230 uint64_t *gdt_table = g2h(env->gdt.base); 4231 uint32_t base_addr, limit, flags; 4232 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4233 int seg_not_present, useable, lm; 4234 uint32_t *lp, entry_1, entry_2; 4235 4236 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4237 if (!target_ldt_info) 4238 return -TARGET_EFAULT; 4239 idx = tswap32(target_ldt_info->entry_number); 4240 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4241 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4242 unlock_user_struct(target_ldt_info, ptr, 1); 4243 return -TARGET_EINVAL; 4244 } 4245 lp = (uint32_t *)(gdt_table + idx); 4246 entry_1 = tswap32(lp[0]); 4247 entry_2 = tswap32(lp[1]); 4248 4249 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4250 contents = (entry_2 >> 10) & 3; 4251 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4252 seg_32bit = (entry_2 >> 22) & 1; 4253 limit_in_pages = (entry_2 >> 23) & 1; 4254 useable = (entry_2 >> 20) & 1; 4255 #ifdef TARGET_ABI32 4256 lm = 0; 4257 #else 4258 lm = (entry_2 >> 21) & 1; 4259 #endif 4260 flags = (seg_32bit << 0) | (contents << 1) | 4261 (read_exec_only << 3) | (limit_in_pages << 4) | 4262 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4263 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4264 base_addr = (entry_1 >> 16) | 4265 (entry_2 & 0xff000000) | 4266 ((entry_2 & 0xff) << 16); 4267 target_ldt_info->base_addr = tswapal(base_addr); 4268 target_ldt_info->limit = tswap32(limit); 4269 target_ldt_info->flags = tswap32(flags); 4270 unlock_user_struct(target_ldt_info, ptr, 1); 4271 return 0; 4272 } 4273 #endif /* TARGET_I386 && TARGET_ABI32 */ 4274 4275 #ifndef TARGET_ABI32 4276 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4277 { 4278 abi_long ret = 0; 4279 abi_ulong val; 4280 int idx; 4281 4282 switch(code) { 4283 case TARGET_ARCH_SET_GS: 4284 case TARGET_ARCH_SET_FS: 4285 if (code == TARGET_ARCH_SET_GS) 4286 idx = R_GS; 4287 else 4288 idx = R_FS; 4289 cpu_x86_load_seg(env, idx, 0); 4290 env->segs[idx].base = addr; 4291 break; 4292 case TARGET_ARCH_GET_GS: 4293 case TARGET_ARCH_GET_FS: 4294 if (code == TARGET_ARCH_GET_GS) 4295 idx = R_GS; 4296 else 4297 idx = R_FS; 4298 val = env->segs[idx].base; 4299 if (put_user(val, addr, abi_ulong)) 4300 ret = -TARGET_EFAULT; 4301 break; 4302 default: 4303 ret = -TARGET_EINVAL; 4304 break; 4305 } 4306 return ret; 4307 } 4308 #endif 4309 4310 #endif /* defined(TARGET_I386) */ 4311 4312 #define NEW_STACK_SIZE 0x40000 4313 4314 #if defined(CONFIG_USE_NPTL) 
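/*
 * CONFIG_USE_NPTL thread-creation path: do_fork() below spawns a detached
 * host pthread running clone_func().  The child publishes its TID for
 * CLONE_PARENT_SETTID/CLONE_CHILD_SETTID, signals readiness on info->cond,
 * then parks on clone_lock until the parent has finished the TLS setup
 * before entering cpu_loop().  A guest pthread_create() typically reaches
 * this code as something like
 *     clone(CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *           CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID, ...)
 * though the exact flag set depends on the guest libc.
 */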
4315
4316 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4317 typedef struct {
4318 CPUArchState *env;
4319 pthread_mutex_t mutex;
4320 pthread_cond_t cond;
4321 pthread_t thread;
4322 uint32_t tid;
4323 abi_ulong child_tidptr;
4324 abi_ulong parent_tidptr;
4325 sigset_t sigmask;
4326 } new_thread_info;
4327
4328 static void *clone_func(void *arg)
4329 {
4330 new_thread_info *info = arg;
4331 CPUArchState *env;
4332 CPUState *cpu;
4333 TaskState *ts;
4334
4335 env = info->env;
4336 cpu = ENV_GET_CPU(env);
4337 thread_env = env;
4338 ts = (TaskState *)thread_env->opaque;
4339 info->tid = gettid();
4340 cpu->host_tid = info->tid;
4341 task_settid(ts);
4342 if (info->child_tidptr)
4343 put_user_u32(info->tid, info->child_tidptr);
4344 if (info->parent_tidptr)
4345 put_user_u32(info->tid, info->parent_tidptr);
4346 /* Enable signals. */
4347 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4348 /* Signal to the parent that we're ready. */
4349 pthread_mutex_lock(&info->mutex);
4350 pthread_cond_broadcast(&info->cond);
4351 pthread_mutex_unlock(&info->mutex);
4352 /* Wait until the parent has finished initializing the TLS state. */
4353 pthread_mutex_lock(&clone_lock);
4354 pthread_mutex_unlock(&clone_lock);
4355 cpu_loop(env);
4356 /* never exits */
4357 return NULL;
4358 }
4359 #else
4360
4361 static int clone_func(void *arg)
4362 {
4363 CPUArchState *env = arg;
4364 cpu_loop(env);
4365 /* never exits */
4366 return 0;
4367 }
4368 #endif
4369
4370 /* do_fork() must return host values and target errnos (unlike most
4371 do_*() functions). */
4372 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4373 abi_ulong parent_tidptr, target_ulong newtls,
4374 abi_ulong child_tidptr)
4375 {
4376 int ret;
4377 TaskState *ts;
4378 CPUArchState *new_env;
4379 #if defined(CONFIG_USE_NPTL)
4380 unsigned int nptl_flags;
4381 sigset_t sigmask;
4382 #else
4383 uint8_t *new_stack;
4384 #endif
4385
4386 /* Emulate vfork() with fork() */
4387 if (flags & CLONE_VFORK)
4388 flags &= ~(CLONE_VFORK | CLONE_VM);
4389
4390 if (flags & CLONE_VM) {
4391 TaskState *parent_ts = (TaskState *)env->opaque;
4392 #if defined(CONFIG_USE_NPTL)
4393 new_thread_info info;
4394 pthread_attr_t attr;
4395 #endif
4396 ts = g_malloc0(sizeof(TaskState));
4397 init_task_state(ts);
4398 /* we create a new CPU instance. */
4399 new_env = cpu_copy(env);
4400 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4401 cpu_reset(ENV_GET_CPU(new_env));
4402 #endif
4403 /* Init regs that differ from the parent. */
4404 cpu_clone_regs(new_env, newsp);
4405 new_env->opaque = ts;
4406 ts->bprm = parent_ts->bprm;
4407 ts->info = parent_ts->info;
4408 #if defined(CONFIG_USE_NPTL)
4409 nptl_flags = flags;
4410 flags &= ~CLONE_NPTL_FLAGS2;
4411
4412 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4413 ts->child_tidptr = child_tidptr;
4414 }
4415
4416 if (nptl_flags & CLONE_SETTLS)
4417 cpu_set_tls (new_env, newtls);
4418
4419 /* Grab a mutex so that thread setup appears atomic.
*/ 4420 pthread_mutex_lock(&clone_lock); 4421 4422 memset(&info, 0, sizeof(info)); 4423 pthread_mutex_init(&info.mutex, NULL); 4424 pthread_mutex_lock(&info.mutex); 4425 pthread_cond_init(&info.cond, NULL); 4426 info.env = new_env; 4427 if (nptl_flags & CLONE_CHILD_SETTID) 4428 info.child_tidptr = child_tidptr; 4429 if (nptl_flags & CLONE_PARENT_SETTID) 4430 info.parent_tidptr = parent_tidptr; 4431 4432 ret = pthread_attr_init(&attr); 4433 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4434 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4435 /* It is not safe to deliver signals until the child has finished 4436 initializing, so temporarily block all signals. */ 4437 sigfillset(&sigmask); 4438 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4439 4440 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4441 /* TODO: Free new CPU state if thread creation failed. */ 4442 4443 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4444 pthread_attr_destroy(&attr); 4445 if (ret == 0) { 4446 /* Wait for the child to initialize. */ 4447 pthread_cond_wait(&info.cond, &info.mutex); 4448 ret = info.tid; 4449 if (flags & CLONE_PARENT_SETTID) 4450 put_user_u32(ret, parent_tidptr); 4451 } else { 4452 ret = -1; 4453 } 4454 pthread_mutex_unlock(&info.mutex); 4455 pthread_cond_destroy(&info.cond); 4456 pthread_mutex_destroy(&info.mutex); 4457 pthread_mutex_unlock(&clone_lock); 4458 #else 4459 if (flags & CLONE_NPTL_FLAGS2) 4460 return -EINVAL; 4461 /* This is probably going to die very quickly, but do it anyway. */ 4462 new_stack = g_malloc0 (NEW_STACK_SIZE); 4463 #ifdef __ia64__ 4464 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env); 4465 #else 4466 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env); 4467 #endif 4468 #endif 4469 } else { 4470 /* if no CLONE_VM, we consider it is a fork */ 4471 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4472 return -EINVAL; 4473 fork_start(); 4474 ret = fork(); 4475 if (ret == 0) { 4476 /* Child Process. */ 4477 cpu_clone_regs(env, newsp); 4478 fork_end(1); 4479 #if defined(CONFIG_USE_NPTL) 4480 /* There is a race condition here. The parent process could 4481 theoretically read the TID in the child process before the child 4482 tid is set. This would require using either ptrace 4483 (not implemented) or having *_tidptr to point at a shared memory 4484 mapping. We can't repeat the spinlock hack used above because 4485 the child process gets its own copy of the lock. */ 4486 if (flags & CLONE_CHILD_SETTID) 4487 put_user_u32(gettid(), child_tidptr); 4488 if (flags & CLONE_PARENT_SETTID) 4489 put_user_u32(gettid(), parent_tidptr); 4490 ts = (TaskState *)env->opaque; 4491 if (flags & CLONE_SETTLS) 4492 cpu_set_tls (env, newtls); 4493 if (flags & CLONE_CHILD_CLEARTID) 4494 ts->child_tidptr = child_tidptr; 4495 #endif 4496 } else { 4497 fork_end(0); 4498 } 4499 } 4500 return ret; 4501 } 4502 4503 /* warning : doesn't handle linux specific flags... 
*/ 4504 static int target_to_host_fcntl_cmd(int cmd) 4505 { 4506 switch(cmd) { 4507 case TARGET_F_DUPFD: 4508 case TARGET_F_GETFD: 4509 case TARGET_F_SETFD: 4510 case TARGET_F_GETFL: 4511 case TARGET_F_SETFL: 4512 return cmd; 4513 case TARGET_F_GETLK: 4514 return F_GETLK; 4515 case TARGET_F_SETLK: 4516 return F_SETLK; 4517 case TARGET_F_SETLKW: 4518 return F_SETLKW; 4519 case TARGET_F_GETOWN: 4520 return F_GETOWN; 4521 case TARGET_F_SETOWN: 4522 return F_SETOWN; 4523 case TARGET_F_GETSIG: 4524 return F_GETSIG; 4525 case TARGET_F_SETSIG: 4526 return F_SETSIG; 4527 #if TARGET_ABI_BITS == 32 4528 case TARGET_F_GETLK64: 4529 return F_GETLK64; 4530 case TARGET_F_SETLK64: 4531 return F_SETLK64; 4532 case TARGET_F_SETLKW64: 4533 return F_SETLKW64; 4534 #endif 4535 case TARGET_F_SETLEASE: 4536 return F_SETLEASE; 4537 case TARGET_F_GETLEASE: 4538 return F_GETLEASE; 4539 #ifdef F_DUPFD_CLOEXEC 4540 case TARGET_F_DUPFD_CLOEXEC: 4541 return F_DUPFD_CLOEXEC; 4542 #endif 4543 case TARGET_F_NOTIFY: 4544 return F_NOTIFY; 4545 default: 4546 return -TARGET_EINVAL; 4547 } 4548 return -TARGET_EINVAL; 4549 } 4550 4551 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4552 static const bitmask_transtbl flock_tbl[] = { 4553 TRANSTBL_CONVERT(F_RDLCK), 4554 TRANSTBL_CONVERT(F_WRLCK), 4555 TRANSTBL_CONVERT(F_UNLCK), 4556 TRANSTBL_CONVERT(F_EXLCK), 4557 TRANSTBL_CONVERT(F_SHLCK), 4558 { 0, 0, 0, 0 } 4559 }; 4560 4561 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4562 { 4563 struct flock fl; 4564 struct target_flock *target_fl; 4565 struct flock64 fl64; 4566 struct target_flock64 *target_fl64; 4567 abi_long ret; 4568 int host_cmd = target_to_host_fcntl_cmd(cmd); 4569 4570 if (host_cmd == -TARGET_EINVAL) 4571 return host_cmd; 4572 4573 switch(cmd) { 4574 case TARGET_F_GETLK: 4575 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4576 return -TARGET_EFAULT; 4577 fl.l_type = 4578 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4579 fl.l_whence = tswap16(target_fl->l_whence); 4580 fl.l_start = tswapal(target_fl->l_start); 4581 fl.l_len = tswapal(target_fl->l_len); 4582 fl.l_pid = tswap32(target_fl->l_pid); 4583 unlock_user_struct(target_fl, arg, 0); 4584 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4585 if (ret == 0) { 4586 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4587 return -TARGET_EFAULT; 4588 target_fl->l_type = 4589 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4590 target_fl->l_whence = tswap16(fl.l_whence); 4591 target_fl->l_start = tswapal(fl.l_start); 4592 target_fl->l_len = tswapal(fl.l_len); 4593 target_fl->l_pid = tswap32(fl.l_pid); 4594 unlock_user_struct(target_fl, arg, 1); 4595 } 4596 break; 4597 4598 case TARGET_F_SETLK: 4599 case TARGET_F_SETLKW: 4600 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4601 return -TARGET_EFAULT; 4602 fl.l_type = 4603 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4604 fl.l_whence = tswap16(target_fl->l_whence); 4605 fl.l_start = tswapal(target_fl->l_start); 4606 fl.l_len = tswapal(target_fl->l_len); 4607 fl.l_pid = tswap32(target_fl->l_pid); 4608 unlock_user_struct(target_fl, arg, 0); 4609 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4610 break; 4611 4612 case TARGET_F_GETLK64: 4613 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4614 return -TARGET_EFAULT; 4615 fl64.l_type = 4616 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4617 fl64.l_whence = tswap16(target_fl64->l_whence); 4618 fl64.l_start = tswap64(target_fl64->l_start); 4619 fl64.l_len = 
tswap64(target_fl64->l_len); 4620 fl64.l_pid = tswap32(target_fl64->l_pid); 4621 unlock_user_struct(target_fl64, arg, 0); 4622 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4623 if (ret == 0) { 4624 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4625 return -TARGET_EFAULT; 4626 target_fl64->l_type = 4627 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4628 target_fl64->l_whence = tswap16(fl64.l_whence); 4629 target_fl64->l_start = tswap64(fl64.l_start); 4630 target_fl64->l_len = tswap64(fl64.l_len); 4631 target_fl64->l_pid = tswap32(fl64.l_pid); 4632 unlock_user_struct(target_fl64, arg, 1); 4633 } 4634 break; 4635 case TARGET_F_SETLK64: 4636 case TARGET_F_SETLKW64: 4637 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4638 return -TARGET_EFAULT; 4639 fl64.l_type = 4640 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4641 fl64.l_whence = tswap16(target_fl64->l_whence); 4642 fl64.l_start = tswap64(target_fl64->l_start); 4643 fl64.l_len = tswap64(target_fl64->l_len); 4644 fl64.l_pid = tswap32(target_fl64->l_pid); 4645 unlock_user_struct(target_fl64, arg, 0); 4646 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4647 break; 4648 4649 case TARGET_F_GETFL: 4650 ret = get_errno(fcntl(fd, host_cmd, arg)); 4651 if (ret >= 0) { 4652 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4653 } 4654 break; 4655 4656 case TARGET_F_SETFL: 4657 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4658 break; 4659 4660 case TARGET_F_SETOWN: 4661 case TARGET_F_GETOWN: 4662 case TARGET_F_SETSIG: 4663 case TARGET_F_GETSIG: 4664 case TARGET_F_SETLEASE: 4665 case TARGET_F_GETLEASE: 4666 ret = get_errno(fcntl(fd, host_cmd, arg)); 4667 break; 4668 4669 default: 4670 ret = get_errno(fcntl(fd, cmd, arg)); 4671 break; 4672 } 4673 return ret; 4674 } 4675 4676 #ifdef USE_UID16 4677 4678 static inline int high2lowuid(int uid) 4679 { 4680 if (uid > 65535) 4681 return 65534; 4682 else 4683 return uid; 4684 } 4685 4686 static inline int high2lowgid(int gid) 4687 { 4688 if (gid > 65535) 4689 return 65534; 4690 else 4691 return gid; 4692 } 4693 4694 static inline int low2highuid(int uid) 4695 { 4696 if ((int16_t)uid == -1) 4697 return -1; 4698 else 4699 return uid; 4700 } 4701 4702 static inline int low2highgid(int gid) 4703 { 4704 if ((int16_t)gid == -1) 4705 return -1; 4706 else 4707 return gid; 4708 } 4709 static inline int tswapid(int id) 4710 { 4711 return tswap16(id); 4712 } 4713 #else /* !USE_UID16 */ 4714 static inline int high2lowuid(int uid) 4715 { 4716 return uid; 4717 } 4718 static inline int high2lowgid(int gid) 4719 { 4720 return gid; 4721 } 4722 static inline int low2highuid(int uid) 4723 { 4724 return uid; 4725 } 4726 static inline int low2highgid(int gid) 4727 { 4728 return gid; 4729 } 4730 static inline int tswapid(int id) 4731 { 4732 return tswap32(id); 4733 } 4734 #endif /* USE_UID16 */ 4735 4736 void syscall_init(void) 4737 { 4738 IOCTLEntry *ie; 4739 const argtype *arg_type; 4740 int size; 4741 int i; 4742 4743 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4744 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4745 #include "syscall_types.h" 4746 #undef STRUCT 4747 #undef STRUCT_SPECIAL 4748 4749 /* Build target_to_host_errno_table[] table from 4750 * host_to_target_errno_table[]. 
*/ 4751 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4752 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4753 } 4754 4755 /* we patch the ioctl size if necessary. We rely on the fact that 4756 no ioctl has all the bits at '1' in the size field */ 4757 ie = ioctl_entries; 4758 while (ie->target_cmd != 0) { 4759 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4760 TARGET_IOC_SIZEMASK) { 4761 arg_type = ie->arg_type; 4762 if (arg_type[0] != TYPE_PTR) { 4763 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4764 ie->target_cmd); 4765 exit(1); 4766 } 4767 arg_type++; 4768 size = thunk_type_size(arg_type, 0); 4769 ie->target_cmd = (ie->target_cmd & 4770 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4771 (size << TARGET_IOC_SIZESHIFT); 4772 } 4773 4774 /* automatic consistency check if same arch */ 4775 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4776 (defined(__x86_64__) && defined(TARGET_X86_64)) 4777 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4778 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4779 ie->name, ie->target_cmd, ie->host_cmd); 4780 } 4781 #endif 4782 ie++; 4783 } 4784 } 4785 4786 #if TARGET_ABI_BITS == 32 4787 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4788 { 4789 #ifdef TARGET_WORDS_BIGENDIAN 4790 return ((uint64_t)word0 << 32) | word1; 4791 #else 4792 return ((uint64_t)word1 << 32) | word0; 4793 #endif 4794 } 4795 #else /* TARGET_ABI_BITS == 32 */ 4796 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4797 { 4798 return word0; 4799 } 4800 #endif /* TARGET_ABI_BITS != 32 */ 4801 4802 #ifdef TARGET_NR_truncate64 4803 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4804 abi_long arg2, 4805 abi_long arg3, 4806 abi_long arg4) 4807 { 4808 if (regpairs_aligned(cpu_env)) { 4809 arg2 = arg3; 4810 arg3 = arg4; 4811 } 4812 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4813 } 4814 #endif 4815 4816 #ifdef TARGET_NR_ftruncate64 4817 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4818 abi_long arg2, 4819 abi_long arg3, 4820 abi_long arg4) 4821 { 4822 if (regpairs_aligned(cpu_env)) { 4823 arg2 = arg3; 4824 arg3 = arg4; 4825 } 4826 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4827 } 4828 #endif 4829 4830 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4831 abi_ulong target_addr) 4832 { 4833 struct target_timespec *target_ts; 4834 4835 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4836 return -TARGET_EFAULT; 4837 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4838 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4839 unlock_user_struct(target_ts, target_addr, 0); 4840 return 0; 4841 } 4842 4843 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4844 struct timespec *host_ts) 4845 { 4846 struct target_timespec *target_ts; 4847 4848 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4849 return -TARGET_EFAULT; 4850 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4851 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4852 unlock_user_struct(target_ts, target_addr, 1); 4853 return 0; 4854 } 4855 4856 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4857 static inline abi_long host_to_target_stat64(void *cpu_env, 4858 abi_ulong target_addr, 4859 struct stat *host_st) 4860 { 4861 #ifdef TARGET_ARM 4862 if (((CPUARMState *)cpu_env)->eabi) { 4863 struct target_eabi_stat64 *target_st; 4864 
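/* ARM EABI guests use the packed target_eabi_stat64 layout, so the host
   stat fields are copied one at a time with __put_user() to get the
   target's field sizes and byte order right. */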
4865 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4866 return -TARGET_EFAULT; 4867 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4868 __put_user(host_st->st_dev, &target_st->st_dev); 4869 __put_user(host_st->st_ino, &target_st->st_ino); 4870 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4871 __put_user(host_st->st_ino, &target_st->__st_ino); 4872 #endif 4873 __put_user(host_st->st_mode, &target_st->st_mode); 4874 __put_user(host_st->st_nlink, &target_st->st_nlink); 4875 __put_user(host_st->st_uid, &target_st->st_uid); 4876 __put_user(host_st->st_gid, &target_st->st_gid); 4877 __put_user(host_st->st_rdev, &target_st->st_rdev); 4878 __put_user(host_st->st_size, &target_st->st_size); 4879 __put_user(host_st->st_blksize, &target_st->st_blksize); 4880 __put_user(host_st->st_blocks, &target_st->st_blocks); 4881 __put_user(host_st->st_atime, &target_st->target_st_atime); 4882 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4883 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4884 unlock_user_struct(target_st, target_addr, 1); 4885 } else 4886 #endif 4887 { 4888 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA) 4889 struct target_stat *target_st; 4890 #else 4891 struct target_stat64 *target_st; 4892 #endif 4893 4894 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4895 return -TARGET_EFAULT; 4896 memset(target_st, 0, sizeof(*target_st)); 4897 __put_user(host_st->st_dev, &target_st->st_dev); 4898 __put_user(host_st->st_ino, &target_st->st_ino); 4899 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4900 __put_user(host_st->st_ino, &target_st->__st_ino); 4901 #endif 4902 __put_user(host_st->st_mode, &target_st->st_mode); 4903 __put_user(host_st->st_nlink, &target_st->st_nlink); 4904 __put_user(host_st->st_uid, &target_st->st_uid); 4905 __put_user(host_st->st_gid, &target_st->st_gid); 4906 __put_user(host_st->st_rdev, &target_st->st_rdev); 4907 /* XXX: better use of kernel struct */ 4908 __put_user(host_st->st_size, &target_st->st_size); 4909 __put_user(host_st->st_blksize, &target_st->st_blksize); 4910 __put_user(host_st->st_blocks, &target_st->st_blocks); 4911 __put_user(host_st->st_atime, &target_st->target_st_atime); 4912 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4913 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4914 unlock_user_struct(target_st, target_addr, 1); 4915 } 4916 4917 return 0; 4918 } 4919 #endif 4920 4921 #if defined(CONFIG_USE_NPTL) 4922 /* ??? Using host futex calls even when target atomic operations 4923 are not really atomic probably breaks things. However implementing 4924 futexes locally would make futexes shared between multiple processes 4925 tricky. However they're probably useless because guest atomic 4926 operations won't work either. */ 4927 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4928 target_ulong uaddr2, int val3) 4929 { 4930 struct timespec ts, *pts; 4931 int base_op; 4932 4933 /* ??? We assume FUTEX_* constants are the same on both host 4934 and target. 
*/ 4935 #ifdef FUTEX_CMD_MASK 4936 base_op = op & FUTEX_CMD_MASK; 4937 #else 4938 base_op = op; 4939 #endif 4940 switch (base_op) { 4941 case FUTEX_WAIT: 4942 case FUTEX_WAIT_BITSET: 4943 if (timeout) { 4944 pts = &ts; 4945 target_to_host_timespec(pts, timeout); 4946 } else { 4947 pts = NULL; 4948 } 4949 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4950 pts, NULL, val3)); 4951 case FUTEX_WAKE: 4952 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4953 case FUTEX_FD: 4954 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4955 case FUTEX_REQUEUE: 4956 case FUTEX_CMP_REQUEUE: 4957 case FUTEX_WAKE_OP: 4958 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4959 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4960 But the prototype takes a `struct timespec *'; insert casts 4961 to satisfy the compiler. We do not need to tswap TIMEOUT 4962 since it's not compared to guest memory. */ 4963 pts = (struct timespec *)(uintptr_t) timeout; 4964 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4965 g2h(uaddr2), 4966 (base_op == FUTEX_CMP_REQUEUE 4967 ? tswap32(val3) 4968 : val3))); 4969 default: 4970 return -TARGET_ENOSYS; 4971 } 4972 } 4973 #endif 4974 4975 /* Map host to target signal numbers for the wait family of syscalls. 4976 Assume all other status bits are the same. */ 4977 int host_to_target_waitstatus(int status) 4978 { 4979 if (WIFSIGNALED(status)) { 4980 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4981 } 4982 if (WIFSTOPPED(status)) { 4983 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4984 | (status & 0xff); 4985 } 4986 return status; 4987 } 4988 4989 int get_osversion(void) 4990 { 4991 static int osversion; 4992 struct new_utsname buf; 4993 const char *s; 4994 int i, n, tmp; 4995 if (osversion) 4996 return osversion; 4997 if (qemu_uname_release && *qemu_uname_release) { 4998 s = qemu_uname_release; 4999 } else { 5000 if (sys_uname(&buf)) 5001 return 0; 5002 s = buf.release; 5003 } 5004 tmp = 0; 5005 for (i = 0; i < 3; i++) { 5006 n = 0; 5007 while (*s >= '0' && *s <= '9') { 5008 n *= 10; 5009 n += *s - '0'; 5010 s++; 5011 } 5012 tmp = (tmp << 8) + n; 5013 if (*s == '.') 5014 s++; 5015 } 5016 osversion = tmp; 5017 return osversion; 5018 } 5019 5020 5021 static int open_self_maps(void *cpu_env, int fd) 5022 { 5023 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5024 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5025 #endif 5026 FILE *fp; 5027 char *line = NULL; 5028 size_t len = 0; 5029 ssize_t read; 5030 5031 fp = fopen("/proc/self/maps", "r"); 5032 if (fp == NULL) { 5033 return -EACCES; 5034 } 5035 5036 while ((read = getline(&line, &len, fp)) != -1) { 5037 int fields, dev_maj, dev_min, inode; 5038 uint64_t min, max, offset; 5039 char flag_r, flag_w, flag_x, flag_p; 5040 char path[512] = ""; 5041 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 5042 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 5043 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5044 5045 if ((fields < 10) || (fields > 11)) { 5046 continue; 5047 } 5048 if (!strncmp(path, "[stack]", 7)) { 5049 continue; 5050 } 5051 if (h2g_valid(min) && h2g_valid(max)) { 5052 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5053 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n", 5054 h2g(min), h2g(max), flag_r, flag_w, 5055 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5056 path[0] ? 
" " : "", path); 5057 } 5058 } 5059 5060 free(line); 5061 fclose(fp); 5062 5063 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5064 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 5065 (unsigned long long)ts->info->stack_limit, 5066 (unsigned long long)(ts->info->start_stack + 5067 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 5068 (unsigned long long)0); 5069 #endif 5070 5071 return 0; 5072 } 5073 5074 static int open_self_stat(void *cpu_env, int fd) 5075 { 5076 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5077 abi_ulong start_stack = ts->info->start_stack; 5078 int i; 5079 5080 for (i = 0; i < 44; i++) { 5081 char buf[128]; 5082 int len; 5083 uint64_t val = 0; 5084 5085 if (i == 0) { 5086 /* pid */ 5087 val = getpid(); 5088 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5089 } else if (i == 1) { 5090 /* app name */ 5091 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5092 } else if (i == 27) { 5093 /* stack bottom */ 5094 val = start_stack; 5095 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5096 } else { 5097 /* for the rest, there is MasterCard */ 5098 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 5099 } 5100 5101 len = strlen(buf); 5102 if (write(fd, buf, len) != len) { 5103 return -1; 5104 } 5105 } 5106 5107 return 0; 5108 } 5109 5110 static int open_self_auxv(void *cpu_env, int fd) 5111 { 5112 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5113 abi_ulong auxv = ts->info->saved_auxv; 5114 abi_ulong len = ts->info->auxv_len; 5115 char *ptr; 5116 5117 /* 5118 * Auxiliary vector is stored in target process stack. 5119 * read in whole auxv vector and copy it to file 5120 */ 5121 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5122 if (ptr != NULL) { 5123 while (len > 0) { 5124 ssize_t r; 5125 r = write(fd, ptr, len); 5126 if (r <= 0) { 5127 break; 5128 } 5129 len -= r; 5130 ptr += r; 5131 } 5132 lseek(fd, 0, SEEK_SET); 5133 unlock_user(ptr, auxv, len); 5134 } 5135 5136 return 0; 5137 } 5138 5139 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 5140 { 5141 struct fake_open { 5142 const char *filename; 5143 int (*fill)(void *cpu_env, int fd); 5144 }; 5145 const struct fake_open *fake_open; 5146 static const struct fake_open fakes[] = { 5147 { "/proc/self/maps", open_self_maps }, 5148 { "/proc/self/stat", open_self_stat }, 5149 { "/proc/self/auxv", open_self_auxv }, 5150 { NULL, NULL } 5151 }; 5152 5153 for (fake_open = fakes; fake_open->filename; fake_open++) { 5154 if (!strncmp(pathname, fake_open->filename, 5155 strlen(fake_open->filename))) { 5156 break; 5157 } 5158 } 5159 5160 if (fake_open->filename) { 5161 const char *tmpdir; 5162 char filename[PATH_MAX]; 5163 int fd, r; 5164 5165 /* create temporary file to map stat to */ 5166 tmpdir = getenv("TMPDIR"); 5167 if (!tmpdir) 5168 tmpdir = "/tmp"; 5169 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5170 fd = mkstemp(filename); 5171 if (fd < 0) { 5172 return fd; 5173 } 5174 unlink(filename); 5175 5176 if ((r = fake_open->fill(cpu_env, fd))) { 5177 close(fd); 5178 return r; 5179 } 5180 lseek(fd, 0, SEEK_SET); 5181 5182 return fd; 5183 } 5184 5185 return get_errno(open(path(pathname), flags, mode)); 5186 } 5187 5188 /* do_syscall() should always have a single exit point at the end so 5189 that actions, such as logging of syscall results, can be performed. 5190 All errnos that do_syscall() returns must be -TARGET_<errcode>. 
*/
5191 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5192 abi_long arg2, abi_long arg3, abi_long arg4,
5193 abi_long arg5, abi_long arg6, abi_long arg7,
5194 abi_long arg8)
5195 {
5196 abi_long ret;
5197 struct stat st;
5198 struct statfs stfs;
5199 void *p;
5200
5201 #ifdef DEBUG
5202 gemu_log("syscall %d", num);
5203 #endif
5204 if(do_strace)
5205 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5206
5207 switch(num) {
5208 case TARGET_NR_exit:
5209 #ifdef CONFIG_USE_NPTL
5210 /* In old applications this may be used to implement _exit(2).
5211 However in threaded applications it is used for thread termination,
5212 and _exit_group is used for application termination.
5213 Do thread termination if we have more than one thread. */
5214 /* FIXME: This probably breaks if a signal arrives. We should probably
5215 be disabling signals. */
5216 if (first_cpu->next_cpu) {
5217 TaskState *ts;
5218 CPUArchState **lastp;
5219 CPUArchState *p;
5220
5221 cpu_list_lock();
5222 lastp = &first_cpu;
5223 p = first_cpu;
5224 while (p && p != (CPUArchState *)cpu_env) {
5225 lastp = &p->next_cpu;
5226 p = p->next_cpu;
5227 }
5228 /* If we didn't find the CPU for this thread then something is
5229 horribly wrong. */
5230 if (!p)
5231 abort();
5232 /* Remove the CPU from the list. */
5233 *lastp = p->next_cpu;
5234 cpu_list_unlock();
5235 ts = ((CPUArchState *)cpu_env)->opaque;
5236 if (ts->child_tidptr) {
5237 put_user_u32(0, ts->child_tidptr);
5238 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5239 NULL, NULL, 0);
5240 }
5241 thread_env = NULL;
5242 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5243 g_free(ts);
5244 pthread_exit(NULL);
5245 }
5246 #endif
5247 #ifdef TARGET_GPROF
5248 _mcleanup();
5249 #endif
5250 gdb_exit(cpu_env, arg1);
5251 _exit(arg1);
5252 ret = 0; /* avoid warning */
5253 break;
5254 case TARGET_NR_read:
5255 if (arg3 == 0)
5256 ret = 0;
5257 else {
5258 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5259 goto efault;
5260 ret = get_errno(read(arg1, p, arg3));
5261 unlock_user(p, arg2, ret);
5262 }
5263 break;
5264 case TARGET_NR_write:
5265 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5266 goto efault;
5267 ret = get_errno(write(arg1, p, arg3));
5268 unlock_user(p, arg2, 0);
5269 break;
5270 case TARGET_NR_open:
5271 if (!(p = lock_user_string(arg1)))
5272 goto efault;
5273 ret = get_errno(do_open(cpu_env, p,
5274 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5275 arg3));
5276 unlock_user(p, arg1, 0);
5277 break;
5278 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5279 case TARGET_NR_openat:
5280 if (!(p = lock_user_string(arg2)))
5281 goto efault;
5282 ret = get_errno(sys_openat(arg1,
5283 path(p),
5284 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5285 arg4));
5286 unlock_user(p, arg2, 0);
5287 break;
5288 #endif
5289 case TARGET_NR_close:
5290 ret = get_errno(close(arg1));
5291 break;
5292 case TARGET_NR_brk:
5293 ret = do_brk(arg1);
5294 break;
5295 case TARGET_NR_fork:
5296 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5297 break;
5298 #ifdef TARGET_NR_waitpid
5299 case TARGET_NR_waitpid:
5300 {
5301 int status;
5302 ret = get_errno(waitpid(arg1, &status, arg3));
5303 if (!is_error(ret) && arg2 && ret
5304 && put_user_s32(host_to_target_waitstatus(status), arg2))
5305 goto efault;
5306 }
5307 break;
5308 #endif
5309 #ifdef TARGET_NR_waitid
5310 case TARGET_NR_waitid:
5311 {
5312 siginfo_t info;
5313 info.si_pid = 0;
5314 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5315 if (!is_error(ret) && arg3 && info.si_pid != 0) {
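/* Copy the siginfo back only when the caller supplied a buffer and a
   child was actually reaped; si_pid was zeroed above, so it is still 0
   when e.g. WNOHANG had nothing to report. */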
5316 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5317 goto efault; 5318 host_to_target_siginfo(p, &info); 5319 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5320 } 5321 } 5322 break; 5323 #endif 5324 #ifdef TARGET_NR_creat /* not on alpha */ 5325 case TARGET_NR_creat: 5326 if (!(p = lock_user_string(arg1))) 5327 goto efault; 5328 ret = get_errno(creat(p, arg2)); 5329 unlock_user(p, arg1, 0); 5330 break; 5331 #endif 5332 case TARGET_NR_link: 5333 { 5334 void * p2; 5335 p = lock_user_string(arg1); 5336 p2 = lock_user_string(arg2); 5337 if (!p || !p2) 5338 ret = -TARGET_EFAULT; 5339 else 5340 ret = get_errno(link(p, p2)); 5341 unlock_user(p2, arg2, 0); 5342 unlock_user(p, arg1, 0); 5343 } 5344 break; 5345 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 5346 case TARGET_NR_linkat: 5347 { 5348 void * p2 = NULL; 5349 if (!arg2 || !arg4) 5350 goto efault; 5351 p = lock_user_string(arg2); 5352 p2 = lock_user_string(arg4); 5353 if (!p || !p2) 5354 ret = -TARGET_EFAULT; 5355 else 5356 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); 5357 unlock_user(p, arg2, 0); 5358 unlock_user(p2, arg4, 0); 5359 } 5360 break; 5361 #endif 5362 case TARGET_NR_unlink: 5363 if (!(p = lock_user_string(arg1))) 5364 goto efault; 5365 ret = get_errno(unlink(p)); 5366 unlock_user(p, arg1, 0); 5367 break; 5368 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 5369 case TARGET_NR_unlinkat: 5370 if (!(p = lock_user_string(arg2))) 5371 goto efault; 5372 ret = get_errno(sys_unlinkat(arg1, p, arg3)); 5373 unlock_user(p, arg2, 0); 5374 break; 5375 #endif 5376 case TARGET_NR_execve: 5377 { 5378 char **argp, **envp; 5379 int argc, envc; 5380 abi_ulong gp; 5381 abi_ulong guest_argp; 5382 abi_ulong guest_envp; 5383 abi_ulong addr; 5384 char **q; 5385 int total_size = 0; 5386 5387 argc = 0; 5388 guest_argp = arg2; 5389 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5390 if (get_user_ual(addr, gp)) 5391 goto efault; 5392 if (!addr) 5393 break; 5394 argc++; 5395 } 5396 envc = 0; 5397 guest_envp = arg3; 5398 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5399 if (get_user_ual(addr, gp)) 5400 goto efault; 5401 if (!addr) 5402 break; 5403 envc++; 5404 } 5405 5406 argp = alloca((argc + 1) * sizeof(void *)); 5407 envp = alloca((envc + 1) * sizeof(void *)); 5408 5409 for (gp = guest_argp, q = argp; gp; 5410 gp += sizeof(abi_ulong), q++) { 5411 if (get_user_ual(addr, gp)) 5412 goto execve_efault; 5413 if (!addr) 5414 break; 5415 if (!(*q = lock_user_string(addr))) 5416 goto execve_efault; 5417 total_size += strlen(*q) + 1; 5418 } 5419 *q = NULL; 5420 5421 for (gp = guest_envp, q = envp; gp; 5422 gp += sizeof(abi_ulong), q++) { 5423 if (get_user_ual(addr, gp)) 5424 goto execve_efault; 5425 if (!addr) 5426 break; 5427 if (!(*q = lock_user_string(addr))) 5428 goto execve_efault; 5429 total_size += strlen(*q) + 1; 5430 } 5431 *q = NULL; 5432 5433 /* This case will not be caught by the host's execve() if its 5434 page size is bigger than the target's. 
*/ 5435 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5436 ret = -TARGET_E2BIG; 5437 goto execve_end; 5438 } 5439 if (!(p = lock_user_string(arg1))) 5440 goto execve_efault; 5441 ret = get_errno(execve(p, argp, envp)); 5442 unlock_user(p, arg1, 0); 5443 5444 goto execve_end; 5445 5446 execve_efault: 5447 ret = -TARGET_EFAULT; 5448 5449 execve_end: 5450 for (gp = guest_argp, q = argp; *q; 5451 gp += sizeof(abi_ulong), q++) { 5452 if (get_user_ual(addr, gp) 5453 || !addr) 5454 break; 5455 unlock_user(*q, addr, 0); 5456 } 5457 for (gp = guest_envp, q = envp; *q; 5458 gp += sizeof(abi_ulong), q++) { 5459 if (get_user_ual(addr, gp) 5460 || !addr) 5461 break; 5462 unlock_user(*q, addr, 0); 5463 } 5464 } 5465 break; 5466 case TARGET_NR_chdir: 5467 if (!(p = lock_user_string(arg1))) 5468 goto efault; 5469 ret = get_errno(chdir(p)); 5470 unlock_user(p, arg1, 0); 5471 break; 5472 #ifdef TARGET_NR_time 5473 case TARGET_NR_time: 5474 { 5475 time_t host_time; 5476 ret = get_errno(time(&host_time)); 5477 if (!is_error(ret) 5478 && arg1 5479 && put_user_sal(host_time, arg1)) 5480 goto efault; 5481 } 5482 break; 5483 #endif 5484 case TARGET_NR_mknod: 5485 if (!(p = lock_user_string(arg1))) 5486 goto efault; 5487 ret = get_errno(mknod(p, arg2, arg3)); 5488 unlock_user(p, arg1, 0); 5489 break; 5490 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 5491 case TARGET_NR_mknodat: 5492 if (!(p = lock_user_string(arg2))) 5493 goto efault; 5494 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); 5495 unlock_user(p, arg2, 0); 5496 break; 5497 #endif 5498 case TARGET_NR_chmod: 5499 if (!(p = lock_user_string(arg1))) 5500 goto efault; 5501 ret = get_errno(chmod(p, arg2)); 5502 unlock_user(p, arg1, 0); 5503 break; 5504 #ifdef TARGET_NR_break 5505 case TARGET_NR_break: 5506 goto unimplemented; 5507 #endif 5508 #ifdef TARGET_NR_oldstat 5509 case TARGET_NR_oldstat: 5510 goto unimplemented; 5511 #endif 5512 case TARGET_NR_lseek: 5513 ret = get_errno(lseek(arg1, arg2, arg3)); 5514 break; 5515 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5516 /* Alpha specific */ 5517 case TARGET_NR_getxpid: 5518 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5519 ret = get_errno(getpid()); 5520 break; 5521 #endif 5522 #ifdef TARGET_NR_getpid 5523 case TARGET_NR_getpid: 5524 ret = get_errno(getpid()); 5525 break; 5526 #endif 5527 case TARGET_NR_mount: 5528 { 5529 /* need to look at the data field */ 5530 void *p2, *p3; 5531 p = lock_user_string(arg1); 5532 p2 = lock_user_string(arg2); 5533 p3 = lock_user_string(arg3); 5534 if (!p || !p2 || !p3) 5535 ret = -TARGET_EFAULT; 5536 else { 5537 /* FIXME - arg5 should be locked, but it isn't clear how to 5538 * do that since it's not guaranteed to be a NULL-terminated 5539 * string. 5540 */ 5541 if ( ! 
arg5 ) 5542 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5543 else 5544 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5545 } 5546 unlock_user(p, arg1, 0); 5547 unlock_user(p2, arg2, 0); 5548 unlock_user(p3, arg3, 0); 5549 break; 5550 } 5551 #ifdef TARGET_NR_umount 5552 case TARGET_NR_umount: 5553 if (!(p = lock_user_string(arg1))) 5554 goto efault; 5555 ret = get_errno(umount(p)); 5556 unlock_user(p, arg1, 0); 5557 break; 5558 #endif 5559 #ifdef TARGET_NR_stime /* not on alpha */ 5560 case TARGET_NR_stime: 5561 { 5562 time_t host_time; 5563 if (get_user_sal(host_time, arg1)) 5564 goto efault; 5565 ret = get_errno(stime(&host_time)); 5566 } 5567 break; 5568 #endif 5569 case TARGET_NR_ptrace: 5570 goto unimplemented; 5571 #ifdef TARGET_NR_alarm /* not on alpha */ 5572 case TARGET_NR_alarm: 5573 ret = alarm(arg1); 5574 break; 5575 #endif 5576 #ifdef TARGET_NR_oldfstat 5577 case TARGET_NR_oldfstat: 5578 goto unimplemented; 5579 #endif 5580 #ifdef TARGET_NR_pause /* not on alpha */ 5581 case TARGET_NR_pause: 5582 ret = get_errno(pause()); 5583 break; 5584 #endif 5585 #ifdef TARGET_NR_utime 5586 case TARGET_NR_utime: 5587 { 5588 struct utimbuf tbuf, *host_tbuf; 5589 struct target_utimbuf *target_tbuf; 5590 if (arg2) { 5591 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5592 goto efault; 5593 tbuf.actime = tswapal(target_tbuf->actime); 5594 tbuf.modtime = tswapal(target_tbuf->modtime); 5595 unlock_user_struct(target_tbuf, arg2, 0); 5596 host_tbuf = &tbuf; 5597 } else { 5598 host_tbuf = NULL; 5599 } 5600 if (!(p = lock_user_string(arg1))) 5601 goto efault; 5602 ret = get_errno(utime(p, host_tbuf)); 5603 unlock_user(p, arg1, 0); 5604 } 5605 break; 5606 #endif 5607 case TARGET_NR_utimes: 5608 { 5609 struct timeval *tvp, tv[2]; 5610 if (arg2) { 5611 if (copy_from_user_timeval(&tv[0], arg2) 5612 || copy_from_user_timeval(&tv[1], 5613 arg2 + sizeof(struct target_timeval))) 5614 goto efault; 5615 tvp = tv; 5616 } else { 5617 tvp = NULL; 5618 } 5619 if (!(p = lock_user_string(arg1))) 5620 goto efault; 5621 ret = get_errno(utimes(p, tvp)); 5622 unlock_user(p, arg1, 0); 5623 } 5624 break; 5625 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 5626 case TARGET_NR_futimesat: 5627 { 5628 struct timeval *tvp, tv[2]; 5629 if (arg3) { 5630 if (copy_from_user_timeval(&tv[0], arg3) 5631 || copy_from_user_timeval(&tv[1], 5632 arg3 + sizeof(struct target_timeval))) 5633 goto efault; 5634 tvp = tv; 5635 } else { 5636 tvp = NULL; 5637 } 5638 if (!(p = lock_user_string(arg2))) 5639 goto efault; 5640 ret = get_errno(sys_futimesat(arg1, path(p), tvp)); 5641 unlock_user(p, arg2, 0); 5642 } 5643 break; 5644 #endif 5645 #ifdef TARGET_NR_stty 5646 case TARGET_NR_stty: 5647 goto unimplemented; 5648 #endif 5649 #ifdef TARGET_NR_gtty 5650 case TARGET_NR_gtty: 5651 goto unimplemented; 5652 #endif 5653 case TARGET_NR_access: 5654 if (!(p = lock_user_string(arg1))) 5655 goto efault; 5656 ret = get_errno(access(path(p), arg2)); 5657 unlock_user(p, arg1, 0); 5658 break; 5659 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5660 case TARGET_NR_faccessat: 5661 if (!(p = lock_user_string(arg2))) 5662 goto efault; 5663 ret = get_errno(sys_faccessat(arg1, p, arg3)); 5664 unlock_user(p, arg2, 0); 5665 break; 5666 #endif 5667 #ifdef TARGET_NR_nice /* not on alpha */ 5668 case TARGET_NR_nice: 5669 ret = get_errno(nice(arg1)); 5670 break; 5671 #endif 5672 #ifdef TARGET_NR_ftime 5673 case TARGET_NR_ftime: 5674 goto unimplemented; 5675 #endif 5676 case TARGET_NR_sync: 5677 
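/* sync() returns void, so simply report success. */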
sync(); 5678 ret = 0; 5679 break; 5680 case TARGET_NR_kill: 5681 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5682 break; 5683 case TARGET_NR_rename: 5684 { 5685 void *p2; 5686 p = lock_user_string(arg1); 5687 p2 = lock_user_string(arg2); 5688 if (!p || !p2) 5689 ret = -TARGET_EFAULT; 5690 else 5691 ret = get_errno(rename(p, p2)); 5692 unlock_user(p2, arg2, 0); 5693 unlock_user(p, arg1, 0); 5694 } 5695 break; 5696 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 5697 case TARGET_NR_renameat: 5698 { 5699 void *p2; 5700 p = lock_user_string(arg2); 5701 p2 = lock_user_string(arg4); 5702 if (!p || !p2) 5703 ret = -TARGET_EFAULT; 5704 else 5705 ret = get_errno(sys_renameat(arg1, p, arg3, p2)); 5706 unlock_user(p2, arg4, 0); 5707 unlock_user(p, arg2, 0); 5708 } 5709 break; 5710 #endif 5711 case TARGET_NR_mkdir: 5712 if (!(p = lock_user_string(arg1))) 5713 goto efault; 5714 ret = get_errno(mkdir(p, arg2)); 5715 unlock_user(p, arg1, 0); 5716 break; 5717 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 5718 case TARGET_NR_mkdirat: 5719 if (!(p = lock_user_string(arg2))) 5720 goto efault; 5721 ret = get_errno(sys_mkdirat(arg1, p, arg3)); 5722 unlock_user(p, arg2, 0); 5723 break; 5724 #endif 5725 case TARGET_NR_rmdir: 5726 if (!(p = lock_user_string(arg1))) 5727 goto efault; 5728 ret = get_errno(rmdir(p)); 5729 unlock_user(p, arg1, 0); 5730 break; 5731 case TARGET_NR_dup: 5732 ret = get_errno(dup(arg1)); 5733 break; 5734 case TARGET_NR_pipe: 5735 ret = do_pipe(cpu_env, arg1, 0, 0); 5736 break; 5737 #ifdef TARGET_NR_pipe2 5738 case TARGET_NR_pipe2: 5739 ret = do_pipe(cpu_env, arg1, 5740 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5741 break; 5742 #endif 5743 case TARGET_NR_times: 5744 { 5745 struct target_tms *tmsp; 5746 struct tms tms; 5747 ret = get_errno(times(&tms)); 5748 if (arg1) { 5749 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5750 if (!tmsp) 5751 goto efault; 5752 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5753 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5754 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5755 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5756 } 5757 if (!is_error(ret)) 5758 ret = host_to_target_clock_t(ret); 5759 } 5760 break; 5761 #ifdef TARGET_NR_prof 5762 case TARGET_NR_prof: 5763 goto unimplemented; 5764 #endif 5765 #ifdef TARGET_NR_signal 5766 case TARGET_NR_signal: 5767 goto unimplemented; 5768 #endif 5769 case TARGET_NR_acct: 5770 if (arg1 == 0) { 5771 ret = get_errno(acct(NULL)); 5772 } else { 5773 if (!(p = lock_user_string(arg1))) 5774 goto efault; 5775 ret = get_errno(acct(path(p))); 5776 unlock_user(p, arg1, 0); 5777 } 5778 break; 5779 #ifdef TARGET_NR_umount2 /* not on alpha */ 5780 case TARGET_NR_umount2: 5781 if (!(p = lock_user_string(arg1))) 5782 goto efault; 5783 ret = get_errno(umount2(p, arg2)); 5784 unlock_user(p, arg1, 0); 5785 break; 5786 #endif 5787 #ifdef TARGET_NR_lock 5788 case TARGET_NR_lock: 5789 goto unimplemented; 5790 #endif 5791 case TARGET_NR_ioctl: 5792 ret = do_ioctl(arg1, arg2, arg3); 5793 break; 5794 case TARGET_NR_fcntl: 5795 ret = do_fcntl(arg1, arg2, arg3); 5796 break; 5797 #ifdef TARGET_NR_mpx 5798 case TARGET_NR_mpx: 5799 goto unimplemented; 5800 #endif 5801 case TARGET_NR_setpgid: 5802 ret = get_errno(setpgid(arg1, arg2)); 5803 break; 5804 #ifdef TARGET_NR_ulimit 5805 case TARGET_NR_ulimit: 5806 goto unimplemented; 5807 #endif 5808 #ifdef TARGET_NR_oldolduname 5809 case 
TARGET_NR_oldolduname: 5810 goto unimplemented; 5811 #endif 5812 case TARGET_NR_umask: 5813 ret = get_errno(umask(arg1)); 5814 break; 5815 case TARGET_NR_chroot: 5816 if (!(p = lock_user_string(arg1))) 5817 goto efault; 5818 ret = get_errno(chroot(p)); 5819 unlock_user(p, arg1, 0); 5820 break; 5821 case TARGET_NR_ustat: 5822 goto unimplemented; 5823 case TARGET_NR_dup2: 5824 ret = get_errno(dup2(arg1, arg2)); 5825 break; 5826 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5827 case TARGET_NR_dup3: 5828 ret = get_errno(dup3(arg1, arg2, arg3)); 5829 break; 5830 #endif 5831 #ifdef TARGET_NR_getppid /* not on alpha */ 5832 case TARGET_NR_getppid: 5833 ret = get_errno(getppid()); 5834 break; 5835 #endif 5836 case TARGET_NR_getpgrp: 5837 ret = get_errno(getpgrp()); 5838 break; 5839 case TARGET_NR_setsid: 5840 ret = get_errno(setsid()); 5841 break; 5842 #ifdef TARGET_NR_sigaction 5843 case TARGET_NR_sigaction: 5844 { 5845 #if defined(TARGET_ALPHA) 5846 struct target_sigaction act, oact, *pact = 0; 5847 struct target_old_sigaction *old_act; 5848 if (arg2) { 5849 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5850 goto efault; 5851 act._sa_handler = old_act->_sa_handler; 5852 target_siginitset(&act.sa_mask, old_act->sa_mask); 5853 act.sa_flags = old_act->sa_flags; 5854 act.sa_restorer = 0; 5855 unlock_user_struct(old_act, arg2, 0); 5856 pact = &act; 5857 } 5858 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5859 if (!is_error(ret) && arg3) { 5860 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5861 goto efault; 5862 old_act->_sa_handler = oact._sa_handler; 5863 old_act->sa_mask = oact.sa_mask.sig[0]; 5864 old_act->sa_flags = oact.sa_flags; 5865 unlock_user_struct(old_act, arg3, 1); 5866 } 5867 #elif defined(TARGET_MIPS) 5868 struct target_sigaction act, oact, *pact, *old_act; 5869 5870 if (arg2) { 5871 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5872 goto efault; 5873 act._sa_handler = old_act->_sa_handler; 5874 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5875 act.sa_flags = old_act->sa_flags; 5876 unlock_user_struct(old_act, arg2, 0); 5877 pact = &act; 5878 } else { 5879 pact = NULL; 5880 } 5881 5882 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5883 5884 if (!is_error(ret) && arg3) { 5885 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5886 goto efault; 5887 old_act->_sa_handler = oact._sa_handler; 5888 old_act->sa_flags = oact.sa_flags; 5889 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5890 old_act->sa_mask.sig[1] = 0; 5891 old_act->sa_mask.sig[2] = 0; 5892 old_act->sa_mask.sig[3] = 0; 5893 unlock_user_struct(old_act, arg3, 1); 5894 } 5895 #else 5896 struct target_old_sigaction *old_act; 5897 struct target_sigaction act, oact, *pact; 5898 if (arg2) { 5899 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5900 goto efault; 5901 act._sa_handler = old_act->_sa_handler; 5902 target_siginitset(&act.sa_mask, old_act->sa_mask); 5903 act.sa_flags = old_act->sa_flags; 5904 act.sa_restorer = old_act->sa_restorer; 5905 unlock_user_struct(old_act, arg2, 0); 5906 pact = &act; 5907 } else { 5908 pact = NULL; 5909 } 5910 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5911 if (!is_error(ret) && arg3) { 5912 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5913 goto efault; 5914 old_act->_sa_handler = oact._sa_handler; 5915 old_act->sa_mask = oact.sa_mask.sig[0]; 5916 old_act->sa_flags = oact.sa_flags; 5917 old_act->sa_restorer = oact.sa_restorer; 5918 unlock_user_struct(old_act, arg3, 1); 5919 } 5920 #endif 5921 } 5922 break; 5923 #endif 
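/* rt_sigaction: Alpha guests pass a distinct target_rt_sigaction layout
   (with the restorer supplied in arg5); other targets lock the
   target_sigaction structs and hand them straight to do_sigaction(). */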
5924 case TARGET_NR_rt_sigaction: 5925 { 5926 #if defined(TARGET_ALPHA) 5927 struct target_sigaction act, oact, *pact = 0; 5928 struct target_rt_sigaction *rt_act; 5929 /* ??? arg4 == sizeof(sigset_t). */ 5930 if (arg2) { 5931 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5932 goto efault; 5933 act._sa_handler = rt_act->_sa_handler; 5934 act.sa_mask = rt_act->sa_mask; 5935 act.sa_flags = rt_act->sa_flags; 5936 act.sa_restorer = arg5; 5937 unlock_user_struct(rt_act, arg2, 0); 5938 pact = &act; 5939 } 5940 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5941 if (!is_error(ret) && arg3) { 5942 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5943 goto efault; 5944 rt_act->_sa_handler = oact._sa_handler; 5945 rt_act->sa_mask = oact.sa_mask; 5946 rt_act->sa_flags = oact.sa_flags; 5947 unlock_user_struct(rt_act, arg3, 1); 5948 } 5949 #else 5950 struct target_sigaction *act; 5951 struct target_sigaction *oact; 5952 5953 if (arg2) { 5954 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5955 goto efault; 5956 } else 5957 act = NULL; 5958 if (arg3) { 5959 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5960 ret = -TARGET_EFAULT; 5961 goto rt_sigaction_fail; 5962 } 5963 } else 5964 oact = NULL; 5965 ret = get_errno(do_sigaction(arg1, act, oact)); 5966 rt_sigaction_fail: 5967 if (act) 5968 unlock_user_struct(act, arg2, 0); 5969 if (oact) 5970 unlock_user_struct(oact, arg3, 1); 5971 #endif 5972 } 5973 break; 5974 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5975 case TARGET_NR_sgetmask: 5976 { 5977 sigset_t cur_set; 5978 abi_ulong target_set; 5979 sigprocmask(0, NULL, &cur_set); 5980 host_to_target_old_sigset(&target_set, &cur_set); 5981 ret = target_set; 5982 } 5983 break; 5984 #endif 5985 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5986 case TARGET_NR_ssetmask: 5987 { 5988 sigset_t set, oset, cur_set; 5989 abi_ulong target_set = arg1; 5990 sigprocmask(0, NULL, &cur_set); 5991 target_to_host_old_sigset(&set, &target_set); 5992 sigorset(&set, &set, &cur_set); 5993 sigprocmask(SIG_SETMASK, &set, &oset); 5994 host_to_target_old_sigset(&target_set, &oset); 5995 ret = target_set; 5996 } 5997 break; 5998 #endif 5999 #ifdef TARGET_NR_sigprocmask 6000 case TARGET_NR_sigprocmask: 6001 { 6002 #if defined(TARGET_ALPHA) 6003 sigset_t set, oldset; 6004 abi_ulong mask; 6005 int how; 6006 6007 switch (arg1) { 6008 case TARGET_SIG_BLOCK: 6009 how = SIG_BLOCK; 6010 break; 6011 case TARGET_SIG_UNBLOCK: 6012 how = SIG_UNBLOCK; 6013 break; 6014 case TARGET_SIG_SETMASK: 6015 how = SIG_SETMASK; 6016 break; 6017 default: 6018 ret = -TARGET_EINVAL; 6019 goto fail; 6020 } 6021 mask = arg2; 6022 target_to_host_old_sigset(&set, &mask); 6023 6024 ret = get_errno(sigprocmask(how, &set, &oldset)); 6025 if (!is_error(ret)) { 6026 host_to_target_old_sigset(&mask, &oldset); 6027 ret = mask; 6028 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6029 } 6030 #else 6031 sigset_t set, oldset, *set_ptr; 6032 int how; 6033 6034 if (arg2) { 6035 switch (arg1) { 6036 case TARGET_SIG_BLOCK: 6037 how = SIG_BLOCK; 6038 break; 6039 case TARGET_SIG_UNBLOCK: 6040 how = SIG_UNBLOCK; 6041 break; 6042 case TARGET_SIG_SETMASK: 6043 how = SIG_SETMASK; 6044 break; 6045 default: 6046 ret = -TARGET_EINVAL; 6047 goto fail; 6048 } 6049 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6050 goto efault; 6051 target_to_host_old_sigset(&set, p); 6052 unlock_user(p, arg2, 0); 6053 set_ptr = &set; 6054 } else { 6055 how = 0; 6056 set_ptr = NULL; 6057 } 6058 ret = get_errno(sigprocmask(how, set_ptr, 
&oldset)); 6059 if (!is_error(ret) && arg3) { 6060 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6061 goto efault; 6062 host_to_target_old_sigset(p, &oldset); 6063 unlock_user(p, arg3, sizeof(target_sigset_t)); 6064 } 6065 #endif 6066 } 6067 break; 6068 #endif 6069 case TARGET_NR_rt_sigprocmask: 6070 { 6071 int how = arg1; 6072 sigset_t set, oldset, *set_ptr; 6073 6074 if (arg2) { 6075 switch(how) { 6076 case TARGET_SIG_BLOCK: 6077 how = SIG_BLOCK; 6078 break; 6079 case TARGET_SIG_UNBLOCK: 6080 how = SIG_UNBLOCK; 6081 break; 6082 case TARGET_SIG_SETMASK: 6083 how = SIG_SETMASK; 6084 break; 6085 default: 6086 ret = -TARGET_EINVAL; 6087 goto fail; 6088 } 6089 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6090 goto efault; 6091 target_to_host_sigset(&set, p); 6092 unlock_user(p, arg2, 0); 6093 set_ptr = &set; 6094 } else { 6095 how = 0; 6096 set_ptr = NULL; 6097 } 6098 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6099 if (!is_error(ret) && arg3) { 6100 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6101 goto efault; 6102 host_to_target_sigset(p, &oldset); 6103 unlock_user(p, arg3, sizeof(target_sigset_t)); 6104 } 6105 } 6106 break; 6107 #ifdef TARGET_NR_sigpending 6108 case TARGET_NR_sigpending: 6109 { 6110 sigset_t set; 6111 ret = get_errno(sigpending(&set)); 6112 if (!is_error(ret)) { 6113 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6114 goto efault; 6115 host_to_target_old_sigset(p, &set); 6116 unlock_user(p, arg1, sizeof(target_sigset_t)); 6117 } 6118 } 6119 break; 6120 #endif 6121 case TARGET_NR_rt_sigpending: 6122 { 6123 sigset_t set; 6124 ret = get_errno(sigpending(&set)); 6125 if (!is_error(ret)) { 6126 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6127 goto efault; 6128 host_to_target_sigset(p, &set); 6129 unlock_user(p, arg1, sizeof(target_sigset_t)); 6130 } 6131 } 6132 break; 6133 #ifdef TARGET_NR_sigsuspend 6134 case TARGET_NR_sigsuspend: 6135 { 6136 sigset_t set; 6137 #if defined(TARGET_ALPHA) 6138 abi_ulong mask = arg1; 6139 target_to_host_old_sigset(&set, &mask); 6140 #else 6141 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6142 goto efault; 6143 target_to_host_old_sigset(&set, p); 6144 unlock_user(p, arg1, 0); 6145 #endif 6146 ret = get_errno(sigsuspend(&set)); 6147 } 6148 break; 6149 #endif 6150 case TARGET_NR_rt_sigsuspend: 6151 { 6152 sigset_t set; 6153 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6154 goto efault; 6155 target_to_host_sigset(&set, p); 6156 unlock_user(p, arg1, 0); 6157 ret = get_errno(sigsuspend(&set)); 6158 } 6159 break; 6160 case TARGET_NR_rt_sigtimedwait: 6161 { 6162 sigset_t set; 6163 struct timespec uts, *puts; 6164 siginfo_t uinfo; 6165 6166 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6167 goto efault; 6168 target_to_host_sigset(&set, p); 6169 unlock_user(p, arg1, 0); 6170 if (arg3) { 6171 puts = &uts; 6172 target_to_host_timespec(puts, arg3); 6173 } else { 6174 puts = NULL; 6175 } 6176 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6177 if (!is_error(ret) && arg2) { 6178 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 6179 goto efault; 6180 host_to_target_siginfo(p, &uinfo); 6181 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6182 } 6183 } 6184 break; 6185 case TARGET_NR_rt_sigqueueinfo: 6186 { 6187 siginfo_t uinfo; 6188 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6189 goto efault; 6190 
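/*
 * For orientation only (plain host C, not emulator code): on the host side,
 * queueing a signal with an attached payload is what sigqueue(3) provides,
 * and glibc implements it on top of rt_sigqueueinfo. The guest call carries
 * a full siginfo, which is why it is converted below before reaching the
 * raw syscall.
 */
#if 0
#include <sys/types.h>
#include <signal.h>

static int example_queue_value(pid_t pid, int value)
{
    union sigval sv;

    sv.sival_int = value;               /* payload shows up in si_value */
    return sigqueue(pid, SIGUSR1, sv);  /* rt_sigqueueinfo underneath */
}
#endif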
target_to_host_siginfo(&uinfo, p); 6191 unlock_user(p, arg1, 0); 6192 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6193 } 6194 break; 6195 #ifdef TARGET_NR_sigreturn 6196 case TARGET_NR_sigreturn: 6197 /* NOTE: ret is eax, so not transcoding must be done */ 6198 ret = do_sigreturn(cpu_env); 6199 break; 6200 #endif 6201 case TARGET_NR_rt_sigreturn: 6202 /* NOTE: ret is eax, so not transcoding must be done */ 6203 ret = do_rt_sigreturn(cpu_env); 6204 break; 6205 case TARGET_NR_sethostname: 6206 if (!(p = lock_user_string(arg1))) 6207 goto efault; 6208 ret = get_errno(sethostname(p, arg2)); 6209 unlock_user(p, arg1, 0); 6210 break; 6211 case TARGET_NR_setrlimit: 6212 { 6213 int resource = target_to_host_resource(arg1); 6214 struct target_rlimit *target_rlim; 6215 struct rlimit rlim; 6216 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6217 goto efault; 6218 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6219 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6220 unlock_user_struct(target_rlim, arg2, 0); 6221 ret = get_errno(setrlimit(resource, &rlim)); 6222 } 6223 break; 6224 case TARGET_NR_getrlimit: 6225 { 6226 int resource = target_to_host_resource(arg1); 6227 struct target_rlimit *target_rlim; 6228 struct rlimit rlim; 6229 6230 ret = get_errno(getrlimit(resource, &rlim)); 6231 if (!is_error(ret)) { 6232 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6233 goto efault; 6234 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6235 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6236 unlock_user_struct(target_rlim, arg2, 1); 6237 } 6238 } 6239 break; 6240 case TARGET_NR_getrusage: 6241 { 6242 struct rusage rusage; 6243 ret = get_errno(getrusage(arg1, &rusage)); 6244 if (!is_error(ret)) { 6245 host_to_target_rusage(arg2, &rusage); 6246 } 6247 } 6248 break; 6249 case TARGET_NR_gettimeofday: 6250 { 6251 struct timeval tv; 6252 ret = get_errno(gettimeofday(&tv, NULL)); 6253 if (!is_error(ret)) { 6254 if (copy_to_user_timeval(arg1, &tv)) 6255 goto efault; 6256 } 6257 } 6258 break; 6259 case TARGET_NR_settimeofday: 6260 { 6261 struct timeval tv; 6262 if (copy_from_user_timeval(&tv, arg1)) 6263 goto efault; 6264 ret = get_errno(settimeofday(&tv, NULL)); 6265 } 6266 break; 6267 #if defined(TARGET_NR_select) 6268 case TARGET_NR_select: 6269 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 6270 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6271 #else 6272 { 6273 struct target_sel_arg_struct *sel; 6274 abi_ulong inp, outp, exp, tvp; 6275 long nsel; 6276 6277 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6278 goto efault; 6279 nsel = tswapal(sel->n); 6280 inp = tswapal(sel->inp); 6281 outp = tswapal(sel->outp); 6282 exp = tswapal(sel->exp); 6283 tvp = tswapal(sel->tvp); 6284 unlock_user_struct(sel, arg1, 0); 6285 ret = do_select(nsel, inp, outp, exp, tvp); 6286 } 6287 #endif 6288 break; 6289 #endif 6290 #ifdef TARGET_NR_pselect6 6291 case TARGET_NR_pselect6: 6292 { 6293 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6294 fd_set rfds, wfds, efds; 6295 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6296 struct timespec ts, *ts_ptr; 6297 6298 /* 6299 * The 6th arg is actually two args smashed together, 6300 * so we cannot use the C library. 
6301 */ 6302 sigset_t set; 6303 struct { 6304 sigset_t *set; 6305 size_t size; 6306 } sig, *sig_ptr; 6307 6308 abi_ulong arg_sigset, arg_sigsize, *arg7; 6309 target_sigset_t *target_sigset; 6310 6311 n = arg1; 6312 rfd_addr = arg2; 6313 wfd_addr = arg3; 6314 efd_addr = arg4; 6315 ts_addr = arg5; 6316 6317 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6318 if (ret) { 6319 goto fail; 6320 } 6321 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6322 if (ret) { 6323 goto fail; 6324 } 6325 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6326 if (ret) { 6327 goto fail; 6328 } 6329 6330 /* 6331 * This takes a timespec, and not a timeval, so we cannot 6332 * use the do_select() helper ... 6333 */ 6334 if (ts_addr) { 6335 if (target_to_host_timespec(&ts, ts_addr)) { 6336 goto efault; 6337 } 6338 ts_ptr = &ts; 6339 } else { 6340 ts_ptr = NULL; 6341 } 6342 6343 /* Extract the two packed args for the sigset */ 6344 if (arg6) { 6345 sig_ptr = &sig; 6346 sig.size = _NSIG / 8; 6347 6348 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6349 if (!arg7) { 6350 goto efault; 6351 } 6352 arg_sigset = tswapal(arg7[0]); 6353 arg_sigsize = tswapal(arg7[1]); 6354 unlock_user(arg7, arg6, 0); 6355 6356 if (arg_sigset) { 6357 sig.set = &set; 6358 if (arg_sigsize != sizeof(*target_sigset)) { 6359 /* Like the kernel, we enforce correct size sigsets */ 6360 ret = -TARGET_EINVAL; 6361 goto fail; 6362 } 6363 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6364 sizeof(*target_sigset), 1); 6365 if (!target_sigset) { 6366 goto efault; 6367 } 6368 target_to_host_sigset(&set, target_sigset); 6369 unlock_user(target_sigset, arg_sigset, 0); 6370 } else { 6371 sig.set = NULL; 6372 } 6373 } else { 6374 sig_ptr = NULL; 6375 } 6376 6377 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6378 ts_ptr, sig_ptr)); 6379 6380 if (!is_error(ret)) { 6381 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6382 goto efault; 6383 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6384 goto efault; 6385 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6386 goto efault; 6387 6388 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6389 goto efault; 6390 } 6391 } 6392 break; 6393 #endif 6394 case TARGET_NR_symlink: 6395 { 6396 void *p2; 6397 p = lock_user_string(arg1); 6398 p2 = lock_user_string(arg2); 6399 if (!p || !p2) 6400 ret = -TARGET_EFAULT; 6401 else 6402 ret = get_errno(symlink(p, p2)); 6403 unlock_user(p2, arg2, 0); 6404 unlock_user(p, arg1, 0); 6405 } 6406 break; 6407 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 6408 case TARGET_NR_symlinkat: 6409 { 6410 void *p2; 6411 p = lock_user_string(arg1); 6412 p2 = lock_user_string(arg3); 6413 if (!p || !p2) 6414 ret = -TARGET_EFAULT; 6415 else 6416 ret = get_errno(sys_symlinkat(p, arg2, p2)); 6417 unlock_user(p2, arg3, 0); 6418 unlock_user(p, arg1, 0); 6419 } 6420 break; 6421 #endif 6422 #ifdef TARGET_NR_oldlstat 6423 case TARGET_NR_oldlstat: 6424 goto unimplemented; 6425 #endif 6426 case TARGET_NR_readlink: 6427 { 6428 void *p2, *temp; 6429 p = lock_user_string(arg1); 6430 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6431 if (!p || !p2) 6432 ret = -TARGET_EFAULT; 6433 else { 6434 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) { 6435 char real[PATH_MAX]; 6436 temp = realpath(exec_path,real); 6437 ret = (temp==NULL) ? 
get_errno(-1) : strlen(real) ; 6438 snprintf((char *)p2, arg3, "%s", real); 6439 } 6440 else 6441 ret = get_errno(readlink(path(p), p2, arg3)); 6442 } 6443 unlock_user(p2, arg2, ret); 6444 unlock_user(p, arg1, 0); 6445 } 6446 break; 6447 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 6448 case TARGET_NR_readlinkat: 6449 { 6450 void *p2; 6451 p = lock_user_string(arg2); 6452 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6453 if (!p || !p2) 6454 ret = -TARGET_EFAULT; 6455 else 6456 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); 6457 unlock_user(p2, arg3, ret); 6458 unlock_user(p, arg2, 0); 6459 } 6460 break; 6461 #endif 6462 #ifdef TARGET_NR_uselib 6463 case TARGET_NR_uselib: 6464 goto unimplemented; 6465 #endif 6466 #ifdef TARGET_NR_swapon 6467 case TARGET_NR_swapon: 6468 if (!(p = lock_user_string(arg1))) 6469 goto efault; 6470 ret = get_errno(swapon(p, arg2)); 6471 unlock_user(p, arg1, 0); 6472 break; 6473 #endif 6474 case TARGET_NR_reboot: 6475 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6476 /* arg4 must be ignored in all other cases */ 6477 p = lock_user_string(arg4); 6478 if (!p) { 6479 goto efault; 6480 } 6481 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6482 unlock_user(p, arg4, 0); 6483 } else { 6484 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6485 } 6486 break; 6487 #ifdef TARGET_NR_readdir 6488 case TARGET_NR_readdir: 6489 goto unimplemented; 6490 #endif 6491 #ifdef TARGET_NR_mmap 6492 case TARGET_NR_mmap: 6493 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \ 6494 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6495 || defined(TARGET_S390X) 6496 { 6497 abi_ulong *v; 6498 abi_ulong v1, v2, v3, v4, v5, v6; 6499 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6500 goto efault; 6501 v1 = tswapal(v[0]); 6502 v2 = tswapal(v[1]); 6503 v3 = tswapal(v[2]); 6504 v4 = tswapal(v[3]); 6505 v5 = tswapal(v[4]); 6506 v6 = tswapal(v[5]); 6507 unlock_user(v, arg1, 0); 6508 ret = get_errno(target_mmap(v1, v2, v3, 6509 target_to_host_bitmask(v4, mmap_flags_tbl), 6510 v5, v6)); 6511 } 6512 #else 6513 ret = get_errno(target_mmap(arg1, arg2, arg3, 6514 target_to_host_bitmask(arg4, mmap_flags_tbl), 6515 arg5, 6516 arg6)); 6517 #endif 6518 break; 6519 #endif 6520 #ifdef TARGET_NR_mmap2 6521 case TARGET_NR_mmap2: 6522 #ifndef MMAP_SHIFT 6523 #define MMAP_SHIFT 12 6524 #endif 6525 ret = get_errno(target_mmap(arg1, arg2, arg3, 6526 target_to_host_bitmask(arg4, mmap_flags_tbl), 6527 arg5, 6528 arg6 << MMAP_SHIFT)); 6529 break; 6530 #endif 6531 case TARGET_NR_munmap: 6532 ret = get_errno(target_munmap(arg1, arg2)); 6533 break; 6534 case TARGET_NR_mprotect: 6535 { 6536 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 6537 /* Special hack to detect libc making the stack executable. */ 6538 if ((arg3 & PROT_GROWSDOWN) 6539 && arg1 >= ts->info->stack_limit 6540 && arg1 <= ts->info->start_stack) { 6541 arg3 &= ~PROT_GROWSDOWN; 6542 arg2 = arg2 + arg1 - ts->info->stack_limit; 6543 arg1 = ts->info->stack_limit; 6544 } 6545 } 6546 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6547 break; 6548 #ifdef TARGET_NR_mremap 6549 case TARGET_NR_mremap: 6550 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6551 break; 6552 #endif 6553 /* ??? msync/mlock/munlock are broken for softmmu. 
*/ 6554 #ifdef TARGET_NR_msync 6555 case TARGET_NR_msync: 6556 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6557 break; 6558 #endif 6559 #ifdef TARGET_NR_mlock 6560 case TARGET_NR_mlock: 6561 ret = get_errno(mlock(g2h(arg1), arg2)); 6562 break; 6563 #endif 6564 #ifdef TARGET_NR_munlock 6565 case TARGET_NR_munlock: 6566 ret = get_errno(munlock(g2h(arg1), arg2)); 6567 break; 6568 #endif 6569 #ifdef TARGET_NR_mlockall 6570 case TARGET_NR_mlockall: 6571 ret = get_errno(mlockall(arg1)); 6572 break; 6573 #endif 6574 #ifdef TARGET_NR_munlockall 6575 case TARGET_NR_munlockall: 6576 ret = get_errno(munlockall()); 6577 break; 6578 #endif 6579 case TARGET_NR_truncate: 6580 if (!(p = lock_user_string(arg1))) 6581 goto efault; 6582 ret = get_errno(truncate(p, arg2)); 6583 unlock_user(p, arg1, 0); 6584 break; 6585 case TARGET_NR_ftruncate: 6586 ret = get_errno(ftruncate(arg1, arg2)); 6587 break; 6588 case TARGET_NR_fchmod: 6589 ret = get_errno(fchmod(arg1, arg2)); 6590 break; 6591 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 6592 case TARGET_NR_fchmodat: 6593 if (!(p = lock_user_string(arg2))) 6594 goto efault; 6595 ret = get_errno(sys_fchmodat(arg1, p, arg3)); 6596 unlock_user(p, arg2, 0); 6597 break; 6598 #endif 6599 case TARGET_NR_getpriority: 6600 /* Note that negative values are valid for getpriority, so we must 6601 differentiate based on errno settings. */ 6602 errno = 0; 6603 ret = getpriority(arg1, arg2); 6604 if (ret == -1 && errno != 0) { 6605 ret = -host_to_target_errno(errno); 6606 break; 6607 } 6608 #ifdef TARGET_ALPHA 6609 /* Return value is the unbiased priority. Signal no error. */ 6610 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6611 #else 6612 /* Return value is a biased priority to avoid negative numbers. */ 6613 ret = 20 - ret; 6614 #endif 6615 break; 6616 case TARGET_NR_setpriority: 6617 ret = get_errno(setpriority(arg1, arg2, arg3)); 6618 break; 6619 #ifdef TARGET_NR_profil 6620 case TARGET_NR_profil: 6621 goto unimplemented; 6622 #endif 6623 case TARGET_NR_statfs: 6624 if (!(p = lock_user_string(arg1))) 6625 goto efault; 6626 ret = get_errno(statfs(path(p), &stfs)); 6627 unlock_user(p, arg1, 0); 6628 convert_statfs: 6629 if (!is_error(ret)) { 6630 struct target_statfs *target_stfs; 6631 6632 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6633 goto efault; 6634 __put_user(stfs.f_type, &target_stfs->f_type); 6635 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6636 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6637 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6638 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6639 __put_user(stfs.f_files, &target_stfs->f_files); 6640 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6641 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6642 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6643 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6644 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6645 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6646 unlock_user_struct(target_stfs, arg2, 1); 6647 } 6648 break; 6649 case TARGET_NR_fstatfs: 6650 ret = get_errno(fstatfs(arg1, &stfs)); 6651 goto convert_statfs; 6652 #ifdef TARGET_NR_statfs64 6653 case TARGET_NR_statfs64: 6654 if (!(p = lock_user_string(arg1))) 6655 goto efault; 6656 ret = get_errno(statfs(path(p), &stfs)); 6657 unlock_user(p, arg1, 0); 6658 convert_statfs64: 6659 if (!is_error(ret)) { 6660 struct target_statfs64 *target_stfs; 6661 6662 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 
6663 goto efault; 6664 __put_user(stfs.f_type, &target_stfs->f_type); 6665 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6666 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6667 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6668 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6669 __put_user(stfs.f_files, &target_stfs->f_files); 6670 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6671 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6672 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6673 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6674 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6675 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6676 unlock_user_struct(target_stfs, arg3, 1); 6677 } 6678 break; 6679 case TARGET_NR_fstatfs64: 6680 ret = get_errno(fstatfs(arg1, &stfs)); 6681 goto convert_statfs64; 6682 #endif 6683 #ifdef TARGET_NR_ioperm 6684 case TARGET_NR_ioperm: 6685 goto unimplemented; 6686 #endif 6687 #ifdef TARGET_NR_socketcall 6688 case TARGET_NR_socketcall: 6689 ret = do_socketcall(arg1, arg2); 6690 break; 6691 #endif 6692 #ifdef TARGET_NR_accept 6693 case TARGET_NR_accept: 6694 ret = do_accept4(arg1, arg2, arg3, 0); 6695 break; 6696 #endif 6697 #ifdef TARGET_NR_accept4 6698 case TARGET_NR_accept4: 6699 #ifdef CONFIG_ACCEPT4 6700 ret = do_accept4(arg1, arg2, arg3, arg4); 6701 #else 6702 goto unimplemented; 6703 #endif 6704 break; 6705 #endif 6706 #ifdef TARGET_NR_bind 6707 case TARGET_NR_bind: 6708 ret = do_bind(arg1, arg2, arg3); 6709 break; 6710 #endif 6711 #ifdef TARGET_NR_connect 6712 case TARGET_NR_connect: 6713 ret = do_connect(arg1, arg2, arg3); 6714 break; 6715 #endif 6716 #ifdef TARGET_NR_getpeername 6717 case TARGET_NR_getpeername: 6718 ret = do_getpeername(arg1, arg2, arg3); 6719 break; 6720 #endif 6721 #ifdef TARGET_NR_getsockname 6722 case TARGET_NR_getsockname: 6723 ret = do_getsockname(arg1, arg2, arg3); 6724 break; 6725 #endif 6726 #ifdef TARGET_NR_getsockopt 6727 case TARGET_NR_getsockopt: 6728 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6729 break; 6730 #endif 6731 #ifdef TARGET_NR_listen 6732 case TARGET_NR_listen: 6733 ret = get_errno(listen(arg1, arg2)); 6734 break; 6735 #endif 6736 #ifdef TARGET_NR_recv 6737 case TARGET_NR_recv: 6738 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6739 break; 6740 #endif 6741 #ifdef TARGET_NR_recvfrom 6742 case TARGET_NR_recvfrom: 6743 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6744 break; 6745 #endif 6746 #ifdef TARGET_NR_recvmsg 6747 case TARGET_NR_recvmsg: 6748 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6749 break; 6750 #endif 6751 #ifdef TARGET_NR_send 6752 case TARGET_NR_send: 6753 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6754 break; 6755 #endif 6756 #ifdef TARGET_NR_sendmsg 6757 case TARGET_NR_sendmsg: 6758 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6759 break; 6760 #endif 6761 #ifdef TARGET_NR_sendto 6762 case TARGET_NR_sendto: 6763 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6764 break; 6765 #endif 6766 #ifdef TARGET_NR_shutdown 6767 case TARGET_NR_shutdown: 6768 ret = get_errno(shutdown(arg1, arg2)); 6769 break; 6770 #endif 6771 #ifdef TARGET_NR_socket 6772 case TARGET_NR_socket: 6773 ret = do_socket(arg1, arg2, arg3); 6774 break; 6775 #endif 6776 #ifdef TARGET_NR_socketpair 6777 case TARGET_NR_socketpair: 6778 ret = do_socketpair(arg1, arg2, arg3, arg4); 6779 break; 6780 #endif 6781 #ifdef TARGET_NR_setsockopt 6782 case TARGET_NR_setsockopt: 6783 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 
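/*
 * Minimal host-side sketch (not the do_setsockopt() helper itself): the
 * (level, optname, optval, optlen) shape below is what the helper has to
 * reconstruct from guest memory, translating option constants per level
 * where the target and host values differ.
 */
#if 0
#include <sys/socket.h>

static int example_reuseaddr(int fd)
{
    int one = 1;

    return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
}
#endif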
6784 break; 6785 #endif 6786 6787 case TARGET_NR_syslog: 6788 if (!(p = lock_user_string(arg2))) 6789 goto efault; 6790 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6791 unlock_user(p, arg2, 0); 6792 break; 6793 6794 case TARGET_NR_setitimer: 6795 { 6796 struct itimerval value, ovalue, *pvalue; 6797 6798 if (arg2) { 6799 pvalue = &value; 6800 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6801 || copy_from_user_timeval(&pvalue->it_value, 6802 arg2 + sizeof(struct target_timeval))) 6803 goto efault; 6804 } else { 6805 pvalue = NULL; 6806 } 6807 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6808 if (!is_error(ret) && arg3) { 6809 if (copy_to_user_timeval(arg3, 6810 &ovalue.it_interval) 6811 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6812 &ovalue.it_value)) 6813 goto efault; 6814 } 6815 } 6816 break; 6817 case TARGET_NR_getitimer: 6818 { 6819 struct itimerval value; 6820 6821 ret = get_errno(getitimer(arg1, &value)); 6822 if (!is_error(ret) && arg2) { 6823 if (copy_to_user_timeval(arg2, 6824 &value.it_interval) 6825 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6826 &value.it_value)) 6827 goto efault; 6828 } 6829 } 6830 break; 6831 case TARGET_NR_stat: 6832 if (!(p = lock_user_string(arg1))) 6833 goto efault; 6834 ret = get_errno(stat(path(p), &st)); 6835 unlock_user(p, arg1, 0); 6836 goto do_stat; 6837 case TARGET_NR_lstat: 6838 if (!(p = lock_user_string(arg1))) 6839 goto efault; 6840 ret = get_errno(lstat(path(p), &st)); 6841 unlock_user(p, arg1, 0); 6842 goto do_stat; 6843 case TARGET_NR_fstat: 6844 { 6845 ret = get_errno(fstat(arg1, &st)); 6846 do_stat: 6847 if (!is_error(ret)) { 6848 struct target_stat *target_st; 6849 6850 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6851 goto efault; 6852 memset(target_st, 0, sizeof(*target_st)); 6853 __put_user(st.st_dev, &target_st->st_dev); 6854 __put_user(st.st_ino, &target_st->st_ino); 6855 __put_user(st.st_mode, &target_st->st_mode); 6856 __put_user(st.st_uid, &target_st->st_uid); 6857 __put_user(st.st_gid, &target_st->st_gid); 6858 __put_user(st.st_nlink, &target_st->st_nlink); 6859 __put_user(st.st_rdev, &target_st->st_rdev); 6860 __put_user(st.st_size, &target_st->st_size); 6861 __put_user(st.st_blksize, &target_st->st_blksize); 6862 __put_user(st.st_blocks, &target_st->st_blocks); 6863 __put_user(st.st_atime, &target_st->target_st_atime); 6864 __put_user(st.st_mtime, &target_st->target_st_mtime); 6865 __put_user(st.st_ctime, &target_st->target_st_ctime); 6866 unlock_user_struct(target_st, arg2, 1); 6867 } 6868 } 6869 break; 6870 #ifdef TARGET_NR_olduname 6871 case TARGET_NR_olduname: 6872 goto unimplemented; 6873 #endif 6874 #ifdef TARGET_NR_iopl 6875 case TARGET_NR_iopl: 6876 goto unimplemented; 6877 #endif 6878 case TARGET_NR_vhangup: 6879 ret = get_errno(vhangup()); 6880 break; 6881 #ifdef TARGET_NR_idle 6882 case TARGET_NR_idle: 6883 goto unimplemented; 6884 #endif 6885 #ifdef TARGET_NR_syscall 6886 case TARGET_NR_syscall: 6887 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6888 arg6, arg7, arg8, 0); 6889 break; 6890 #endif 6891 case TARGET_NR_wait4: 6892 { 6893 int status; 6894 abi_long status_ptr = arg2; 6895 struct rusage rusage, *rusage_ptr; 6896 abi_ulong target_rusage = arg4; 6897 if (target_rusage) 6898 rusage_ptr = &rusage; 6899 else 6900 rusage_ptr = NULL; 6901 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6902 if (!is_error(ret)) { 6903 if (status_ptr && ret) { 6904 status = host_to_target_waitstatus(status); 6905 if 
(put_user_s32(status, status_ptr)) 6906 goto efault; 6907 } 6908 if (target_rusage) 6909 host_to_target_rusage(target_rusage, &rusage); 6910 } 6911 } 6912 break; 6913 #ifdef TARGET_NR_swapoff 6914 case TARGET_NR_swapoff: 6915 if (!(p = lock_user_string(arg1))) 6916 goto efault; 6917 ret = get_errno(swapoff(p)); 6918 unlock_user(p, arg1, 0); 6919 break; 6920 #endif 6921 case TARGET_NR_sysinfo: 6922 { 6923 struct target_sysinfo *target_value; 6924 struct sysinfo value; 6925 ret = get_errno(sysinfo(&value)); 6926 if (!is_error(ret) && arg1) 6927 { 6928 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6929 goto efault; 6930 __put_user(value.uptime, &target_value->uptime); 6931 __put_user(value.loads[0], &target_value->loads[0]); 6932 __put_user(value.loads[1], &target_value->loads[1]); 6933 __put_user(value.loads[2], &target_value->loads[2]); 6934 __put_user(value.totalram, &target_value->totalram); 6935 __put_user(value.freeram, &target_value->freeram); 6936 __put_user(value.sharedram, &target_value->sharedram); 6937 __put_user(value.bufferram, &target_value->bufferram); 6938 __put_user(value.totalswap, &target_value->totalswap); 6939 __put_user(value.freeswap, &target_value->freeswap); 6940 __put_user(value.procs, &target_value->procs); 6941 __put_user(value.totalhigh, &target_value->totalhigh); 6942 __put_user(value.freehigh, &target_value->freehigh); 6943 __put_user(value.mem_unit, &target_value->mem_unit); 6944 unlock_user_struct(target_value, arg1, 1); 6945 } 6946 } 6947 break; 6948 #ifdef TARGET_NR_ipc 6949 case TARGET_NR_ipc: 6950 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6951 break; 6952 #endif 6953 #ifdef TARGET_NR_semget 6954 case TARGET_NR_semget: 6955 ret = get_errno(semget(arg1, arg2, arg3)); 6956 break; 6957 #endif 6958 #ifdef TARGET_NR_semop 6959 case TARGET_NR_semop: 6960 ret = do_semop(arg1, arg2, arg3); 6961 break; 6962 #endif 6963 #ifdef TARGET_NR_semctl 6964 case TARGET_NR_semctl: 6965 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 6966 break; 6967 #endif 6968 #ifdef TARGET_NR_msgctl 6969 case TARGET_NR_msgctl: 6970 ret = do_msgctl(arg1, arg2, arg3); 6971 break; 6972 #endif 6973 #ifdef TARGET_NR_msgget 6974 case TARGET_NR_msgget: 6975 ret = get_errno(msgget(arg1, arg2)); 6976 break; 6977 #endif 6978 #ifdef TARGET_NR_msgrcv 6979 case TARGET_NR_msgrcv: 6980 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 6981 break; 6982 #endif 6983 #ifdef TARGET_NR_msgsnd 6984 case TARGET_NR_msgsnd: 6985 ret = do_msgsnd(arg1, arg2, arg3, arg4); 6986 break; 6987 #endif 6988 #ifdef TARGET_NR_shmget 6989 case TARGET_NR_shmget: 6990 ret = get_errno(shmget(arg1, arg2, arg3)); 6991 break; 6992 #endif 6993 #ifdef TARGET_NR_shmctl 6994 case TARGET_NR_shmctl: 6995 ret = do_shmctl(arg1, arg2, arg3); 6996 break; 6997 #endif 6998 #ifdef TARGET_NR_shmat 6999 case TARGET_NR_shmat: 7000 ret = do_shmat(arg1, arg2, arg3); 7001 break; 7002 #endif 7003 #ifdef TARGET_NR_shmdt 7004 case TARGET_NR_shmdt: 7005 ret = do_shmdt(arg1); 7006 break; 7007 #endif 7008 case TARGET_NR_fsync: 7009 ret = get_errno(fsync(arg1)); 7010 break; 7011 case TARGET_NR_clone: 7012 #if defined(TARGET_SH4) || defined(TARGET_ALPHA) 7013 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 7014 #elif defined(TARGET_CRIS) 7015 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); 7016 #elif defined(TARGET_MICROBLAZE) 7017 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 7018 #elif defined(TARGET_S390X) 7019 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, 
arg5, arg4)); 7020 #else 7021 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 7022 #endif 7023 break; 7024 #ifdef __NR_exit_group 7025 /* new thread calls */ 7026 case TARGET_NR_exit_group: 7027 #ifdef TARGET_GPROF 7028 _mcleanup(); 7029 #endif 7030 gdb_exit(cpu_env, arg1); 7031 ret = get_errno(exit_group(arg1)); 7032 break; 7033 #endif 7034 case TARGET_NR_setdomainname: 7035 if (!(p = lock_user_string(arg1))) 7036 goto efault; 7037 ret = get_errno(setdomainname(p, arg2)); 7038 unlock_user(p, arg1, 0); 7039 break; 7040 case TARGET_NR_uname: 7041 /* no need to transcode because we use the linux syscall */ 7042 { 7043 struct new_utsname * buf; 7044 7045 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 7046 goto efault; 7047 ret = get_errno(sys_uname(buf)); 7048 if (!is_error(ret)) { 7049 /* Overrite the native machine name with whatever is being 7050 emulated. */ 7051 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 7052 /* Allow the user to override the reported release. */ 7053 if (qemu_uname_release && *qemu_uname_release) 7054 strcpy (buf->release, qemu_uname_release); 7055 } 7056 unlock_user_struct(buf, arg1, 1); 7057 } 7058 break; 7059 #ifdef TARGET_I386 7060 case TARGET_NR_modify_ldt: 7061 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7062 break; 7063 #if !defined(TARGET_X86_64) 7064 case TARGET_NR_vm86old: 7065 goto unimplemented; 7066 case TARGET_NR_vm86: 7067 ret = do_vm86(cpu_env, arg1, arg2); 7068 break; 7069 #endif 7070 #endif 7071 case TARGET_NR_adjtimex: 7072 goto unimplemented; 7073 #ifdef TARGET_NR_create_module 7074 case TARGET_NR_create_module: 7075 #endif 7076 case TARGET_NR_init_module: 7077 case TARGET_NR_delete_module: 7078 #ifdef TARGET_NR_get_kernel_syms 7079 case TARGET_NR_get_kernel_syms: 7080 #endif 7081 goto unimplemented; 7082 case TARGET_NR_quotactl: 7083 goto unimplemented; 7084 case TARGET_NR_getpgid: 7085 ret = get_errno(getpgid(arg1)); 7086 break; 7087 case TARGET_NR_fchdir: 7088 ret = get_errno(fchdir(arg1)); 7089 break; 7090 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7091 case TARGET_NR_bdflush: 7092 goto unimplemented; 7093 #endif 7094 #ifdef TARGET_NR_sysfs 7095 case TARGET_NR_sysfs: 7096 goto unimplemented; 7097 #endif 7098 case TARGET_NR_personality: 7099 ret = get_errno(personality(arg1)); 7100 break; 7101 #ifdef TARGET_NR_afs_syscall 7102 case TARGET_NR_afs_syscall: 7103 goto unimplemented; 7104 #endif 7105 #ifdef TARGET_NR__llseek /* Not on alpha */ 7106 case TARGET_NR__llseek: 7107 { 7108 int64_t res; 7109 #if !defined(__NR_llseek) 7110 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7111 if (res == -1) { 7112 ret = get_errno(res); 7113 } else { 7114 ret = 0; 7115 } 7116 #else 7117 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7118 #endif 7119 if ((ret == 0) && put_user_s64(res, arg4)) { 7120 goto efault; 7121 } 7122 } 7123 break; 7124 #endif 7125 case TARGET_NR_getdents: 7126 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7127 { 7128 struct target_dirent *target_dirp; 7129 struct linux_dirent *dirp; 7130 abi_long count = arg3; 7131 7132 dirp = malloc(count); 7133 if (!dirp) { 7134 ret = -TARGET_ENOMEM; 7135 goto fail; 7136 } 7137 7138 ret = get_errno(sys_getdents(arg1, dirp, count)); 7139 if (!is_error(ret)) { 7140 struct linux_dirent *de; 7141 struct target_dirent *tde; 7142 int len = ret; 7143 int reclen, treclen; 7144 int count1, tnamelen; 7145 7146 count1 = 0; 7147 de = dirp; 7148 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7149 goto efault; 7150 tde = target_dirp; 7151 while 
(len > 0) { 7152 reclen = de->d_reclen; 7153 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7154 assert(tnamelen >= 0); 7155 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7156 assert(count1 + treclen <= count); 7157 tde->d_reclen = tswap16(treclen); 7158 tde->d_ino = tswapal(de->d_ino); 7159 tde->d_off = tswapal(de->d_off); 7160 memcpy(tde->d_name, de->d_name, tnamelen); 7161 de = (struct linux_dirent *)((char *)de + reclen); 7162 len -= reclen; 7163 tde = (struct target_dirent *)((char *)tde + treclen); 7164 count1 += treclen; 7165 } 7166 ret = count1; 7167 unlock_user(target_dirp, arg2, ret); 7168 } 7169 free(dirp); 7170 } 7171 #else 7172 { 7173 struct linux_dirent *dirp; 7174 abi_long count = arg3; 7175 7176 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7177 goto efault; 7178 ret = get_errno(sys_getdents(arg1, dirp, count)); 7179 if (!is_error(ret)) { 7180 struct linux_dirent *de; 7181 int len = ret; 7182 int reclen; 7183 de = dirp; 7184 while (len > 0) { 7185 reclen = de->d_reclen; 7186 if (reclen > len) 7187 break; 7188 de->d_reclen = tswap16(reclen); 7189 tswapls(&de->d_ino); 7190 tswapls(&de->d_off); 7191 de = (struct linux_dirent *)((char *)de + reclen); 7192 len -= reclen; 7193 } 7194 } 7195 unlock_user(dirp, arg2, ret); 7196 } 7197 #endif 7198 break; 7199 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7200 case TARGET_NR_getdents64: 7201 { 7202 struct linux_dirent64 *dirp; 7203 abi_long count = arg3; 7204 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7205 goto efault; 7206 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7207 if (!is_error(ret)) { 7208 struct linux_dirent64 *de; 7209 int len = ret; 7210 int reclen; 7211 de = dirp; 7212 while (len > 0) { 7213 reclen = de->d_reclen; 7214 if (reclen > len) 7215 break; 7216 de->d_reclen = tswap16(reclen); 7217 tswap64s((uint64_t *)&de->d_ino); 7218 tswap64s((uint64_t *)&de->d_off); 7219 de = (struct linux_dirent64 *)((char *)de + reclen); 7220 len -= reclen; 7221 } 7222 } 7223 unlock_user(dirp, arg2, ret); 7224 } 7225 break; 7226 #endif /* TARGET_NR_getdents64 */ 7227 #if defined(TARGET_NR__newselect) 7228 case TARGET_NR__newselect: 7229 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7230 break; 7231 #endif 7232 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7233 # ifdef TARGET_NR_poll 7234 case TARGET_NR_poll: 7235 # endif 7236 # ifdef TARGET_NR_ppoll 7237 case TARGET_NR_ppoll: 7238 # endif 7239 { 7240 struct target_pollfd *target_pfd; 7241 unsigned int nfds = arg2; 7242 int timeout = arg3; 7243 struct pollfd *pfd; 7244 unsigned int i; 7245 7246 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7247 if (!target_pfd) 7248 goto efault; 7249 7250 pfd = alloca(sizeof(struct pollfd) * nfds); 7251 for(i = 0; i < nfds; i++) { 7252 pfd[i].fd = tswap32(target_pfd[i].fd); 7253 pfd[i].events = tswap16(target_pfd[i].events); 7254 } 7255 7256 # ifdef TARGET_NR_ppoll 7257 if (num == TARGET_NR_ppoll) { 7258 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7259 target_sigset_t *target_set; 7260 sigset_t _set, *set = &_set; 7261 7262 if (arg3) { 7263 if (target_to_host_timespec(timeout_ts, arg3)) { 7264 unlock_user(target_pfd, arg1, 0); 7265 goto efault; 7266 } 7267 } else { 7268 timeout_ts = NULL; 7269 } 7270 7271 if (arg4) { 7272 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7273 if (!target_set) { 7274 unlock_user(target_pfd, arg1, 0); 7275 goto efault; 7276 } 7277 target_to_host_sigset(set, target_set); 7278 
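/*
 * Host-side reference sketch (assumes _GNU_SOURCE; not emulator code):
 * ppoll(2) takes a timespec rather than poll's millisecond timeout and
 * atomically installs a signal mask for the duration of the call, which is
 * why the target mask is converted just above before sys_ppoll() is used.
 */
#if 0
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <time.h>

static int example_ppoll_wait(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
    sigset_t mask;

    sigemptyset(&mask);
    sigaddset(&mask, SIGUSR1);          /* keep SIGUSR1 blocked while waiting */
    return ppoll(&pfd, 1, &ts, &mask);
}
#endif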
} else { 7279 set = NULL; 7280 } 7281 7282 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7283 7284 if (!is_error(ret) && arg3) { 7285 host_to_target_timespec(arg3, timeout_ts); 7286 } 7287 if (arg4) { 7288 unlock_user(target_set, arg4, 0); 7289 } 7290 } else 7291 # endif 7292 ret = get_errno(poll(pfd, nfds, timeout)); 7293 7294 if (!is_error(ret)) { 7295 for(i = 0; i < nfds; i++) { 7296 target_pfd[i].revents = tswap16(pfd[i].revents); 7297 } 7298 } 7299 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7300 } 7301 break; 7302 #endif 7303 case TARGET_NR_flock: 7304 /* NOTE: the flock constant seems to be the same for every 7305 Linux platform */ 7306 ret = get_errno(flock(arg1, arg2)); 7307 break; 7308 case TARGET_NR_readv: 7309 { 7310 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7311 if (vec != NULL) { 7312 ret = get_errno(readv(arg1, vec, arg3)); 7313 unlock_iovec(vec, arg2, arg3, 1); 7314 } else { 7315 ret = -host_to_target_errno(errno); 7316 } 7317 } 7318 break; 7319 case TARGET_NR_writev: 7320 { 7321 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7322 if (vec != NULL) { 7323 ret = get_errno(writev(arg1, vec, arg3)); 7324 unlock_iovec(vec, arg2, arg3, 0); 7325 } else { 7326 ret = -host_to_target_errno(errno); 7327 } 7328 } 7329 break; 7330 case TARGET_NR_getsid: 7331 ret = get_errno(getsid(arg1)); 7332 break; 7333 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7334 case TARGET_NR_fdatasync: 7335 ret = get_errno(fdatasync(arg1)); 7336 break; 7337 #endif 7338 case TARGET_NR__sysctl: 7339 /* We don't implement this, but ENOTDIR is always a safe 7340 return value. */ 7341 ret = -TARGET_ENOTDIR; 7342 break; 7343 case TARGET_NR_sched_getaffinity: 7344 { 7345 unsigned int mask_size; 7346 unsigned long *mask; 7347 7348 /* 7349 * sched_getaffinity needs multiples of ulong, so need to take 7350 * care of mismatches between target ulong and host ulong sizes. 7351 */ 7352 if (arg2 & (sizeof(abi_ulong) - 1)) { 7353 ret = -TARGET_EINVAL; 7354 break; 7355 } 7356 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7357 7358 mask = alloca(mask_size); 7359 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7360 7361 if (!is_error(ret)) { 7362 if (copy_to_user(arg3, mask, ret)) { 7363 goto efault; 7364 } 7365 } 7366 } 7367 break; 7368 case TARGET_NR_sched_setaffinity: 7369 { 7370 unsigned int mask_size; 7371 unsigned long *mask; 7372 7373 /* 7374 * sched_setaffinity needs multiples of ulong, so need to take 7375 * care of mismatches between target ulong and host ulong sizes. 
7376 */ 7377 if (arg2 & (sizeof(abi_ulong) - 1)) { 7378 ret = -TARGET_EINVAL; 7379 break; 7380 } 7381 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7382 7383 mask = alloca(mask_size); 7384 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7385 goto efault; 7386 } 7387 memcpy(mask, p, arg2); 7388 unlock_user_struct(p, arg2, 0); 7389 7390 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7391 } 7392 break; 7393 case TARGET_NR_sched_setparam: 7394 { 7395 struct sched_param *target_schp; 7396 struct sched_param schp; 7397 7398 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7399 goto efault; 7400 schp.sched_priority = tswap32(target_schp->sched_priority); 7401 unlock_user_struct(target_schp, arg2, 0); 7402 ret = get_errno(sched_setparam(arg1, &schp)); 7403 } 7404 break; 7405 case TARGET_NR_sched_getparam: 7406 { 7407 struct sched_param *target_schp; 7408 struct sched_param schp; 7409 ret = get_errno(sched_getparam(arg1, &schp)); 7410 if (!is_error(ret)) { 7411 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7412 goto efault; 7413 target_schp->sched_priority = tswap32(schp.sched_priority); 7414 unlock_user_struct(target_schp, arg2, 1); 7415 } 7416 } 7417 break; 7418 case TARGET_NR_sched_setscheduler: 7419 { 7420 struct sched_param *target_schp; 7421 struct sched_param schp; 7422 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7423 goto efault; 7424 schp.sched_priority = tswap32(target_schp->sched_priority); 7425 unlock_user_struct(target_schp, arg3, 0); 7426 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7427 } 7428 break; 7429 case TARGET_NR_sched_getscheduler: 7430 ret = get_errno(sched_getscheduler(arg1)); 7431 break; 7432 case TARGET_NR_sched_yield: 7433 ret = get_errno(sched_yield()); 7434 break; 7435 case TARGET_NR_sched_get_priority_max: 7436 ret = get_errno(sched_get_priority_max(arg1)); 7437 break; 7438 case TARGET_NR_sched_get_priority_min: 7439 ret = get_errno(sched_get_priority_min(arg1)); 7440 break; 7441 case TARGET_NR_sched_rr_get_interval: 7442 { 7443 struct timespec ts; 7444 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7445 if (!is_error(ret)) { 7446 host_to_target_timespec(arg2, &ts); 7447 } 7448 } 7449 break; 7450 case TARGET_NR_nanosleep: 7451 { 7452 struct timespec req, rem; 7453 target_to_host_timespec(&req, arg1); 7454 ret = get_errno(nanosleep(&req, &rem)); 7455 if (is_error(ret) && arg2) { 7456 host_to_target_timespec(arg2, &rem); 7457 } 7458 } 7459 break; 7460 #ifdef TARGET_NR_query_module 7461 case TARGET_NR_query_module: 7462 goto unimplemented; 7463 #endif 7464 #ifdef TARGET_NR_nfsservctl 7465 case TARGET_NR_nfsservctl: 7466 goto unimplemented; 7467 #endif 7468 case TARGET_NR_prctl: 7469 switch (arg1) { 7470 case PR_GET_PDEATHSIG: 7471 { 7472 int deathsig; 7473 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7474 if (!is_error(ret) && arg2 7475 && put_user_ual(deathsig, arg2)) { 7476 goto efault; 7477 } 7478 break; 7479 } 7480 #ifdef PR_GET_NAME 7481 case PR_GET_NAME: 7482 { 7483 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7484 if (!name) { 7485 goto efault; 7486 } 7487 ret = get_errno(prctl(arg1, (unsigned long)name, 7488 arg3, arg4, arg5)); 7489 unlock_user(name, arg2, 16); 7490 break; 7491 } 7492 case PR_SET_NAME: 7493 { 7494 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7495 if (!name) { 7496 goto efault; 7497 } 7498 ret = get_errno(prctl(arg1, (unsigned long)name, 7499 arg3, arg4, arg5)); 7500 unlock_user(name, arg2, 0); 7501 break; 7502 } 7503 #endif 7504 
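/*
 * Host-side usage sketch of the two name options special-cased above (real
 * prctl(2) options): the thread name buffer is a fixed 16 bytes including
 * the terminating NUL, which is why exactly 16 bytes are locked for
 * PR_GET_NAME/PR_SET_NAME.
 */
#if 0
#include <stdio.h>
#include <sys/prctl.h>

static void example_thread_name(void)
{
    char name[16];                 /* PR_GET_NAME needs at least 16 bytes */

    prctl(PR_SET_NAME, (unsigned long)"worker", 0, 0, 0);
    prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
    printf("thread name: %s\n", name);
}
#endif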
default: 7505 /* Most prctl options have no pointer arguments */ 7506 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7507 break; 7508 } 7509 break; 7510 #ifdef TARGET_NR_arch_prctl 7511 case TARGET_NR_arch_prctl: 7512 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7513 ret = do_arch_prctl(cpu_env, arg1, arg2); 7514 break; 7515 #else 7516 goto unimplemented; 7517 #endif 7518 #endif 7519 #ifdef TARGET_NR_pread64 7520 case TARGET_NR_pread64: 7521 if (regpairs_aligned(cpu_env)) { 7522 arg4 = arg5; 7523 arg5 = arg6; 7524 } 7525 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7526 goto efault; 7527 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7528 unlock_user(p, arg2, ret); 7529 break; 7530 case TARGET_NR_pwrite64: 7531 if (regpairs_aligned(cpu_env)) { 7532 arg4 = arg5; 7533 arg5 = arg6; 7534 } 7535 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7536 goto efault; 7537 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7538 unlock_user(p, arg2, 0); 7539 break; 7540 #endif 7541 case TARGET_NR_getcwd: 7542 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7543 goto efault; 7544 ret = get_errno(sys_getcwd1(p, arg2)); 7545 unlock_user(p, arg1, ret); 7546 break; 7547 case TARGET_NR_capget: 7548 goto unimplemented; 7549 case TARGET_NR_capset: 7550 goto unimplemented; 7551 case TARGET_NR_sigaltstack: 7552 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7553 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7554 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7555 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7556 break; 7557 #else 7558 goto unimplemented; 7559 #endif 7560 7561 #ifdef CONFIG_SENDFILE 7562 case TARGET_NR_sendfile: 7563 { 7564 off_t *offp = NULL; 7565 off_t off; 7566 if (arg3) { 7567 ret = get_user_sal(off, arg3); 7568 if (is_error(ret)) { 7569 break; 7570 } 7571 offp = &off; 7572 } 7573 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7574 if (!is_error(ret) && arg3) { 7575 abi_long ret2 = put_user_sal(off, arg3); 7576 if (is_error(ret2)) { 7577 ret = ret2; 7578 } 7579 } 7580 break; 7581 } 7582 #ifdef TARGET_NR_sendfile64 7583 case TARGET_NR_sendfile64: 7584 { 7585 off_t *offp = NULL; 7586 off_t off; 7587 if (arg3) { 7588 ret = get_user_s64(off, arg3); 7589 if (is_error(ret)) { 7590 break; 7591 } 7592 offp = &off; 7593 } 7594 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7595 if (!is_error(ret) && arg3) { 7596 abi_long ret2 = put_user_s64(off, arg3); 7597 if (is_error(ret2)) { 7598 ret = ret2; 7599 } 7600 } 7601 break; 7602 } 7603 #endif 7604 #else 7605 case TARGET_NR_sendfile: 7606 #ifdef TARGET_NR_sendfile64 7607 case TARGET_NR_sendfile64: 7608 #endif 7609 goto unimplemented; 7610 #endif 7611 7612 #ifdef TARGET_NR_getpmsg 7613 case TARGET_NR_getpmsg: 7614 goto unimplemented; 7615 #endif 7616 #ifdef TARGET_NR_putpmsg 7617 case TARGET_NR_putpmsg: 7618 goto unimplemented; 7619 #endif 7620 #ifdef TARGET_NR_vfork 7621 case TARGET_NR_vfork: 7622 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7623 0, 0, 0, 0)); 7624 break; 7625 #endif 7626 #ifdef TARGET_NR_ugetrlimit 7627 case TARGET_NR_ugetrlimit: 7628 { 7629 struct rlimit rlim; 7630 int resource = target_to_host_resource(arg1); 7631 ret = get_errno(getrlimit(resource, &rlim)); 7632 if (!is_error(ret)) { 7633 struct target_rlimit *target_rlim; 7634 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7635 goto efault; 7636 
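/*
 * Illustrative sketch with assumed constants (not the real conversion
 * helpers): host_to_target_rlim()/target_to_host_rlim() exist because the
 * "unlimited" sentinel and the field width can differ between host and
 * target, so one plausible mapping clamps oversized values to the target's
 * infinity instead of silently truncating them.
 */
#if 0
#include <stdint.h>
#include <sys/resource.h>

#define EXAMPLE_TARGET_RLIM_INFINITY ((uint32_t)-1)   /* assumed 32-bit target */

static uint32_t example_host_to_target_rlim(rlim_t host_val)
{
    if (host_val == RLIM_INFINITY ||
        host_val > EXAMPLE_TARGET_RLIM_INFINITY) {
        return EXAMPLE_TARGET_RLIM_INFINITY;
    }
    return (uint32_t)host_val;
}
#endif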
target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7637 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7638 unlock_user_struct(target_rlim, arg2, 1); 7639 } 7640 break; 7641 } 7642 #endif 7643 #ifdef TARGET_NR_truncate64 7644 case TARGET_NR_truncate64: 7645 if (!(p = lock_user_string(arg1))) 7646 goto efault; 7647 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7648 unlock_user(p, arg1, 0); 7649 break; 7650 #endif 7651 #ifdef TARGET_NR_ftruncate64 7652 case TARGET_NR_ftruncate64: 7653 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7654 break; 7655 #endif 7656 #ifdef TARGET_NR_stat64 7657 case TARGET_NR_stat64: 7658 if (!(p = lock_user_string(arg1))) 7659 goto efault; 7660 ret = get_errno(stat(path(p), &st)); 7661 unlock_user(p, arg1, 0); 7662 if (!is_error(ret)) 7663 ret = host_to_target_stat64(cpu_env, arg2, &st); 7664 break; 7665 #endif 7666 #ifdef TARGET_NR_lstat64 7667 case TARGET_NR_lstat64: 7668 if (!(p = lock_user_string(arg1))) 7669 goto efault; 7670 ret = get_errno(lstat(path(p), &st)); 7671 unlock_user(p, arg1, 0); 7672 if (!is_error(ret)) 7673 ret = host_to_target_stat64(cpu_env, arg2, &st); 7674 break; 7675 #endif 7676 #ifdef TARGET_NR_fstat64 7677 case TARGET_NR_fstat64: 7678 ret = get_errno(fstat(arg1, &st)); 7679 if (!is_error(ret)) 7680 ret = host_to_target_stat64(cpu_env, arg2, &st); 7681 break; 7682 #endif 7683 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 7684 (defined(__NR_fstatat64) || defined(__NR_newfstatat)) 7685 #ifdef TARGET_NR_fstatat64 7686 case TARGET_NR_fstatat64: 7687 #endif 7688 #ifdef TARGET_NR_newfstatat 7689 case TARGET_NR_newfstatat: 7690 #endif 7691 if (!(p = lock_user_string(arg2))) 7692 goto efault; 7693 #ifdef __NR_fstatat64 7694 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); 7695 #else 7696 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); 7697 #endif 7698 if (!is_error(ret)) 7699 ret = host_to_target_stat64(cpu_env, arg3, &st); 7700 break; 7701 #endif 7702 case TARGET_NR_lchown: 7703 if (!(p = lock_user_string(arg1))) 7704 goto efault; 7705 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7706 unlock_user(p, arg1, 0); 7707 break; 7708 #ifdef TARGET_NR_getuid 7709 case TARGET_NR_getuid: 7710 ret = get_errno(high2lowuid(getuid())); 7711 break; 7712 #endif 7713 #ifdef TARGET_NR_getgid 7714 case TARGET_NR_getgid: 7715 ret = get_errno(high2lowgid(getgid())); 7716 break; 7717 #endif 7718 #ifdef TARGET_NR_geteuid 7719 case TARGET_NR_geteuid: 7720 ret = get_errno(high2lowuid(geteuid())); 7721 break; 7722 #endif 7723 #ifdef TARGET_NR_getegid 7724 case TARGET_NR_getegid: 7725 ret = get_errno(high2lowgid(getegid())); 7726 break; 7727 #endif 7728 case TARGET_NR_setreuid: 7729 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7730 break; 7731 case TARGET_NR_setregid: 7732 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7733 break; 7734 case TARGET_NR_getgroups: 7735 { 7736 int gidsetsize = arg1; 7737 target_id *target_grouplist; 7738 gid_t *grouplist; 7739 int i; 7740 7741 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7742 ret = get_errno(getgroups(gidsetsize, grouplist)); 7743 if (gidsetsize == 0) 7744 break; 7745 if (!is_error(ret)) { 7746 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 7747 if (!target_grouplist) 7748 goto efault; 7749 for(i = 0;i < ret; i++) 7750 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7751 unlock_user(target_grouplist, arg2, gidsetsize * 
sizeof(target_id)); 7752 } 7753 } 7754 break; 7755 case TARGET_NR_setgroups: 7756 { 7757 int gidsetsize = arg1; 7758 target_id *target_grouplist; 7759 gid_t *grouplist = NULL; 7760 int i; 7761 if (gidsetsize) { 7762 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7763 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 7764 if (!target_grouplist) { 7765 ret = -TARGET_EFAULT; 7766 goto fail; 7767 } 7768 for (i = 0; i < gidsetsize; i++) { 7769 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7770 } 7771 unlock_user(target_grouplist, arg2, 0); 7772 } 7773 ret = get_errno(setgroups(gidsetsize, grouplist)); 7774 } 7775 break; 7776 case TARGET_NR_fchown: 7777 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7778 break; 7779 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 7780 case TARGET_NR_fchownat: 7781 if (!(p = lock_user_string(arg2))) 7782 goto efault; 7783 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); 7784 unlock_user(p, arg2, 0); 7785 break; 7786 #endif 7787 #ifdef TARGET_NR_setresuid 7788 case TARGET_NR_setresuid: 7789 ret = get_errno(setresuid(low2highuid(arg1), 7790 low2highuid(arg2), 7791 low2highuid(arg3))); 7792 break; 7793 #endif 7794 #ifdef TARGET_NR_getresuid 7795 case TARGET_NR_getresuid: 7796 { 7797 uid_t ruid, euid, suid; 7798 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7799 if (!is_error(ret)) { 7800 if (put_user_u16(high2lowuid(ruid), arg1) 7801 || put_user_u16(high2lowuid(euid), arg2) 7802 || put_user_u16(high2lowuid(suid), arg3)) 7803 goto efault; 7804 } 7805 } 7806 break; 7807 #endif 7808 #ifdef TARGET_NR_getresgid 7809 case TARGET_NR_setresgid: 7810 ret = get_errno(setresgid(low2highgid(arg1), 7811 low2highgid(arg2), 7812 low2highgid(arg3))); 7813 break; 7814 #endif 7815 #ifdef TARGET_NR_getresgid 7816 case TARGET_NR_getresgid: 7817 { 7818 gid_t rgid, egid, sgid; 7819 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7820 if (!is_error(ret)) { 7821 if (put_user_u16(high2lowgid(rgid), arg1) 7822 || put_user_u16(high2lowgid(egid), arg2) 7823 || put_user_u16(high2lowgid(sgid), arg3)) 7824 goto efault; 7825 } 7826 } 7827 break; 7828 #endif 7829 case TARGET_NR_chown: 7830 if (!(p = lock_user_string(arg1))) 7831 goto efault; 7832 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7833 unlock_user(p, arg1, 0); 7834 break; 7835 case TARGET_NR_setuid: 7836 ret = get_errno(setuid(low2highuid(arg1))); 7837 break; 7838 case TARGET_NR_setgid: 7839 ret = get_errno(setgid(low2highgid(arg1))); 7840 break; 7841 case TARGET_NR_setfsuid: 7842 ret = get_errno(setfsuid(arg1)); 7843 break; 7844 case TARGET_NR_setfsgid: 7845 ret = get_errno(setfsgid(arg1)); 7846 break; 7847 7848 #ifdef TARGET_NR_lchown32 7849 case TARGET_NR_lchown32: 7850 if (!(p = lock_user_string(arg1))) 7851 goto efault; 7852 ret = get_errno(lchown(p, arg2, arg3)); 7853 unlock_user(p, arg1, 0); 7854 break; 7855 #endif 7856 #ifdef TARGET_NR_getuid32 7857 case TARGET_NR_getuid32: 7858 ret = get_errno(getuid()); 7859 break; 7860 #endif 7861 7862 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 7863 /* Alpha specific */ 7864 case TARGET_NR_getxuid: 7865 { 7866 uid_t euid; 7867 euid=geteuid(); 7868 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 7869 } 7870 ret = get_errno(getuid()); 7871 break; 7872 #endif 7873 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 7874 /* Alpha specific */ 7875 case TARGET_NR_getxgid: 7876 { 7877 uid_t egid; 7878 egid=getegid(); 7879 ((CPUAlphaState 
*)cpu_env)->ir[IR_A4]=egid; 7880 } 7881 ret = get_errno(getgid()); 7882 break; 7883 #endif 7884 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 7885 /* Alpha specific */ 7886 case TARGET_NR_osf_getsysinfo: 7887 ret = -TARGET_EOPNOTSUPP; 7888 switch (arg1) { 7889 case TARGET_GSI_IEEE_FP_CONTROL: 7890 { 7891 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 7892 7893 /* Copied from linux ieee_fpcr_to_swcr. */ 7894 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 7895 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 7896 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 7897 | SWCR_TRAP_ENABLE_DZE 7898 | SWCR_TRAP_ENABLE_OVF); 7899 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 7900 | SWCR_TRAP_ENABLE_INE); 7901 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 7902 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 7903 7904 if (put_user_u64 (swcr, arg2)) 7905 goto efault; 7906 ret = 0; 7907 } 7908 break; 7909 7910 /* case GSI_IEEE_STATE_AT_SIGNAL: 7911 -- Not implemented in linux kernel. 7912 case GSI_UACPROC: 7913 -- Retrieves current unaligned access state; not much used. 7914 case GSI_PROC_TYPE: 7915 -- Retrieves implver information; surely not used. 7916 case GSI_GET_HWRPB: 7917 -- Grabs a copy of the HWRPB; surely not used. 7918 */ 7919 } 7920 break; 7921 #endif 7922 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 7923 /* Alpha specific */ 7924 case TARGET_NR_osf_setsysinfo: 7925 ret = -TARGET_EOPNOTSUPP; 7926 switch (arg1) { 7927 case TARGET_SSI_IEEE_FP_CONTROL: 7928 { 7929 uint64_t swcr, fpcr, orig_fpcr; 7930 7931 if (get_user_u64 (swcr, arg2)) { 7932 goto efault; 7933 } 7934 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 7935 fpcr = orig_fpcr & FPCR_DYN_MASK; 7936 7937 /* Copied from linux ieee_swcr_to_fpcr. */ 7938 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 7939 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 7940 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 7941 | SWCR_TRAP_ENABLE_DZE 7942 | SWCR_TRAP_ENABLE_OVF)) << 48; 7943 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 7944 | SWCR_TRAP_ENABLE_INE)) << 57; 7945 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 7946 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 7947 7948 cpu_alpha_store_fpcr(cpu_env, fpcr); 7949 ret = 0; 7950 } 7951 break; 7952 7953 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 7954 { 7955 uint64_t exc, fpcr, orig_fpcr; 7956 int si_code; 7957 7958 if (get_user_u64(exc, arg2)) { 7959 goto efault; 7960 } 7961 7962 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 7963 7964 /* We only add to the exception status here. */ 7965 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 7966 7967 cpu_alpha_store_fpcr(cpu_env, fpcr); 7968 ret = 0; 7969 7970 /* Old exceptions are not signaled. */ 7971 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 7972 7973 /* If any exceptions set by this call, 7974 and are unmasked, send a signal. 
*/ 7975 si_code = 0; 7976 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 7977 si_code = TARGET_FPE_FLTRES; 7978 } 7979 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 7980 si_code = TARGET_FPE_FLTUND; 7981 } 7982 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 7983 si_code = TARGET_FPE_FLTOVF; 7984 } 7985 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 7986 si_code = TARGET_FPE_FLTDIV; 7987 } 7988 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 7989 si_code = TARGET_FPE_FLTINV; 7990 } 7991 if (si_code != 0) { 7992 target_siginfo_t info; 7993 info.si_signo = SIGFPE; 7994 info.si_errno = 0; 7995 info.si_code = si_code; 7996 info._sifields._sigfault._addr 7997 = ((CPUArchState *)cpu_env)->pc; 7998 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 7999 } 8000 } 8001 break; 8002 8003 /* case SSI_NVPAIRS: 8004 -- Used with SSIN_UACPROC to enable unaligned accesses. 8005 case SSI_IEEE_STATE_AT_SIGNAL: 8006 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8007 -- Not implemented in linux kernel 8008 */ 8009 } 8010 break; 8011 #endif 8012 #ifdef TARGET_NR_osf_sigprocmask 8013 /* Alpha specific. */ 8014 case TARGET_NR_osf_sigprocmask: 8015 { 8016 abi_ulong mask; 8017 int how; 8018 sigset_t set, oldset; 8019 8020 switch(arg1) { 8021 case TARGET_SIG_BLOCK: 8022 how = SIG_BLOCK; 8023 break; 8024 case TARGET_SIG_UNBLOCK: 8025 how = SIG_UNBLOCK; 8026 break; 8027 case TARGET_SIG_SETMASK: 8028 how = SIG_SETMASK; 8029 break; 8030 default: 8031 ret = -TARGET_EINVAL; 8032 goto fail; 8033 } 8034 mask = arg2; 8035 target_to_host_old_sigset(&set, &mask); 8036 sigprocmask(how, &set, &oldset); 8037 host_to_target_old_sigset(&mask, &oldset); 8038 ret = mask; 8039 } 8040 break; 8041 #endif 8042 8043 #ifdef TARGET_NR_getgid32 8044 case TARGET_NR_getgid32: 8045 ret = get_errno(getgid()); 8046 break; 8047 #endif 8048 #ifdef TARGET_NR_geteuid32 8049 case TARGET_NR_geteuid32: 8050 ret = get_errno(geteuid()); 8051 break; 8052 #endif 8053 #ifdef TARGET_NR_getegid32 8054 case TARGET_NR_getegid32: 8055 ret = get_errno(getegid()); 8056 break; 8057 #endif 8058 #ifdef TARGET_NR_setreuid32 8059 case TARGET_NR_setreuid32: 8060 ret = get_errno(setreuid(arg1, arg2)); 8061 break; 8062 #endif 8063 #ifdef TARGET_NR_setregid32 8064 case TARGET_NR_setregid32: 8065 ret = get_errno(setregid(arg1, arg2)); 8066 break; 8067 #endif 8068 #ifdef TARGET_NR_getgroups32 8069 case TARGET_NR_getgroups32: 8070 { 8071 int gidsetsize = arg1; 8072 uint32_t *target_grouplist; 8073 gid_t *grouplist; 8074 int i; 8075 8076 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8077 ret = get_errno(getgroups(gidsetsize, grouplist)); 8078 if (gidsetsize == 0) 8079 break; 8080 if (!is_error(ret)) { 8081 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8082 if (!target_grouplist) { 8083 ret = -TARGET_EFAULT; 8084 goto fail; 8085 } 8086 for(i = 0;i < ret; i++) 8087 target_grouplist[i] = tswap32(grouplist[i]); 8088 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8089 } 8090 } 8091 break; 8092 #endif 8093 #ifdef TARGET_NR_setgroups32 8094 case TARGET_NR_setgroups32: 8095 { 8096 int gidsetsize = arg1; 8097 uint32_t *target_grouplist; 8098 gid_t *grouplist; 8099 int i; 8100 8101 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8102 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8103 if (!target_grouplist) { 8104 ret = -TARGET_EFAULT; 8105 goto fail; 8106 } 8107 for(i = 0;i < gidsetsize; i++) 8108 grouplist[i] = tswap32(target_grouplist[i]); 8109 unlock_user(target_grouplist, arg2, 0); 8110 ret = 
get_errno(setgroups(gidsetsize, grouplist));
8111 }
8112 break;
8113 #endif
8114 #ifdef TARGET_NR_fchown32
8115 case TARGET_NR_fchown32:
8116 ret = get_errno(fchown(arg1, arg2, arg3));
8117 break;
8118 #endif
8119 #ifdef TARGET_NR_setresuid32
8120 case TARGET_NR_setresuid32:
8121 ret = get_errno(setresuid(arg1, arg2, arg3));
8122 break;
8123 #endif
8124 #ifdef TARGET_NR_getresuid32
8125 case TARGET_NR_getresuid32:
8126 {
8127 uid_t ruid, euid, suid;
8128 ret = get_errno(getresuid(&ruid, &euid, &suid));
8129 if (!is_error(ret)) {
8130 if (put_user_u32(ruid, arg1)
8131 || put_user_u32(euid, arg2)
8132 || put_user_u32(suid, arg3))
8133 goto efault;
8134 }
8135 }
8136 break;
8137 #endif
8138 #ifdef TARGET_NR_setresgid32
8139 case TARGET_NR_setresgid32:
8140 ret = get_errno(setresgid(arg1, arg2, arg3));
8141 break;
8142 #endif
8143 #ifdef TARGET_NR_getresgid32
8144 case TARGET_NR_getresgid32:
8145 {
8146 gid_t rgid, egid, sgid;
8147 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8148 if (!is_error(ret)) {
8149 if (put_user_u32(rgid, arg1)
8150 || put_user_u32(egid, arg2)
8151 || put_user_u32(sgid, arg3))
8152 goto efault;
8153 }
8154 }
8155 break;
8156 #endif
8157 #ifdef TARGET_NR_chown32
8158 case TARGET_NR_chown32:
8159 if (!(p = lock_user_string(arg1)))
8160 goto efault;
8161 ret = get_errno(chown(p, arg2, arg3));
8162 unlock_user(p, arg1, 0);
8163 break;
8164 #endif
8165 #ifdef TARGET_NR_setuid32
8166 case TARGET_NR_setuid32:
8167 ret = get_errno(setuid(arg1));
8168 break;
8169 #endif
8170 #ifdef TARGET_NR_setgid32
8171 case TARGET_NR_setgid32:
8172 ret = get_errno(setgid(arg1));
8173 break;
8174 #endif
8175 #ifdef TARGET_NR_setfsuid32
8176 case TARGET_NR_setfsuid32:
8177 ret = get_errno(setfsuid(arg1));
8178 break;
8179 #endif
8180 #ifdef TARGET_NR_setfsgid32
8181 case TARGET_NR_setfsgid32:
8182 ret = get_errno(setfsgid(arg1));
8183 break;
8184 #endif
8185
8186 case TARGET_NR_pivot_root:
8187 goto unimplemented;
8188 #ifdef TARGET_NR_mincore
8189 case TARGET_NR_mincore:
8190 {
8191 void *a;
8192 ret = -TARGET_EFAULT;
8193 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8194 goto efault;
8195 if (!(p = lock_user_string(arg3)))
8196 goto mincore_fail;
8197 ret = get_errno(mincore(a, arg2, p));
8198 unlock_user(p, arg3, ret);
8199 mincore_fail:
8200 unlock_user(a, arg1, 0);
8201 }
8202 break;
8203 #endif
8204 #ifdef TARGET_NR_arm_fadvise64_64
8205 case TARGET_NR_arm_fadvise64_64:
8206 {
8207 /*
8208 * arm_fadvise64_64 looks like fadvise64_64 but
8209 * with different argument order
8210 */
8211 abi_long temp;
8212 temp = arg3;
8213 arg3 = arg4;
8214 arg4 = temp;
8215 } /* fall through */
8216 #endif
8217 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8218 #ifdef TARGET_NR_fadvise64_64
8219 case TARGET_NR_fadvise64_64:
8220 #endif
8221 #ifdef TARGET_NR_fadvise64
8222 case TARGET_NR_fadvise64:
8223 #endif
8224 #ifdef TARGET_S390X
8225 switch (arg4) {
8226 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8227 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8228 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8229 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8230 default: break;
8231 }
8232 #endif
8233 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8234 break;
8235 #endif
8236 #ifdef TARGET_NR_madvise
8237 case TARGET_NR_madvise:
8238 /* A straight passthrough may not be safe because qemu sometimes
8239 turns private file-backed mappings into anonymous mappings.
8240 This will break MADV_DONTNEED.
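On an anonymous mapping MADV_DONTNEED discards the pages outright, whereas a file-backed mapping would simply be repopulated from the file, so passing the advice through could silently lose guest data.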
8241 This is a hint, so ignoring and returning success is ok. */ 8242 ret = get_errno(0); 8243 break; 8244 #endif 8245 #if TARGET_ABI_BITS == 32 8246 case TARGET_NR_fcntl64: 8247 { 8248 int cmd; 8249 struct flock64 fl; 8250 struct target_flock64 *target_fl; 8251 #ifdef TARGET_ARM 8252 struct target_eabi_flock64 *target_efl; 8253 #endif 8254 8255 cmd = target_to_host_fcntl_cmd(arg2); 8256 if (cmd == -TARGET_EINVAL) { 8257 ret = cmd; 8258 break; 8259 } 8260 8261 switch(arg2) { 8262 case TARGET_F_GETLK64: 8263 #ifdef TARGET_ARM 8264 if (((CPUARMState *)cpu_env)->eabi) { 8265 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8266 goto efault; 8267 fl.l_type = tswap16(target_efl->l_type); 8268 fl.l_whence = tswap16(target_efl->l_whence); 8269 fl.l_start = tswap64(target_efl->l_start); 8270 fl.l_len = tswap64(target_efl->l_len); 8271 fl.l_pid = tswap32(target_efl->l_pid); 8272 unlock_user_struct(target_efl, arg3, 0); 8273 } else 8274 #endif 8275 { 8276 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8277 goto efault; 8278 fl.l_type = tswap16(target_fl->l_type); 8279 fl.l_whence = tswap16(target_fl->l_whence); 8280 fl.l_start = tswap64(target_fl->l_start); 8281 fl.l_len = tswap64(target_fl->l_len); 8282 fl.l_pid = tswap32(target_fl->l_pid); 8283 unlock_user_struct(target_fl, arg3, 0); 8284 } 8285 ret = get_errno(fcntl(arg1, cmd, &fl)); 8286 if (ret == 0) { 8287 #ifdef TARGET_ARM 8288 if (((CPUARMState *)cpu_env)->eabi) { 8289 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8290 goto efault; 8291 target_efl->l_type = tswap16(fl.l_type); 8292 target_efl->l_whence = tswap16(fl.l_whence); 8293 target_efl->l_start = tswap64(fl.l_start); 8294 target_efl->l_len = tswap64(fl.l_len); 8295 target_efl->l_pid = tswap32(fl.l_pid); 8296 unlock_user_struct(target_efl, arg3, 1); 8297 } else 8298 #endif 8299 { 8300 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8301 goto efault; 8302 target_fl->l_type = tswap16(fl.l_type); 8303 target_fl->l_whence = tswap16(fl.l_whence); 8304 target_fl->l_start = tswap64(fl.l_start); 8305 target_fl->l_len = tswap64(fl.l_len); 8306 target_fl->l_pid = tswap32(fl.l_pid); 8307 unlock_user_struct(target_fl, arg3, 1); 8308 } 8309 } 8310 break; 8311 8312 case TARGET_F_SETLK64: 8313 case TARGET_F_SETLKW64: 8314 #ifdef TARGET_ARM 8315 if (((CPUARMState *)cpu_env)->eabi) { 8316 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8317 goto efault; 8318 fl.l_type = tswap16(target_efl->l_type); 8319 fl.l_whence = tswap16(target_efl->l_whence); 8320 fl.l_start = tswap64(target_efl->l_start); 8321 fl.l_len = tswap64(target_efl->l_len); 8322 fl.l_pid = tswap32(target_efl->l_pid); 8323 unlock_user_struct(target_efl, arg3, 0); 8324 } else 8325 #endif 8326 { 8327 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8328 goto efault; 8329 fl.l_type = tswap16(target_fl->l_type); 8330 fl.l_whence = tswap16(target_fl->l_whence); 8331 fl.l_start = tswap64(target_fl->l_start); 8332 fl.l_len = tswap64(target_fl->l_len); 8333 fl.l_pid = tswap32(target_fl->l_pid); 8334 unlock_user_struct(target_fl, arg3, 0); 8335 } 8336 ret = get_errno(fcntl(arg1, cmd, &fl)); 8337 break; 8338 default: 8339 ret = do_fcntl(arg1, arg2, arg3); 8340 break; 8341 } 8342 break; 8343 } 8344 #endif 8345 #ifdef TARGET_NR_cacheflush 8346 case TARGET_NR_cacheflush: 8347 /* self-modifying code is handled automatically, so nothing needed */ 8348 ret = 0; 8349 break; 8350 #endif 8351 #ifdef TARGET_NR_security 8352 case TARGET_NR_security: 8353 goto unimplemented; 8354 #endif 8355 #ifdef 
TARGET_NR_getpagesize 8356 case TARGET_NR_getpagesize: 8357 ret = TARGET_PAGE_SIZE; 8358 break; 8359 #endif 8360 case TARGET_NR_gettid: 8361 ret = get_errno(gettid()); 8362 break; 8363 #ifdef TARGET_NR_readahead 8364 case TARGET_NR_readahead: 8365 #if TARGET_ABI_BITS == 32 8366 if (regpairs_aligned(cpu_env)) { 8367 arg2 = arg3; 8368 arg3 = arg4; 8369 arg4 = arg5; 8370 } 8371 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 8372 #else 8373 ret = get_errno(readahead(arg1, arg2, arg3)); 8374 #endif 8375 break; 8376 #endif 8377 #ifdef CONFIG_ATTR 8378 #ifdef TARGET_NR_setxattr 8379 case TARGET_NR_listxattr: 8380 case TARGET_NR_llistxattr: 8381 { 8382 void *p, *b = 0; 8383 if (arg2) { 8384 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8385 if (!b) { 8386 ret = -TARGET_EFAULT; 8387 break; 8388 } 8389 } 8390 p = lock_user_string(arg1); 8391 if (p) { 8392 if (num == TARGET_NR_listxattr) { 8393 ret = get_errno(listxattr(p, b, arg3)); 8394 } else { 8395 ret = get_errno(llistxattr(p, b, arg3)); 8396 } 8397 } else { 8398 ret = -TARGET_EFAULT; 8399 } 8400 unlock_user(p, arg1, 0); 8401 unlock_user(b, arg2, arg3); 8402 break; 8403 } 8404 case TARGET_NR_flistxattr: 8405 { 8406 void *b = 0; 8407 if (arg2) { 8408 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8409 if (!b) { 8410 ret = -TARGET_EFAULT; 8411 break; 8412 } 8413 } 8414 ret = get_errno(flistxattr(arg1, b, arg3)); 8415 unlock_user(b, arg2, arg3); 8416 break; 8417 } 8418 case TARGET_NR_setxattr: 8419 case TARGET_NR_lsetxattr: 8420 { 8421 void *p, *n, *v = 0; 8422 if (arg3) { 8423 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8424 if (!v) { 8425 ret = -TARGET_EFAULT; 8426 break; 8427 } 8428 } 8429 p = lock_user_string(arg1); 8430 n = lock_user_string(arg2); 8431 if (p && n) { 8432 if (num == TARGET_NR_setxattr) { 8433 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8434 } else { 8435 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8436 } 8437 } else { 8438 ret = -TARGET_EFAULT; 8439 } 8440 unlock_user(p, arg1, 0); 8441 unlock_user(n, arg2, 0); 8442 unlock_user(v, arg3, 0); 8443 } 8444 break; 8445 case TARGET_NR_fsetxattr: 8446 { 8447 void *n, *v = 0; 8448 if (arg3) { 8449 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8450 if (!v) { 8451 ret = -TARGET_EFAULT; 8452 break; 8453 } 8454 } 8455 n = lock_user_string(arg2); 8456 if (n) { 8457 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8458 } else { 8459 ret = -TARGET_EFAULT; 8460 } 8461 unlock_user(n, arg2, 0); 8462 unlock_user(v, arg3, 0); 8463 } 8464 break; 8465 case TARGET_NR_getxattr: 8466 case TARGET_NR_lgetxattr: 8467 { 8468 void *p, *n, *v = 0; 8469 if (arg3) { 8470 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8471 if (!v) { 8472 ret = -TARGET_EFAULT; 8473 break; 8474 } 8475 } 8476 p = lock_user_string(arg1); 8477 n = lock_user_string(arg2); 8478 if (p && n) { 8479 if (num == TARGET_NR_getxattr) { 8480 ret = get_errno(getxattr(p, n, v, arg4)); 8481 } else { 8482 ret = get_errno(lgetxattr(p, n, v, arg4)); 8483 } 8484 } else { 8485 ret = -TARGET_EFAULT; 8486 } 8487 unlock_user(p, arg1, 0); 8488 unlock_user(n, arg2, 0); 8489 unlock_user(v, arg3, arg4); 8490 } 8491 break; 8492 case TARGET_NR_fgetxattr: 8493 { 8494 void *n, *v = 0; 8495 if (arg3) { 8496 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8497 if (!v) { 8498 ret = -TARGET_EFAULT; 8499 break; 8500 } 8501 } 8502 n = lock_user_string(arg2); 8503 if (n) { 8504 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8505 } else { 8506 ret = -TARGET_EFAULT; 8507 } 8508 unlock_user(n, arg2, 0); 8509 unlock_user(v, arg3, arg4); 8510 } 8511 break; 
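/* removexattr/lremovexattr take two guest string pointers (path and attribute
 * name); both are locked before the host call and unlocked again whether or
 * not it succeeds. */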
8512 case TARGET_NR_removexattr: 8513 case TARGET_NR_lremovexattr: 8514 { 8515 void *p, *n; 8516 p = lock_user_string(arg1); 8517 n = lock_user_string(arg2); 8518 if (p && n) { 8519 if (num == TARGET_NR_removexattr) { 8520 ret = get_errno(removexattr(p, n)); 8521 } else { 8522 ret = get_errno(lremovexattr(p, n)); 8523 } 8524 } else { 8525 ret = -TARGET_EFAULT; 8526 } 8527 unlock_user(p, arg1, 0); 8528 unlock_user(n, arg2, 0); 8529 } 8530 break; 8531 case TARGET_NR_fremovexattr: 8532 { 8533 void *n; 8534 n = lock_user_string(arg2); 8535 if (n) { 8536 ret = get_errno(fremovexattr(arg1, n)); 8537 } else { 8538 ret = -TARGET_EFAULT; 8539 } 8540 unlock_user(n, arg2, 0); 8541 } 8542 break; 8543 #endif 8544 #endif /* CONFIG_ATTR */ 8545 #ifdef TARGET_NR_set_thread_area 8546 case TARGET_NR_set_thread_area: 8547 #if defined(TARGET_MIPS) 8548 ((CPUMIPSState *) cpu_env)->tls_value = arg1; 8549 ret = 0; 8550 break; 8551 #elif defined(TARGET_CRIS) 8552 if (arg1 & 0xff) 8553 ret = -TARGET_EINVAL; 8554 else { 8555 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 8556 ret = 0; 8557 } 8558 break; 8559 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 8560 ret = do_set_thread_area(cpu_env, arg1); 8561 break; 8562 #else 8563 goto unimplemented_nowarn; 8564 #endif 8565 #endif 8566 #ifdef TARGET_NR_get_thread_area 8567 case TARGET_NR_get_thread_area: 8568 #if defined(TARGET_I386) && defined(TARGET_ABI32) 8569 ret = do_get_thread_area(cpu_env, arg1); 8570 #else 8571 goto unimplemented_nowarn; 8572 #endif 8573 #endif 8574 #ifdef TARGET_NR_getdomainname 8575 case TARGET_NR_getdomainname: 8576 goto unimplemented_nowarn; 8577 #endif 8578 8579 #ifdef TARGET_NR_clock_gettime 8580 case TARGET_NR_clock_gettime: 8581 { 8582 struct timespec ts; 8583 ret = get_errno(clock_gettime(arg1, &ts)); 8584 if (!is_error(ret)) { 8585 host_to_target_timespec(arg2, &ts); 8586 } 8587 break; 8588 } 8589 #endif 8590 #ifdef TARGET_NR_clock_getres 8591 case TARGET_NR_clock_getres: 8592 { 8593 struct timespec ts; 8594 ret = get_errno(clock_getres(arg1, &ts)); 8595 if (!is_error(ret)) { 8596 host_to_target_timespec(arg2, &ts); 8597 } 8598 break; 8599 } 8600 #endif 8601 #ifdef TARGET_NR_clock_nanosleep 8602 case TARGET_NR_clock_nanosleep: 8603 { 8604 struct timespec ts; 8605 target_to_host_timespec(&ts, arg3); 8606 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL)); 8607 if (arg4) 8608 host_to_target_timespec(arg4, &ts); 8609 break; 8610 } 8611 #endif 8612 8613 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 8614 case TARGET_NR_set_tid_address: 8615 ret = get_errno(set_tid_address((int *)g2h(arg1))); 8616 break; 8617 #endif 8618 8619 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 8620 case TARGET_NR_tkill: 8621 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 8622 break; 8623 #endif 8624 8625 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 8626 case TARGET_NR_tgkill: 8627 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 8628 target_to_host_signal(arg3))); 8629 break; 8630 #endif 8631 8632 #ifdef TARGET_NR_set_robust_list 8633 case TARGET_NR_set_robust_list: 8634 case TARGET_NR_get_robust_list: 8635 /* The ABI for supporting robust futexes has userspace pass 8636 * the kernel a pointer to a linked list which is updated by 8637 * userspace after the syscall; the list is walked by the kernel 8638 * when the thread exits. 
Since the linked list in QEMU guest 8639 * memory isn't a valid linked list for the host and we have 8640 * no way to reliably intercept the thread-death event, we can't 8641 * support these. Silently return ENOSYS so that guest userspace 8642 * falls back to a non-robust futex implementation (which should 8643 * be OK except in the corner case of the guest crashing while 8644 * holding a mutex that is shared with another process via 8645 * shared memory). 8646 */ 8647 goto unimplemented_nowarn; 8648 #endif 8649 8650 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat) 8651 case TARGET_NR_utimensat: 8652 { 8653 struct timespec *tsp, ts[2]; 8654 if (!arg3) { 8655 tsp = NULL; 8656 } else { 8657 target_to_host_timespec(ts, arg3); 8658 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 8659 tsp = ts; 8660 } 8661 if (!arg2) 8662 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 8663 else { 8664 if (!(p = lock_user_string(arg2))) { 8665 ret = -TARGET_EFAULT; 8666 goto fail; 8667 } 8668 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 8669 unlock_user(p, arg2, 0); 8670 } 8671 } 8672 break; 8673 #endif 8674 #if defined(CONFIG_USE_NPTL) 8675 case TARGET_NR_futex: 8676 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 8677 break; 8678 #endif 8679 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 8680 case TARGET_NR_inotify_init: 8681 ret = get_errno(sys_inotify_init()); 8682 break; 8683 #endif 8684 #ifdef CONFIG_INOTIFY1 8685 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 8686 case TARGET_NR_inotify_init1: 8687 ret = get_errno(sys_inotify_init1(arg1)); 8688 break; 8689 #endif 8690 #endif 8691 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 8692 case TARGET_NR_inotify_add_watch: 8693 p = lock_user_string(arg2); 8694 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 8695 unlock_user(p, arg2, 0); 8696 break; 8697 #endif 8698 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 8699 case TARGET_NR_inotify_rm_watch: 8700 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 8701 break; 8702 #endif 8703 8704 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 8705 case TARGET_NR_mq_open: 8706 { 8707 struct mq_attr posix_mq_attr; 8708 8709 p = lock_user_string(arg1 - 1); 8710 if (arg4 != 0) 8711 copy_from_user_mq_attr (&posix_mq_attr, arg4); 8712 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 8713 unlock_user (p, arg1, 0); 8714 } 8715 break; 8716 8717 case TARGET_NR_mq_unlink: 8718 p = lock_user_string(arg1 - 1); 8719 ret = get_errno(mq_unlink(p)); 8720 unlock_user (p, arg1, 0); 8721 break; 8722 8723 case TARGET_NR_mq_timedsend: 8724 { 8725 struct timespec ts; 8726 8727 p = lock_user (VERIFY_READ, arg2, arg3, 1); 8728 if (arg5 != 0) { 8729 target_to_host_timespec(&ts, arg5); 8730 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 8731 host_to_target_timespec(arg5, &ts); 8732 } 8733 else 8734 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 8735 unlock_user (p, arg2, arg3); 8736 } 8737 break; 8738 8739 case TARGET_NR_mq_timedreceive: 8740 { 8741 struct timespec ts; 8742 unsigned int prio; 8743 8744 p = lock_user (VERIFY_READ, arg2, arg3, 1); 8745 if (arg5 != 0) { 8746 target_to_host_timespec(&ts, arg5); 8747 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); 8748 host_to_target_timespec(arg5, &ts); 8749 } 8750 else 8751 ret = get_errno(mq_receive(arg1, p, arg3, &prio)); 8752 unlock_user (p, arg2, arg3); 8753 if (arg4 != 0) 8754 put_user_u32(prio, 
arg4);
8755 }
8756 break;
8757
8758 /* Not implemented for now... */
8759 /* case TARGET_NR_mq_notify: */
8760 /* break; */
8761
8762 case TARGET_NR_mq_getsetattr:
8763 {
8764 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8765 ret = 0;
8766 if (arg3 != 0) {
8767 ret = mq_getattr(arg1, &posix_mq_attr_out);
8768 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8769 }
8770 if (arg2 != 0) {
8771 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8772 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8773 }
8774
8775 }
8776 break;
8777 #endif
8778
8779 #ifdef CONFIG_SPLICE
8780 #ifdef TARGET_NR_tee
8781 case TARGET_NR_tee:
8782 {
8783 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8784 }
8785 break;
8786 #endif
8787 #ifdef TARGET_NR_splice
8788 case TARGET_NR_splice:
8789 {
8790 loff_t loff_in, loff_out;
8791 loff_t *ploff_in = NULL, *ploff_out = NULL;
/* arg2 and arg4 are guest pointers to the optional 64-bit off_in/off_out values */
8792 if(arg2) {
8793 get_user_u64(loff_in, arg2);
8794 ploff_in = &loff_in;
8795 }
8796 if(arg4) {
8797 get_user_u64(loff_out, arg4);
8798 ploff_out = &loff_out;
8799 }
8800 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8801 }
8802 break;
8803 #endif
8804 #ifdef TARGET_NR_vmsplice
8805 case TARGET_NR_vmsplice:
8806 {
8807 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8808 if (vec != NULL) {
8809 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8810 unlock_iovec(vec, arg2, arg3, 0);
8811 } else {
8812 ret = -host_to_target_errno(errno);
8813 }
8814 }
8815 break;
8816 #endif
8817 #endif /* CONFIG_SPLICE */
8818 #ifdef CONFIG_EVENTFD
8819 #if defined(TARGET_NR_eventfd)
8820 case TARGET_NR_eventfd:
8821 ret = get_errno(eventfd(arg1, 0));
8822 break;
8823 #endif
8824 #if defined(TARGET_NR_eventfd2)
8825 case TARGET_NR_eventfd2:
8826 {
8827 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8828 if (arg2 & TARGET_O_NONBLOCK) {
8829 host_flags |= O_NONBLOCK;
8830 }
8831 if (arg2 & TARGET_O_CLOEXEC) {
8832 host_flags |= O_CLOEXEC;
8833 }
8834 ret = get_errno(eventfd(arg1, host_flags));
8835 break;
8836 }
8837 #endif
8838 #endif /* CONFIG_EVENTFD */
8839 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8840 case TARGET_NR_fallocate:
8841 #if TARGET_ABI_BITS == 32
8842 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8843 target_offset64(arg5, arg6)));
8844 #else
8845 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8846 #endif
8847 break;
8848 #endif
8849 #if defined(CONFIG_SYNC_FILE_RANGE)
8850 #if defined(TARGET_NR_sync_file_range)
8851 case TARGET_NR_sync_file_range:
8852 #if TARGET_ABI_BITS == 32
8853 #if defined(TARGET_MIPS)
8854 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8855 target_offset64(arg5, arg6), arg7));
8856 #else
8857 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8858 target_offset64(arg4, arg5), arg6));
8859 #endif /* !TARGET_MIPS */
8860 #else
8861 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8862 #endif
8863 break;
8864 #endif
8865 #if defined(TARGET_NR_sync_file_range2)
8866 case TARGET_NR_sync_file_range2:
8867 /* This is like sync_file_range but the arguments are reordered */
8868 #if TARGET_ABI_BITS == 32
8869 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8870 target_offset64(arg5, arg6), arg2));
8871 #else
8872 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8873 #endif
8874 break;
8875 #endif
8876 #endif
8877 #if defined(CONFIG_EPOLL)
8878 #if defined(TARGET_NR_epoll_create)
8879 case TARGET_NR_epoll_create:
8880 ret =
get_errno(epoll_create(arg1)); 8881 break; 8882 #endif 8883 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 8884 case TARGET_NR_epoll_create1: 8885 ret = get_errno(epoll_create1(arg1)); 8886 break; 8887 #endif 8888 #if defined(TARGET_NR_epoll_ctl) 8889 case TARGET_NR_epoll_ctl: 8890 { 8891 struct epoll_event ep; 8892 struct epoll_event *epp = 0; 8893 if (arg4) { 8894 struct target_epoll_event *target_ep; 8895 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 8896 goto efault; 8897 } 8898 ep.events = tswap32(target_ep->events); 8899 /* The epoll_data_t union is just opaque data to the kernel, 8900 * so we transfer all 64 bits across and need not worry what 8901 * actual data type it is. 8902 */ 8903 ep.data.u64 = tswap64(target_ep->data.u64); 8904 unlock_user_struct(target_ep, arg4, 0); 8905 epp = &ep; 8906 } 8907 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 8908 break; 8909 } 8910 #endif 8911 8912 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 8913 #define IMPLEMENT_EPOLL_PWAIT 8914 #endif 8915 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 8916 #if defined(TARGET_NR_epoll_wait) 8917 case TARGET_NR_epoll_wait: 8918 #endif 8919 #if defined(IMPLEMENT_EPOLL_PWAIT) 8920 case TARGET_NR_epoll_pwait: 8921 #endif 8922 { 8923 struct target_epoll_event *target_ep; 8924 struct epoll_event *ep; 8925 int epfd = arg1; 8926 int maxevents = arg3; 8927 int timeout = arg4; 8928 8929 target_ep = lock_user(VERIFY_WRITE, arg2, 8930 maxevents * sizeof(struct target_epoll_event), 1); 8931 if (!target_ep) { 8932 goto efault; 8933 } 8934 8935 ep = alloca(maxevents * sizeof(struct epoll_event)); 8936 8937 switch (num) { 8938 #if defined(IMPLEMENT_EPOLL_PWAIT) 8939 case TARGET_NR_epoll_pwait: 8940 { 8941 target_sigset_t *target_set; 8942 sigset_t _set, *set = &_set; 8943 8944 if (arg5) { 8945 target_set = lock_user(VERIFY_READ, arg5, 8946 sizeof(target_sigset_t), 1); 8947 if (!target_set) { 8948 unlock_user(target_ep, arg2, 0); 8949 goto efault; 8950 } 8951 target_to_host_sigset(set, target_set); 8952 unlock_user(target_set, arg5, 0); 8953 } else { 8954 set = NULL; 8955 } 8956 8957 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 8958 break; 8959 } 8960 #endif 8961 #if defined(TARGET_NR_epoll_wait) 8962 case TARGET_NR_epoll_wait: 8963 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 8964 break; 8965 #endif 8966 default: 8967 ret = -TARGET_ENOSYS; 8968 } 8969 if (!is_error(ret)) { 8970 int i; 8971 for (i = 0; i < ret; i++) { 8972 target_ep[i].events = tswap32(ep[i].events); 8973 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 8974 } 8975 } 8976 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 8977 break; 8978 } 8979 #endif 8980 #endif 8981 #ifdef TARGET_NR_prlimit64 8982 case TARGET_NR_prlimit64: 8983 { 8984 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 8985 struct target_rlimit64 *target_rnew, *target_rold; 8986 struct host_rlimit64 rnew, rold, *rnewp = 0; 8987 if (arg3) { 8988 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 8989 goto efault; 8990 } 8991 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 8992 rnew.rlim_max = tswap64(target_rnew->rlim_max); 8993 unlock_user_struct(target_rnew, arg3, 0); 8994 rnewp = &rnew; 8995 } 8996 8997 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? 
&rold : 0)); 8998 if (!is_error(ret) && arg4) { 8999 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 9000 goto efault; 9001 } 9002 target_rold->rlim_cur = tswap64(rold.rlim_cur); 9003 target_rold->rlim_max = tswap64(rold.rlim_max); 9004 unlock_user_struct(target_rold, arg4, 1); 9005 } 9006 break; 9007 } 9008 #endif 9009 #ifdef TARGET_NR_gethostname 9010 case TARGET_NR_gethostname: 9011 { 9012 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9013 if (name) { 9014 ret = get_errno(gethostname(name, arg2)); 9015 unlock_user(name, arg1, arg2); 9016 } else { 9017 ret = -TARGET_EFAULT; 9018 } 9019 break; 9020 } 9021 #endif 9022 default: 9023 unimplemented: 9024 gemu_log("qemu: Unsupported syscall: %d\n", num); 9025 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 9026 unimplemented_nowarn: 9027 #endif 9028 ret = -TARGET_ENOSYS; 9029 break; 9030 } 9031 fail: 9032 #ifdef DEBUG 9033 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 9034 #endif 9035 if(do_strace) 9036 print_syscall_ret(num, ret); 9037 return ret; 9038 efault: 9039 ret = -TARGET_EFAULT; 9040 goto fail; 9041 } 9042