/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <net/if.h>
#include <qemu-common.h>
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"
#include "qemu-common.h"

#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values. */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])


#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}


#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);

static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};

#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  bzero(buf, sizeof (*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}

#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      va_list ap;
      mode_t mode;

      /*
       * Get the 'mode' parameter and translate it to
       * host bits.
       */
      va_start(ap, flags);
      mode = va_arg(ap, mode_t);
      mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
      va_end(ap);

      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */

#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]           = TARGET_EIDRM,
    [ECHRNG]          = TARGET_ECHRNG,
    [EL2NSYNC]        = TARGET_EL2NSYNC,
    [EL3HLT]          = TARGET_EL3HLT,
    [EL3RST]          = TARGET_EL3RST,
    [ELNRNG]          = TARGET_ELNRNG,
    [EUNATCH]         = TARGET_EUNATCH,
    [ENOCSI]          = TARGET_ENOCSI,
    [EL2HLT]          = TARGET_EL2HLT,
    [EDEADLK]         = TARGET_EDEADLK,
    [ENOLCK]          = TARGET_ENOLCK,
    [EBADE]           = TARGET_EBADE,
    [EBADR]           = TARGET_EBADR,
    [EXFULL]          = TARGET_EXFULL,
    [ENOANO]          = TARGET_ENOANO,
    [EBADRQC]         = TARGET_EBADRQC,
    [EBADSLT]         = TARGET_EBADSLT,
    [EBFONT]          = TARGET_EBFONT,
    [ENOSTR]          = TARGET_ENOSTR,
    [ENODATA]         = TARGET_ENODATA,
    [ETIME]           = TARGET_ETIME,
    [ENOSR]           = TARGET_ENOSR,
    [ENONET]          = TARGET_ENONET,
    [ENOPKG]          = TARGET_ENOPKG,
    [EREMOTE]         = TARGET_EREMOTE,
    [ENOLINK]         = TARGET_ENOLINK,
    [EADV]            = TARGET_EADV,
    [ESRMNT]          = TARGET_ESRMNT,
    [ECOMM]           = TARGET_ECOMM,
    [EPROTO]          = TARGET_EPROTO,
    [EDOTDOT]         = TARGET_EDOTDOT,
    [EMULTIHOP]       = TARGET_EMULTIHOP,
    [EBADMSG]         = TARGET_EBADMSG,
    [ENAMETOOLONG]    = TARGET_ENAMETOOLONG,
    [EOVERFLOW]       = TARGET_EOVERFLOW,
    [ENOTUNIQ]        = TARGET_ENOTUNIQ,
    [EBADFD]          = TARGET_EBADFD,
    [EREMCHG]         = TARGET_EREMCHG,
    [ELIBACC]         = TARGET_ELIBACC,
    [ELIBBAD]         = TARGET_ELIBBAD,
    [ELIBSCN]         = TARGET_ELIBSCN,
    [ELIBMAX]         = TARGET_ELIBMAX,
    [ELIBEXEC]        = TARGET_ELIBEXEC,
    [EILSEQ]          = TARGET_EILSEQ,
    [ENOSYS]          = TARGET_ENOSYS,
    [ELOOP]           = TARGET_ELOOP,
    [ERESTART]        = TARGET_ERESTART,
    [ESTRPIPE]        = TARGET_ESTRPIPE,
    [ENOTEMPTY]       = TARGET_ENOTEMPTY,
    [EUSERS]          = TARGET_EUSERS,
    [ENOTSOCK]        = TARGET_ENOTSOCK,
    [EDESTADDRREQ]    = TARGET_EDESTADDRREQ,
    [EMSGSIZE]        = TARGET_EMSGSIZE,
    [EPROTOTYPE]      = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]     = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]      = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]    = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]    = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]      = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]   = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]        = TARGET_ENETDOWN,
    [ENETUNREACH]     = TARGET_ENETUNREACH,
    [ENETRESET]       = TARGET_ENETRESET,
    [ECONNABORTED]    = TARGET_ECONNABORTED,
    [ECONNRESET]      = TARGET_ECONNRESET,
    [ENOBUFS]         = TARGET_ENOBUFS,
    [EISCONN]         = TARGET_EISCONN,
    [ENOTCONN]        = TARGET_ENOTCONN,
    [EUCLEAN]         = TARGET_EUCLEAN,
    [ENOTNAM]         = TARGET_ENOTNAM,
    [ENAVAIL]         = TARGET_ENAVAIL,
    [EISNAM]          = TARGET_EISNAM,
    [EREMOTEIO]       = TARGET_EREMOTEIO,
    [ESHUTDOWN]       = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]    = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]       = TARGET_ETIMEDOUT,
    [ECONNREFUSED]    = TARGET_ECONNREFUSED,
    [EHOSTDOWN]       = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]    = TARGET_EHOSTUNREACH,
    [EALREADY]        = TARGET_EALREADY,
    [EINPROGRESS]     = TARGET_EINPROGRESS,
    [ESTALE]          = TARGET_ESTALE,
    [ECANCELED]       = TARGET_ECANCELED,
    [ENOMEDIUM]       = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]     = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]          = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]     = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]     = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]    = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]      = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong brk_page;
    abi_long mapped_addr;
    int new_alloc_size;

    if (!new_brk)
        return target_brk;
    if (new_brk < target_original_brk)
        return target_brk;

    brk_page = HOST_PAGE_ALIGN(target_brk);

    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    if (is_error(mapped_addr)) {
        return -TARGET_ENOMEM;
    }
#endif

    if (!is_error(mapped_addr)) {
        target_brk = new_brk;
    }
    return target_brk;
}

static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
{
    if (target_rlim == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;
    else
        return tswapl(target_rlim);
}

static inline target_ulong host_to_target_rlim(rlim_t rlim)
{
    if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
        return TARGET_RLIM_INFINITY;
    else
        return tswapl(rlim);
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    if (rfd_addr) {
        if (copy_from_user_fdset(&rfds, rfd_addr, n))
            return -TARGET_EFAULT;
        rfds_ptr = &rfds;
    } else {
        rfds_ptr = NULL;
    }
    if (wfd_addr) {
        if (copy_from_user_fdset(&wfds, wfd_addr, n))
            return -TARGET_EFAULT;
        wfds_ptr = &wfds;
    } else {
        wfds_ptr = NULL;
    }
    if (efd_addr) {
        if (copy_from_user_fdset(&efds, efd_addr, n))
            return -TARGET_EFAULT;
        efds_ptr = &efds;
    } else {
        efds_ptr = NULL;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* ??? Should this also swap msgh->name? */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* ??? Should this also swap msgh->name? */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument. */
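            /*
             * Each TARGET_SO_* case below only rewrites 'optname' to the
             * corresponding host SO_* constant; the shared code after this
             * switch then reads the 32-bit option value from guest memory
             * and hands it to the host setsockopt().
             */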
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* FIXME
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value.  We must call writev even
               if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg() Must return target values and target errnos. */
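/*
 * One helper handles both sendmsg() and recvmsg(): the target msghdr is
 * converted field by field (name, iovec array, control messages) into a
 * host msghdr before the host call, and the control messages are
 * converted back for recvmsg(); SCM_RIGHTS file descriptors are
 * byte-swapped individually by the cmsg helpers above.
 */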
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* FIXME */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}

#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
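/*
 * socketcall(2) multiplexes every socket operation through a single
 * syscall number: the real arguments are laid out in guest memory as an
 * array of abi_ulong values at 'vptr', so each case below fetches its
 * parameters with get_user_ual() at consecutive word offsets before
 * dispatching to the matching do_*() helper above.
 */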
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
2044 case SOCKOP_sendto: 2045 { 2046 abi_ulong sockfd; 2047 abi_ulong msg; 2048 size_t len; 2049 abi_ulong flags; 2050 abi_ulong addr; 2051 socklen_t addrlen; 2052 2053 if (get_user_ual(sockfd, vptr) 2054 || get_user_ual(msg, vptr + n) 2055 || get_user_ual(len, vptr + 2 * n) 2056 || get_user_ual(flags, vptr + 3 * n) 2057 || get_user_ual(addr, vptr + 4 * n) 2058 || get_user_ual(addrlen, vptr + 5 * n)) 2059 return -TARGET_EFAULT; 2060 2061 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen); 2062 } 2063 break; 2064 case SOCKOP_recvfrom: 2065 { 2066 abi_ulong sockfd; 2067 abi_ulong msg; 2068 size_t len; 2069 abi_ulong flags; 2070 abi_ulong addr; 2071 socklen_t addrlen; 2072 2073 if (get_user_ual(sockfd, vptr) 2074 || get_user_ual(msg, vptr + n) 2075 || get_user_ual(len, vptr + 2 * n) 2076 || get_user_ual(flags, vptr + 3 * n) 2077 || get_user_ual(addr, vptr + 4 * n) 2078 || get_user_ual(addrlen, vptr + 5 * n)) 2079 return -TARGET_EFAULT; 2080 2081 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen); 2082 } 2083 break; 2084 case SOCKOP_shutdown: 2085 { 2086 abi_ulong sockfd, how; 2087 2088 if (get_user_ual(sockfd, vptr) 2089 || get_user_ual(how, vptr + n)) 2090 return -TARGET_EFAULT; 2091 2092 ret = get_errno(shutdown(sockfd, how)); 2093 } 2094 break; 2095 case SOCKOP_sendmsg: 2096 case SOCKOP_recvmsg: 2097 { 2098 abi_ulong fd; 2099 abi_ulong target_msg; 2100 abi_ulong flags; 2101 2102 if (get_user_ual(fd, vptr) 2103 || get_user_ual(target_msg, vptr + n) 2104 || get_user_ual(flags, vptr + 2 * n)) 2105 return -TARGET_EFAULT; 2106 2107 ret = do_sendrecvmsg(fd, target_msg, flags, 2108 (num == SOCKOP_sendmsg)); 2109 } 2110 break; 2111 case SOCKOP_setsockopt: 2112 { 2113 abi_ulong sockfd; 2114 abi_ulong level; 2115 abi_ulong optname; 2116 abi_ulong optval; 2117 socklen_t optlen; 2118 2119 if (get_user_ual(sockfd, vptr) 2120 || get_user_ual(level, vptr + n) 2121 || get_user_ual(optname, vptr + 2 * n) 2122 || get_user_ual(optval, vptr + 3 * n) 2123 || get_user_ual(optlen, vptr + 4 * n)) 2124 return -TARGET_EFAULT; 2125 2126 ret = do_setsockopt(sockfd, level, optname, optval, optlen); 2127 } 2128 break; 2129 case SOCKOP_getsockopt: 2130 { 2131 abi_ulong sockfd; 2132 abi_ulong level; 2133 abi_ulong optname; 2134 abi_ulong optval; 2135 socklen_t optlen; 2136 2137 if (get_user_ual(sockfd, vptr) 2138 || get_user_ual(level, vptr + n) 2139 || get_user_ual(optname, vptr + 2 * n) 2140 || get_user_ual(optval, vptr + 3 * n) 2141 || get_user_ual(optlen, vptr + 4 * n)) 2142 return -TARGET_EFAULT; 2143 2144 ret = do_getsockopt(sockfd, level, optname, optval, optlen); 2145 } 2146 break; 2147 default: 2148 gemu_log("Unsupported socketcall: %d\n", num); 2149 ret = -TARGET_ENOSYS; 2150 break; 2151 } 2152 return ret; 2153 } 2154 #endif 2155 2156 #define N_SHM_REGIONS 32 2157 2158 static struct shm_region { 2159 abi_ulong start; 2160 abi_ulong size; 2161 } shm_regions[N_SHM_REGIONS]; 2162 2163 struct target_ipc_perm 2164 { 2165 abi_long __key; 2166 abi_ulong uid; 2167 abi_ulong gid; 2168 abi_ulong cuid; 2169 abi_ulong cgid; 2170 unsigned short int mode; 2171 unsigned short int __pad1; 2172 unsigned short int __seq; 2173 unsigned short int __pad2; 2174 abi_ulong __unused1; 2175 abi_ulong __unused2; 2176 }; 2177 2178 struct target_semid_ds 2179 { 2180 struct target_ipc_perm sem_perm; 2181 abi_ulong sem_otime; 2182 abi_ulong __unused1; 2183 abi_ulong sem_ctime; 2184 abi_ulong __unused2; 2185 abi_ulong sem_nsems; 2186 abi_ulong __unused3; 2187 abi_ulong __unused4; 2188 }; 2189 2190 static inline abi_long 
target_to_host_ipc_perm(struct ipc_perm *host_ip, 2191 abi_ulong target_addr) 2192 { 2193 struct target_ipc_perm *target_ip; 2194 struct target_semid_ds *target_sd; 2195 2196 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2197 return -TARGET_EFAULT; 2198 target_ip = &(target_sd->sem_perm); 2199 host_ip->__key = tswapl(target_ip->__key); 2200 host_ip->uid = tswapl(target_ip->uid); 2201 host_ip->gid = tswapl(target_ip->gid); 2202 host_ip->cuid = tswapl(target_ip->cuid); 2203 host_ip->cgid = tswapl(target_ip->cgid); 2204 host_ip->mode = tswapl(target_ip->mode); 2205 unlock_user_struct(target_sd, target_addr, 0); 2206 return 0; 2207 } 2208 2209 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2210 struct ipc_perm *host_ip) 2211 { 2212 struct target_ipc_perm *target_ip; 2213 struct target_semid_ds *target_sd; 2214 2215 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2216 return -TARGET_EFAULT; 2217 target_ip = &(target_sd->sem_perm); 2218 target_ip->__key = tswapl(host_ip->__key); 2219 target_ip->uid = tswapl(host_ip->uid); 2220 target_ip->gid = tswapl(host_ip->gid); 2221 target_ip->cuid = tswapl(host_ip->cuid); 2222 target_ip->cgid = tswapl(host_ip->cgid); 2223 target_ip->mode = tswapl(host_ip->mode); 2224 unlock_user_struct(target_sd, target_addr, 1); 2225 return 0; 2226 } 2227 2228 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2229 abi_ulong target_addr) 2230 { 2231 struct target_semid_ds *target_sd; 2232 2233 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2234 return -TARGET_EFAULT; 2235 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2236 return -TARGET_EFAULT; 2237 host_sd->sem_nsems = tswapl(target_sd->sem_nsems); 2238 host_sd->sem_otime = tswapl(target_sd->sem_otime); 2239 host_sd->sem_ctime = tswapl(target_sd->sem_ctime); 2240 unlock_user_struct(target_sd, target_addr, 0); 2241 return 0; 2242 } 2243 2244 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2245 struct semid_ds *host_sd) 2246 { 2247 struct target_semid_ds *target_sd; 2248 2249 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2250 return -TARGET_EFAULT; 2251 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2252 return -TARGET_EFAULT;; 2253 target_sd->sem_nsems = tswapl(host_sd->sem_nsems); 2254 target_sd->sem_otime = tswapl(host_sd->sem_otime); 2255 target_sd->sem_ctime = tswapl(host_sd->sem_ctime); 2256 unlock_user_struct(target_sd, target_addr, 1); 2257 return 0; 2258 } 2259 2260 struct target_seminfo { 2261 int semmap; 2262 int semmni; 2263 int semmns; 2264 int semmnu; 2265 int semmsl; 2266 int semopm; 2267 int semume; 2268 int semusz; 2269 int semvmx; 2270 int semaem; 2271 }; 2272 2273 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2274 struct seminfo *host_seminfo) 2275 { 2276 struct target_seminfo *target_seminfo; 2277 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2278 return -TARGET_EFAULT; 2279 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2280 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2281 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2282 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2283 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2284 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2285 __put_user(host_seminfo->semume, &target_seminfo->semume); 2286 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2287 
__put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2288 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2289 unlock_user_struct(target_seminfo, target_addr, 1); 2290 return 0; 2291 } 2292 2293 union semun { 2294 int val; 2295 struct semid_ds *buf; 2296 unsigned short *array; 2297 struct seminfo *__buf; 2298 }; 2299 2300 union target_semun { 2301 int val; 2302 abi_ulong buf; 2303 abi_ulong array; 2304 abi_ulong __buf; 2305 }; 2306 2307 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2308 abi_ulong target_addr) 2309 { 2310 int nsems; 2311 unsigned short *array; 2312 union semun semun; 2313 struct semid_ds semid_ds; 2314 int i, ret; 2315 2316 semun.buf = &semid_ds; 2317 2318 ret = semctl(semid, 0, IPC_STAT, semun); 2319 if (ret == -1) 2320 return get_errno(ret); 2321 2322 nsems = semid_ds.sem_nsems; 2323 2324 *host_array = malloc(nsems*sizeof(unsigned short)); 2325 array = lock_user(VERIFY_READ, target_addr, 2326 nsems*sizeof(unsigned short), 1); 2327 if (!array) 2328 return -TARGET_EFAULT; 2329 2330 for(i=0; i<nsems; i++) { 2331 __get_user((*host_array)[i], &array[i]); 2332 } 2333 unlock_user(array, target_addr, 0); 2334 2335 return 0; 2336 } 2337 2338 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2339 unsigned short **host_array) 2340 { 2341 int nsems; 2342 unsigned short *array; 2343 union semun semun; 2344 struct semid_ds semid_ds; 2345 int i, ret; 2346 2347 semun.buf = &semid_ds; 2348 2349 ret = semctl(semid, 0, IPC_STAT, semun); 2350 if (ret == -1) 2351 return get_errno(ret); 2352 2353 nsems = semid_ds.sem_nsems; 2354 2355 array = lock_user(VERIFY_WRITE, target_addr, 2356 nsems*sizeof(unsigned short), 0); 2357 if (!array) 2358 return -TARGET_EFAULT; 2359 2360 for(i=0; i<nsems; i++) { 2361 __put_user((*host_array)[i], &array[i]); 2362 } 2363 free(*host_array); 2364 unlock_user(array, target_addr, 1); 2365 2366 return 0; 2367 } 2368 2369 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2370 union target_semun target_su) 2371 { 2372 union semun arg; 2373 struct semid_ds dsarg; 2374 unsigned short *array = NULL; 2375 struct seminfo seminfo; 2376 abi_long ret = -TARGET_EINVAL; 2377 abi_long err; 2378 cmd &= 0xff; 2379 2380 switch( cmd ) { 2381 case GETVAL: 2382 case SETVAL: 2383 arg.val = tswapl(target_su.val); 2384 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2385 target_su.val = tswapl(arg.val); 2386 break; 2387 case GETALL: 2388 case SETALL: 2389 err = target_to_host_semarray(semid, &array, target_su.array); 2390 if (err) 2391 return err; 2392 arg.array = array; 2393 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2394 err = host_to_target_semarray(semid, target_su.array, &array); 2395 if (err) 2396 return err; 2397 break; 2398 case IPC_STAT: 2399 case IPC_SET: 2400 case SEM_STAT: 2401 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2402 if (err) 2403 return err; 2404 arg.buf = &dsarg; 2405 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2406 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2407 if (err) 2408 return err; 2409 break; 2410 case IPC_INFO: 2411 case SEM_INFO: 2412 arg.__buf = &seminfo; 2413 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2414 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2415 if (err) 2416 return err; 2417 break; 2418 case IPC_RMID: 2419 case GETPID: 2420 case GETNCNT: 2421 case GETZCNT: 2422 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2423 break; 2424 } 2425 2426 return ret; 2427 } 2428 2429 struct target_sembuf 
{ 2430 unsigned short sem_num; 2431 short sem_op; 2432 short sem_flg; 2433 }; 2434 2435 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2436 abi_ulong target_addr, 2437 unsigned nsops) 2438 { 2439 struct target_sembuf *target_sembuf; 2440 int i; 2441 2442 target_sembuf = lock_user(VERIFY_READ, target_addr, 2443 nsops*sizeof(struct target_sembuf), 1); 2444 if (!target_sembuf) 2445 return -TARGET_EFAULT; 2446 2447 for(i=0; i<nsops; i++) { 2448 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2449 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2450 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2451 } 2452 2453 unlock_user(target_sembuf, target_addr, 0); 2454 2455 return 0; 2456 } 2457 2458 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2459 { 2460 struct sembuf sops[nsops]; 2461 2462 if (target_to_host_sembuf(sops, ptr, nsops)) 2463 return -TARGET_EFAULT; 2464 2465 return semop(semid, sops, nsops); 2466 } 2467 2468 struct target_msqid_ds 2469 { 2470 struct target_ipc_perm msg_perm; 2471 abi_ulong msg_stime; 2472 #if TARGET_ABI_BITS == 32 2473 abi_ulong __unused1; 2474 #endif 2475 abi_ulong msg_rtime; 2476 #if TARGET_ABI_BITS == 32 2477 abi_ulong __unused2; 2478 #endif 2479 abi_ulong msg_ctime; 2480 #if TARGET_ABI_BITS == 32 2481 abi_ulong __unused3; 2482 #endif 2483 abi_ulong __msg_cbytes; 2484 abi_ulong msg_qnum; 2485 abi_ulong msg_qbytes; 2486 abi_ulong msg_lspid; 2487 abi_ulong msg_lrpid; 2488 abi_ulong __unused4; 2489 abi_ulong __unused5; 2490 }; 2491 2492 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2493 abi_ulong target_addr) 2494 { 2495 struct target_msqid_ds *target_md; 2496 2497 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2498 return -TARGET_EFAULT; 2499 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2500 return -TARGET_EFAULT; 2501 host_md->msg_stime = tswapl(target_md->msg_stime); 2502 host_md->msg_rtime = tswapl(target_md->msg_rtime); 2503 host_md->msg_ctime = tswapl(target_md->msg_ctime); 2504 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes); 2505 host_md->msg_qnum = tswapl(target_md->msg_qnum); 2506 host_md->msg_qbytes = tswapl(target_md->msg_qbytes); 2507 host_md->msg_lspid = tswapl(target_md->msg_lspid); 2508 host_md->msg_lrpid = tswapl(target_md->msg_lrpid); 2509 unlock_user_struct(target_md, target_addr, 0); 2510 return 0; 2511 } 2512 2513 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2514 struct msqid_ds *host_md) 2515 { 2516 struct target_msqid_ds *target_md; 2517 2518 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2519 return -TARGET_EFAULT; 2520 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2521 return -TARGET_EFAULT; 2522 target_md->msg_stime = tswapl(host_md->msg_stime); 2523 target_md->msg_rtime = tswapl(host_md->msg_rtime); 2524 target_md->msg_ctime = tswapl(host_md->msg_ctime); 2525 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes); 2526 target_md->msg_qnum = tswapl(host_md->msg_qnum); 2527 target_md->msg_qbytes = tswapl(host_md->msg_qbytes); 2528 target_md->msg_lspid = tswapl(host_md->msg_lspid); 2529 target_md->msg_lrpid = tswapl(host_md->msg_lrpid); 2530 unlock_user_struct(target_md, target_addr, 1); 2531 return 0; 2532 } 2533 2534 struct target_msginfo { 2535 int msgpool; 2536 int msgmap; 2537 int msgmax; 2538 int msgmnb; 2539 int msgmni; 2540 int msgssz; 2541 int msgtql; 2542 unsigned short int msgseg; 2543 }; 2544 2545 static inline 
abi_long host_to_target_msginfo(abi_ulong target_addr, 2546 struct msginfo *host_msginfo) 2547 { 2548 struct target_msginfo *target_msginfo; 2549 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2550 return -TARGET_EFAULT; 2551 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2552 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2553 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2554 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2555 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2556 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2557 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2558 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2559 unlock_user_struct(target_msginfo, target_addr, 1); 2560 return 0; 2561 } 2562 2563 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2564 { 2565 struct msqid_ds dsarg; 2566 struct msginfo msginfo; 2567 abi_long ret = -TARGET_EINVAL; 2568 2569 cmd &= 0xff; 2570 2571 switch (cmd) { 2572 case IPC_STAT: 2573 case IPC_SET: 2574 case MSG_STAT: 2575 if (target_to_host_msqid_ds(&dsarg,ptr)) 2576 return -TARGET_EFAULT; 2577 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2578 if (host_to_target_msqid_ds(ptr,&dsarg)) 2579 return -TARGET_EFAULT; 2580 break; 2581 case IPC_RMID: 2582 ret = get_errno(msgctl(msgid, cmd, NULL)); 2583 break; 2584 case IPC_INFO: 2585 case MSG_INFO: 2586 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2587 if (host_to_target_msginfo(ptr, &msginfo)) 2588 return -TARGET_EFAULT; 2589 break; 2590 } 2591 2592 return ret; 2593 } 2594 2595 struct target_msgbuf { 2596 abi_long mtype; 2597 char mtext[1]; 2598 }; 2599 2600 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2601 unsigned int msgsz, int msgflg) 2602 { 2603 struct target_msgbuf *target_mb; 2604 struct msgbuf *host_mb; 2605 abi_long ret = 0; 2606 2607 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2608 return -TARGET_EFAULT; 2609 host_mb = malloc(msgsz+sizeof(long)); 2610 host_mb->mtype = (abi_long) tswapl(target_mb->mtype); 2611 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2612 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2613 free(host_mb); 2614 unlock_user_struct(target_mb, msgp, 0); 2615 2616 return ret; 2617 } 2618 2619 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2620 unsigned int msgsz, abi_long msgtyp, 2621 int msgflg) 2622 { 2623 struct target_msgbuf *target_mb; 2624 char *target_mtext; 2625 struct msgbuf *host_mb; 2626 abi_long ret = 0; 2627 2628 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2629 return -TARGET_EFAULT; 2630 2631 host_mb = malloc(msgsz+sizeof(long)); 2632 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg)); 2633 2634 if (ret > 0) { 2635 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2636 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2637 if (!target_mtext) { 2638 ret = -TARGET_EFAULT; 2639 goto end; 2640 } 2641 memcpy(target_mb->mtext, host_mb->mtext, ret); 2642 unlock_user(target_mtext, target_mtext_addr, ret); 2643 } 2644 2645 target_mb->mtype = tswapl(host_mb->mtype); 2646 free(host_mb); 2647 2648 end: 2649 if (target_mb) 2650 unlock_user_struct(target_mb, msgp, 1); 2651 return ret; 2652 } 2653 2654 struct target_shmid_ds 2655 { 2656 struct target_ipc_perm shm_perm; 2657 abi_ulong shm_segsz; 2658 abi_ulong shm_atime; 2659 #if TARGET_ABI_BITS == 32 2660 abi_ulong __unused1; 2661 #endif 2662 abi_ulong shm_dtime; 
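    /* As with __unused1 above, the __unused2 and __unused3 words below are the
     * reserved padding that 32-bit target ABIs keep after each time field. */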
2663 #if TARGET_ABI_BITS == 32 2664 abi_ulong __unused2; 2665 #endif 2666 abi_ulong shm_ctime; 2667 #if TARGET_ABI_BITS == 32 2668 abi_ulong __unused3; 2669 #endif 2670 int shm_cpid; 2671 int shm_lpid; 2672 abi_ulong shm_nattch; 2673 unsigned long int __unused4; 2674 unsigned long int __unused5; 2675 }; 2676 2677 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2678 abi_ulong target_addr) 2679 { 2680 struct target_shmid_ds *target_sd; 2681 2682 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2683 return -TARGET_EFAULT; 2684 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2685 return -TARGET_EFAULT; 2686 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2687 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2688 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2689 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2690 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2691 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2692 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2693 unlock_user_struct(target_sd, target_addr, 0); 2694 return 0; 2695 } 2696 2697 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2698 struct shmid_ds *host_sd) 2699 { 2700 struct target_shmid_ds *target_sd; 2701 2702 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2703 return -TARGET_EFAULT; 2704 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2705 return -TARGET_EFAULT; 2706 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2707 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2708 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2709 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2710 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2711 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2712 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2713 unlock_user_struct(target_sd, target_addr, 1); 2714 return 0; 2715 } 2716 2717 struct target_shminfo { 2718 abi_ulong shmmax; 2719 abi_ulong shmmin; 2720 abi_ulong shmmni; 2721 abi_ulong shmseg; 2722 abi_ulong shmall; 2723 }; 2724 2725 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2726 struct shminfo *host_shminfo) 2727 { 2728 struct target_shminfo *target_shminfo; 2729 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 2730 return -TARGET_EFAULT; 2731 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 2732 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 2733 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 2734 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 2735 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 2736 unlock_user_struct(target_shminfo, target_addr, 1); 2737 return 0; 2738 } 2739 2740 struct target_shm_info { 2741 int used_ids; 2742 abi_ulong shm_tot; 2743 abi_ulong shm_rss; 2744 abi_ulong shm_swp; 2745 abi_ulong swap_attempts; 2746 abi_ulong swap_successes; 2747 }; 2748 2749 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 2750 struct shm_info *host_shm_info) 2751 { 2752 struct target_shm_info *target_shm_info; 2753 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 2754 return -TARGET_EFAULT; 2755 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 2756 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 2757 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 2758 __put_user(host_shm_info->shm_swp, 
&target_shm_info->shm_swp); 2759 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 2760 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 2761 unlock_user_struct(target_shm_info, target_addr, 1); 2762 return 0; 2763 } 2764 2765 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 2766 { 2767 struct shmid_ds dsarg; 2768 struct shminfo shminfo; 2769 struct shm_info shm_info; 2770 abi_long ret = -TARGET_EINVAL; 2771 2772 cmd &= 0xff; 2773 2774 switch(cmd) { 2775 case IPC_STAT: 2776 case IPC_SET: 2777 case SHM_STAT: 2778 if (target_to_host_shmid_ds(&dsarg, buf)) 2779 return -TARGET_EFAULT; 2780 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 2781 if (host_to_target_shmid_ds(buf, &dsarg)) 2782 return -TARGET_EFAULT; 2783 break; 2784 case IPC_INFO: 2785 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 2786 if (host_to_target_shminfo(buf, &shminfo)) 2787 return -TARGET_EFAULT; 2788 break; 2789 case SHM_INFO: 2790 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 2791 if (host_to_target_shm_info(buf, &shm_info)) 2792 return -TARGET_EFAULT; 2793 break; 2794 case IPC_RMID: 2795 case SHM_LOCK: 2796 case SHM_UNLOCK: 2797 ret = get_errno(shmctl(shmid, cmd, NULL)); 2798 break; 2799 } 2800 2801 return ret; 2802 } 2803 2804 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 2805 { 2806 abi_long raddr; 2807 void *host_raddr; 2808 struct shmid_ds shm_info; 2809 int i,ret; 2810 2811 /* find out the length of the shared memory segment */ 2812 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 2813 if (is_error(ret)) { 2814 /* can't get length, bail out */ 2815 return ret; 2816 } 2817 2818 mmap_lock(); 2819 2820 if (shmaddr) 2821 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 2822 else { 2823 abi_ulong mmap_start; 2824 2825 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 2826 2827 if (mmap_start == -1) { 2828 errno = ENOMEM; 2829 host_raddr = (void *)-1; 2830 } else 2831 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 2832 } 2833 2834 if (host_raddr == (void *)-1) { 2835 mmap_unlock(); 2836 return get_errno((long)host_raddr); 2837 } 2838 raddr=h2g((unsigned long)host_raddr); 2839 2840 page_set_flags(raddr, raddr + shm_info.shm_segsz, 2841 PAGE_VALID | PAGE_READ | 2842 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 2843 2844 for (i = 0; i < N_SHM_REGIONS; i++) { 2845 if (shm_regions[i].start == 0) { 2846 shm_regions[i].start = raddr; 2847 shm_regions[i].size = shm_info.shm_segsz; 2848 break; 2849 } 2850 } 2851 2852 mmap_unlock(); 2853 return raddr; 2854 2855 } 2856 2857 static inline abi_long do_shmdt(abi_ulong shmaddr) 2858 { 2859 int i; 2860 2861 for (i = 0; i < N_SHM_REGIONS; ++i) { 2862 if (shm_regions[i].start == shmaddr) { 2863 shm_regions[i].start = 0; 2864 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 2865 break; 2866 } 2867 } 2868 2869 return get_errno(shmdt(g2h(shmaddr))); 2870 } 2871 2872 #ifdef TARGET_NR_ipc 2873 /* ??? This only works with linear mappings. */ 2874 /* do_ipc() must return target values and target errnos. 
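   The single ipc(2) entry point multiplexes the SysV IPC calls: the low 16
   bits of 'call' select the IPCOP_* operation and the high 16 bits carry the
   version, which only matters for the msgrcv and shmat compatibility cases
   handled below.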
*/ 2875 static abi_long do_ipc(unsigned int call, int first, 2876 int second, int third, 2877 abi_long ptr, abi_long fifth) 2878 { 2879 int version; 2880 abi_long ret = 0; 2881 2882 version = call >> 16; 2883 call &= 0xffff; 2884 2885 switch (call) { 2886 case IPCOP_semop: 2887 ret = do_semop(first, ptr, second); 2888 break; 2889 2890 case IPCOP_semget: 2891 ret = get_errno(semget(first, second, third)); 2892 break; 2893 2894 case IPCOP_semctl: 2895 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 2896 break; 2897 2898 case IPCOP_msgget: 2899 ret = get_errno(msgget(first, second)); 2900 break; 2901 2902 case IPCOP_msgsnd: 2903 ret = do_msgsnd(first, ptr, second, third); 2904 break; 2905 2906 case IPCOP_msgctl: 2907 ret = do_msgctl(first, second, ptr); 2908 break; 2909 2910 case IPCOP_msgrcv: 2911 switch (version) { 2912 case 0: 2913 { 2914 struct target_ipc_kludge { 2915 abi_long msgp; 2916 abi_long msgtyp; 2917 } *tmp; 2918 2919 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 2920 ret = -TARGET_EFAULT; 2921 break; 2922 } 2923 2924 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third); 2925 2926 unlock_user_struct(tmp, ptr, 0); 2927 break; 2928 } 2929 default: 2930 ret = do_msgrcv(first, ptr, second, fifth, third); 2931 } 2932 break; 2933 2934 case IPCOP_shmat: 2935 switch (version) { 2936 default: 2937 { 2938 abi_ulong raddr; 2939 raddr = do_shmat(first, ptr, second); 2940 if (is_error(raddr)) 2941 return get_errno(raddr); 2942 if (put_user_ual(raddr, third)) 2943 return -TARGET_EFAULT; 2944 break; 2945 } 2946 case 1: 2947 ret = -TARGET_EINVAL; 2948 break; 2949 } 2950 break; 2951 case IPCOP_shmdt: 2952 ret = do_shmdt(ptr); 2953 break; 2954 2955 case IPCOP_shmget: 2956 /* IPC_* flag values are the same on all linux platforms */ 2957 ret = get_errno(shmget(first, second, third)); 2958 break; 2959 2960 /* IPC_* and SHM_* command values are the same on all linux platforms */ 2961 case IPCOP_shmctl: 2962 ret = do_shmctl(first, second, third); 2963 break; 2964 default: 2965 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 2966 ret = -TARGET_ENOSYS; 2967 break; 2968 } 2969 return ret; 2970 } 2971 #endif 2972 2973 /* kernel structure types definitions */ 2974 2975 #define STRUCT(name, ...) STRUCT_ ## name, 2976 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 2977 enum { 2978 #include "syscall_types.h" 2979 }; 2980 #undef STRUCT 2981 #undef STRUCT_SPECIAL 2982 2983 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 2984 #define STRUCT_SPECIAL(name) 2985 #include "syscall_types.h" 2986 #undef STRUCT 2987 #undef STRUCT_SPECIAL 2988 2989 typedef struct IOCTLEntry IOCTLEntry; 2990 2991 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 2992 int fd, abi_long cmd, abi_long arg); 2993 2994 struct IOCTLEntry { 2995 unsigned int target_cmd; 2996 unsigned int host_cmd; 2997 const char *name; 2998 int access; 2999 do_ioctl_fn *do_ioctl; 3000 const argtype arg_type[5]; 3001 }; 3002 3003 #define IOC_R 0x0001 3004 #define IOC_W 0x0002 3005 #define IOC_RW (IOC_R | IOC_W) 3006 3007 #define MAX_STRUCT_SIZE 4096 3008 3009 #ifdef CONFIG_FIEMAP 3010 /* So fiemap access checks don't overflow on 32 bit systems. 3011 * This is very slightly smaller than the limit imposed by 3012 * the underlying kernel. 
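 * The cap keeps sizeof(struct fiemap) + fm_extent_count *
 * sizeof(struct fiemap_extent) from overflowing the 32 bit outbufsz
 * computed in do_ioctl_fs_ioc_fiemap() below.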
3013 */ 3014 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3015 / sizeof(struct fiemap_extent)) 3016 3017 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3018 int fd, abi_long cmd, abi_long arg) 3019 { 3020 /* The parameter for this ioctl is a struct fiemap followed 3021 * by an array of struct fiemap_extent whose size is set 3022 * in fiemap->fm_extent_count. The array is filled in by the 3023 * ioctl. 3024 */ 3025 int target_size_in, target_size_out; 3026 struct fiemap *fm; 3027 const argtype *arg_type = ie->arg_type; 3028 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3029 void *argptr, *p; 3030 abi_long ret; 3031 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3032 uint32_t outbufsz; 3033 int free_fm = 0; 3034 3035 assert(arg_type[0] == TYPE_PTR); 3036 assert(ie->access == IOC_RW); 3037 arg_type++; 3038 target_size_in = thunk_type_size(arg_type, 0); 3039 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3040 if (!argptr) { 3041 return -TARGET_EFAULT; 3042 } 3043 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3044 unlock_user(argptr, arg, 0); 3045 fm = (struct fiemap *)buf_temp; 3046 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3047 return -TARGET_EINVAL; 3048 } 3049 3050 outbufsz = sizeof (*fm) + 3051 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3052 3053 if (outbufsz > MAX_STRUCT_SIZE) { 3054 /* We can't fit all the extents into the fixed size buffer. 3055 * Allocate one that is large enough and use it instead. 3056 */ 3057 fm = malloc(outbufsz); 3058 if (!fm) { 3059 return -TARGET_ENOMEM; 3060 } 3061 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3062 free_fm = 1; 3063 } 3064 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3065 if (!is_error(ret)) { 3066 target_size_out = target_size_in; 3067 /* An extent_count of 0 means we were only counting the extents 3068 * so there are no structs to copy 3069 */ 3070 if (fm->fm_extent_count != 0) { 3071 target_size_out += fm->fm_mapped_extents * extent_size; 3072 } 3073 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3074 if (!argptr) { 3075 ret = -TARGET_EFAULT; 3076 } else { 3077 /* Convert the struct fiemap */ 3078 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3079 if (fm->fm_extent_count != 0) { 3080 p = argptr + target_size_in; 3081 /* ...and then all the struct fiemap_extents */ 3082 for (i = 0; i < fm->fm_mapped_extents; i++) { 3083 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3084 THUNK_TARGET); 3085 p += extent_size; 3086 } 3087 } 3088 unlock_user(argptr, arg, target_size_out); 3089 } 3090 } 3091 if (free_fm) { 3092 free(fm); 3093 } 3094 return ret; 3095 } 3096 #endif 3097 3098 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3099 int fd, abi_long cmd, abi_long arg) 3100 { 3101 const argtype *arg_type = ie->arg_type; 3102 int target_size; 3103 void *argptr; 3104 int ret; 3105 struct ifconf *host_ifconf; 3106 uint32_t outbufsz; 3107 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3108 int target_ifreq_size; 3109 int nb_ifreq; 3110 int free_buf = 0; 3111 int i; 3112 int target_ifc_len; 3113 abi_long target_ifc_buf; 3114 int host_ifc_len; 3115 char *host_ifc_buf; 3116 3117 assert(arg_type[0] == TYPE_PTR); 3118 assert(ie->access == IOC_RW); 3119 3120 arg_type++; 3121 target_size = thunk_type_size(arg_type, 0); 3122 3123 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3124 if (!argptr) 3125 return -TARGET_EFAULT; 3126 thunk_convert(buf_temp, argptr, 
arg_type, THUNK_HOST); 3127 unlock_user(argptr, arg, 0); 3128 3129 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3130 target_ifc_len = host_ifconf->ifc_len; 3131 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3132 3133 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3134 nb_ifreq = target_ifc_len / target_ifreq_size; 3135 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3136 3137 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3138 if (outbufsz > MAX_STRUCT_SIZE) { 3139 /* We can't fit all the extents into the fixed size buffer. 3140 * Allocate one that is large enough and use it instead. 3141 */ 3142 host_ifconf = malloc(outbufsz); 3143 if (!host_ifconf) { 3144 return -TARGET_ENOMEM; 3145 } 3146 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3147 free_buf = 1; 3148 } 3149 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3150 3151 host_ifconf->ifc_len = host_ifc_len; 3152 host_ifconf->ifc_buf = host_ifc_buf; 3153 3154 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3155 if (!is_error(ret)) { 3156 /* convert host ifc_len to target ifc_len */ 3157 3158 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3159 target_ifc_len = nb_ifreq * target_ifreq_size; 3160 host_ifconf->ifc_len = target_ifc_len; 3161 3162 /* restore target ifc_buf */ 3163 3164 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3165 3166 /* copy struct ifconf to target user */ 3167 3168 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3169 if (!argptr) 3170 return -TARGET_EFAULT; 3171 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3172 unlock_user(argptr, arg, target_size); 3173 3174 /* copy ifreq[] to target user */ 3175 3176 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3177 for (i = 0; i < nb_ifreq ; i++) { 3178 thunk_convert(argptr + i * target_ifreq_size, 3179 host_ifc_buf + i * sizeof(struct ifreq), 3180 ifreq_arg_type, THUNK_TARGET); 3181 } 3182 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3183 } 3184 3185 if (free_buf) { 3186 free(host_ifconf); 3187 } 3188 3189 return ret; 3190 } 3191 3192 static IOCTLEntry ioctl_entries[] = { 3193 #define IOCTL(cmd, access, ...) \ 3194 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3195 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3196 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3197 #include "ioctls.h" 3198 { 0, 0, }, 3199 }; 3200 3201 /* ??? Implement proper locking for ioctls. */ 3202 /* do_ioctl() Must return target values and target errnos. 
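   Requests are looked up in ioctl_entries[], generated from ioctls.h by the
   IOCTL()/IOCTL_SPECIAL() macros above: an entry either names a custom
   do_ioctl_fn handler or describes its argument with thunk types so the
   generic IOC_R/IOC_W/IOC_RW paths below can convert it between target and
   host layouts.  An illustrative entry (assumed, not quoted from ioctls.h)
   would be
       IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))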
*/ 3203 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3204 { 3205 const IOCTLEntry *ie; 3206 const argtype *arg_type; 3207 abi_long ret; 3208 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3209 int target_size; 3210 void *argptr; 3211 3212 ie = ioctl_entries; 3213 for(;;) { 3214 if (ie->target_cmd == 0) { 3215 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3216 return -TARGET_ENOSYS; 3217 } 3218 if (ie->target_cmd == cmd) 3219 break; 3220 ie++; 3221 } 3222 arg_type = ie->arg_type; 3223 #if defined(DEBUG) 3224 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3225 #endif 3226 if (ie->do_ioctl) { 3227 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3228 } 3229 3230 switch(arg_type[0]) { 3231 case TYPE_NULL: 3232 /* no argument */ 3233 ret = get_errno(ioctl(fd, ie->host_cmd)); 3234 break; 3235 case TYPE_PTRVOID: 3236 case TYPE_INT: 3237 /* int argment */ 3238 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3239 break; 3240 case TYPE_PTR: 3241 arg_type++; 3242 target_size = thunk_type_size(arg_type, 0); 3243 switch(ie->access) { 3244 case IOC_R: 3245 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3246 if (!is_error(ret)) { 3247 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3248 if (!argptr) 3249 return -TARGET_EFAULT; 3250 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3251 unlock_user(argptr, arg, target_size); 3252 } 3253 break; 3254 case IOC_W: 3255 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3256 if (!argptr) 3257 return -TARGET_EFAULT; 3258 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3259 unlock_user(argptr, arg, 0); 3260 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3261 break; 3262 default: 3263 case IOC_RW: 3264 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3265 if (!argptr) 3266 return -TARGET_EFAULT; 3267 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3268 unlock_user(argptr, arg, 0); 3269 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3270 if (!is_error(ret)) { 3271 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3272 if (!argptr) 3273 return -TARGET_EFAULT; 3274 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3275 unlock_user(argptr, arg, target_size); 3276 } 3277 break; 3278 } 3279 break; 3280 default: 3281 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3282 (long)cmd, arg_type[0]); 3283 ret = -TARGET_ENOSYS; 3284 break; 3285 } 3286 return ret; 3287 } 3288 3289 static const bitmask_transtbl iflag_tbl[] = { 3290 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3291 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3292 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3293 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3294 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3295 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3296 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3297 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3298 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3299 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3300 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3301 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3302 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3303 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3304 { 0, 0, 0, 0 } 3305 }; 3306 3307 static const bitmask_transtbl oflag_tbl[] = { 3308 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3309 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3310 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3311 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3312 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR 
}, 3313 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3314 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3315 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3316 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3317 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3318 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3319 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3320 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3321 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3322 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3323 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3324 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3325 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3326 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3327 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3328 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3329 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3330 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3331 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3332 { 0, 0, 0, 0 } 3333 }; 3334 3335 static const bitmask_transtbl cflag_tbl[] = { 3336 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3337 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3338 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3339 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3340 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3341 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3342 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3343 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3344 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3345 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3346 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3347 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3348 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3349 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3350 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3351 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3352 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3353 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3354 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3355 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3356 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3357 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3358 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3359 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3360 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3361 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3362 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3363 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3364 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3365 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3366 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3367 { 0, 0, 0, 0 } 3368 }; 3369 3370 static const bitmask_transtbl lflag_tbl[] = { 3371 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3372 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3373 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3374 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3375 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3376 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3377 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3378 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3379 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3380 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3381 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3382 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3383 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3384 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3385 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3386 { 0, 0, 0, 0 } 3387 }; 3388 3389 static void target_to_host_termios 
(void *dst, const void *src) 3390 { 3391 struct host_termios *host = dst; 3392 const struct target_termios *target = src; 3393 3394 host->c_iflag = 3395 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3396 host->c_oflag = 3397 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3398 host->c_cflag = 3399 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3400 host->c_lflag = 3401 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3402 host->c_line = target->c_line; 3403 3404 memset(host->c_cc, 0, sizeof(host->c_cc)); 3405 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3406 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3407 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3408 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3409 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3410 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3411 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3412 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3413 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3414 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3415 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3416 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3417 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3418 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3419 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3420 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3421 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3422 } 3423 3424 static void host_to_target_termios (void *dst, const void *src) 3425 { 3426 struct target_termios *target = dst; 3427 const struct host_termios *host = src; 3428 3429 target->c_iflag = 3430 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3431 target->c_oflag = 3432 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3433 target->c_cflag = 3434 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3435 target->c_lflag = 3436 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3437 target->c_line = host->c_line; 3438 3439 memset(target->c_cc, 0, sizeof(target->c_cc)); 3440 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3441 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3442 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3443 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3444 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3445 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3446 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3447 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3448 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3449 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3450 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3451 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3452 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3453 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3454 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3455 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3456 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3457 } 3458 3459 static const StructEntry struct_termios_def = { 3460 .convert = { host_to_target_termios, target_to_host_termios }, 3461 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3462 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3463 }; 3464 3465 static bitmask_transtbl mmap_flags_tbl[] = { 3466 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3467 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3468 { TARGET_MAP_FIXED, 
TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 3469 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 3470 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 3471 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 3472 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 3473 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 3474 { 0, 0, 0, 0 } 3475 }; 3476 3477 #if defined(TARGET_I386) 3478 3479 /* NOTE: there is really one LDT for all the threads */ 3480 static uint8_t *ldt_table; 3481 3482 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 3483 { 3484 int size; 3485 void *p; 3486 3487 if (!ldt_table) 3488 return 0; 3489 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 3490 if (size > bytecount) 3491 size = bytecount; 3492 p = lock_user(VERIFY_WRITE, ptr, size, 0); 3493 if (!p) 3494 return -TARGET_EFAULT; 3495 /* ??? Should this by byteswapped? */ 3496 memcpy(p, ldt_table, size); 3497 unlock_user(p, ptr, size); 3498 return size; 3499 } 3500 3501 /* XXX: add locking support */ 3502 static abi_long write_ldt(CPUX86State *env, 3503 abi_ulong ptr, unsigned long bytecount, int oldmode) 3504 { 3505 struct target_modify_ldt_ldt_s ldt_info; 3506 struct target_modify_ldt_ldt_s *target_ldt_info; 3507 int seg_32bit, contents, read_exec_only, limit_in_pages; 3508 int seg_not_present, useable, lm; 3509 uint32_t *lp, entry_1, entry_2; 3510 3511 if (bytecount != sizeof(ldt_info)) 3512 return -TARGET_EINVAL; 3513 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 3514 return -TARGET_EFAULT; 3515 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3516 ldt_info.base_addr = tswapl(target_ldt_info->base_addr); 3517 ldt_info.limit = tswap32(target_ldt_info->limit); 3518 ldt_info.flags = tswap32(target_ldt_info->flags); 3519 unlock_user_struct(target_ldt_info, ptr, 0); 3520 3521 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 3522 return -TARGET_EINVAL; 3523 seg_32bit = ldt_info.flags & 1; 3524 contents = (ldt_info.flags >> 1) & 3; 3525 read_exec_only = (ldt_info.flags >> 3) & 1; 3526 limit_in_pages = (ldt_info.flags >> 4) & 1; 3527 seg_not_present = (ldt_info.flags >> 5) & 1; 3528 useable = (ldt_info.flags >> 6) & 1; 3529 #ifdef TARGET_ABI32 3530 lm = 0; 3531 #else 3532 lm = (ldt_info.flags >> 7) & 1; 3533 #endif 3534 if (contents == 3) { 3535 if (oldmode) 3536 return -TARGET_EINVAL; 3537 if (seg_not_present == 0) 3538 return -TARGET_EINVAL; 3539 } 3540 /* allocate the LDT */ 3541 if (!ldt_table) { 3542 env->ldt.base = target_mmap(0, 3543 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 3544 PROT_READ|PROT_WRITE, 3545 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 3546 if (env->ldt.base == -1) 3547 return -TARGET_ENOMEM; 3548 memset(g2h(env->ldt.base), 0, 3549 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 3550 env->ldt.limit = 0xffff; 3551 ldt_table = g2h(env->ldt.base); 3552 } 3553 3554 /* NOTE: same code as Linux kernel */ 3555 /* Allow LDTs to be cleared by the user. 
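   A request whose base_addr and limit are both zero (and whose flags describe
   an empty, not-present segment, or any such request in oldmode) installs an
   all-zero descriptor instead of being rejected.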
*/ 3556 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3557 if (oldmode || 3558 (contents == 0 && 3559 read_exec_only == 1 && 3560 seg_32bit == 0 && 3561 limit_in_pages == 0 && 3562 seg_not_present == 1 && 3563 useable == 0 )) { 3564 entry_1 = 0; 3565 entry_2 = 0; 3566 goto install; 3567 } 3568 } 3569 3570 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3571 (ldt_info.limit & 0x0ffff); 3572 entry_2 = (ldt_info.base_addr & 0xff000000) | 3573 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3574 (ldt_info.limit & 0xf0000) | 3575 ((read_exec_only ^ 1) << 9) | 3576 (contents << 10) | 3577 ((seg_not_present ^ 1) << 15) | 3578 (seg_32bit << 22) | 3579 (limit_in_pages << 23) | 3580 (lm << 21) | 3581 0x7000; 3582 if (!oldmode) 3583 entry_2 |= (useable << 20); 3584 3585 /* Install the new entry ... */ 3586 install: 3587 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 3588 lp[0] = tswap32(entry_1); 3589 lp[1] = tswap32(entry_2); 3590 return 0; 3591 } 3592 3593 /* specific and weird i386 syscalls */ 3594 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 3595 unsigned long bytecount) 3596 { 3597 abi_long ret; 3598 3599 switch (func) { 3600 case 0: 3601 ret = read_ldt(ptr, bytecount); 3602 break; 3603 case 1: 3604 ret = write_ldt(env, ptr, bytecount, 1); 3605 break; 3606 case 0x11: 3607 ret = write_ldt(env, ptr, bytecount, 0); 3608 break; 3609 default: 3610 ret = -TARGET_ENOSYS; 3611 break; 3612 } 3613 return ret; 3614 } 3615 3616 #if defined(TARGET_I386) && defined(TARGET_ABI32) 3617 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 3618 { 3619 uint64_t *gdt_table = g2h(env->gdt.base); 3620 struct target_modify_ldt_ldt_s ldt_info; 3621 struct target_modify_ldt_ldt_s *target_ldt_info; 3622 int seg_32bit, contents, read_exec_only, limit_in_pages; 3623 int seg_not_present, useable, lm; 3624 uint32_t *lp, entry_1, entry_2; 3625 int i; 3626 3627 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 3628 if (!target_ldt_info) 3629 return -TARGET_EFAULT; 3630 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3631 ldt_info.base_addr = tswapl(target_ldt_info->base_addr); 3632 ldt_info.limit = tswap32(target_ldt_info->limit); 3633 ldt_info.flags = tswap32(target_ldt_info->flags); 3634 if (ldt_info.entry_number == -1) { 3635 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 3636 if (gdt_table[i] == 0) { 3637 ldt_info.entry_number = i; 3638 target_ldt_info->entry_number = tswap32(i); 3639 break; 3640 } 3641 } 3642 } 3643 unlock_user_struct(target_ldt_info, ptr, 1); 3644 3645 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 3646 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 3647 return -TARGET_EINVAL; 3648 seg_32bit = ldt_info.flags & 1; 3649 contents = (ldt_info.flags >> 1) & 3; 3650 read_exec_only = (ldt_info.flags >> 3) & 1; 3651 limit_in_pages = (ldt_info.flags >> 4) & 1; 3652 seg_not_present = (ldt_info.flags >> 5) & 1; 3653 useable = (ldt_info.flags >> 6) & 1; 3654 #ifdef TARGET_ABI32 3655 lm = 0; 3656 #else 3657 lm = (ldt_info.flags >> 7) & 1; 3658 #endif 3659 3660 if (contents == 3) { 3661 if (seg_not_present == 0) 3662 return -TARGET_EINVAL; 3663 } 3664 3665 /* NOTE: same code as Linux kernel */ 3666 /* Allow LDTs to be cleared by the user. 
*/ 3667 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3668 if ((contents == 0 && 3669 read_exec_only == 1 && 3670 seg_32bit == 0 && 3671 limit_in_pages == 0 && 3672 seg_not_present == 1 && 3673 useable == 0 )) { 3674 entry_1 = 0; 3675 entry_2 = 0; 3676 goto install; 3677 } 3678 } 3679 3680 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3681 (ldt_info.limit & 0x0ffff); 3682 entry_2 = (ldt_info.base_addr & 0xff000000) | 3683 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3684 (ldt_info.limit & 0xf0000) | 3685 ((read_exec_only ^ 1) << 9) | 3686 (contents << 10) | 3687 ((seg_not_present ^ 1) << 15) | 3688 (seg_32bit << 22) | 3689 (limit_in_pages << 23) | 3690 (useable << 20) | 3691 (lm << 21) | 3692 0x7000; 3693 3694 /* Install the new entry ... */ 3695 install: 3696 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 3697 lp[0] = tswap32(entry_1); 3698 lp[1] = tswap32(entry_2); 3699 return 0; 3700 } 3701 3702 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 3703 { 3704 struct target_modify_ldt_ldt_s *target_ldt_info; 3705 uint64_t *gdt_table = g2h(env->gdt.base); 3706 uint32_t base_addr, limit, flags; 3707 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 3708 int seg_not_present, useable, lm; 3709 uint32_t *lp, entry_1, entry_2; 3710 3711 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 3712 if (!target_ldt_info) 3713 return -TARGET_EFAULT; 3714 idx = tswap32(target_ldt_info->entry_number); 3715 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 3716 idx > TARGET_GDT_ENTRY_TLS_MAX) { 3717 unlock_user_struct(target_ldt_info, ptr, 1); 3718 return -TARGET_EINVAL; 3719 } 3720 lp = (uint32_t *)(gdt_table + idx); 3721 entry_1 = tswap32(lp[0]); 3722 entry_2 = tswap32(lp[1]); 3723 3724 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 3725 contents = (entry_2 >> 10) & 3; 3726 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 3727 seg_32bit = (entry_2 >> 22) & 1; 3728 limit_in_pages = (entry_2 >> 23) & 1; 3729 useable = (entry_2 >> 20) & 1; 3730 #ifdef TARGET_ABI32 3731 lm = 0; 3732 #else 3733 lm = (entry_2 >> 21) & 1; 3734 #endif 3735 flags = (seg_32bit << 0) | (contents << 1) | 3736 (read_exec_only << 3) | (limit_in_pages << 4) | 3737 (seg_not_present << 5) | (useable << 6) | (lm << 7); 3738 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 3739 base_addr = (entry_1 >> 16) | 3740 (entry_2 & 0xff000000) | 3741 ((entry_2 & 0xff) << 16); 3742 target_ldt_info->base_addr = tswapl(base_addr); 3743 target_ldt_info->limit = tswap32(limit); 3744 target_ldt_info->flags = tswap32(flags); 3745 unlock_user_struct(target_ldt_info, ptr, 1); 3746 return 0; 3747 } 3748 #endif /* TARGET_I386 && TARGET_ABI32 */ 3749 3750 #ifndef TARGET_ABI32 3751 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 3752 { 3753 abi_long ret; 3754 abi_ulong val; 3755 int idx; 3756 3757 switch(code) { 3758 case TARGET_ARCH_SET_GS: 3759 case TARGET_ARCH_SET_FS: 3760 if (code == TARGET_ARCH_SET_GS) 3761 idx = R_GS; 3762 else 3763 idx = R_FS; 3764 cpu_x86_load_seg(env, idx, 0); 3765 env->segs[idx].base = addr; 3766 break; 3767 case TARGET_ARCH_GET_GS: 3768 case TARGET_ARCH_GET_FS: 3769 if (code == TARGET_ARCH_GET_GS) 3770 idx = R_GS; 3771 else 3772 idx = R_FS; 3773 val = env->segs[idx].base; 3774 if (put_user(val, addr, abi_ulong)) 3775 return -TARGET_EFAULT; 3776 break; 3777 default: 3778 ret = -TARGET_EINVAL; 3779 break; 3780 } 3781 return 0; 3782 } 3783 #endif 3784 3785 #endif /* defined(TARGET_I386) */ 3786 3787 #define NEW_STACK_SIZE 0x40000 3788 3789 #if defined(CONFIG_USE_NPTL) 3790 
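/* CLONE_VM clones (see do_fork() below) are implemented as host pthreads:
 * the parent fills in a new_thread_info, blocks all signals, and starts
 * clone_func() on a detached thread; the child publishes its TID to the
 * requested tidptr locations and broadcasts info.cond so the parent can
 * return the new TID to the guest. */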
3791 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 3792 typedef struct { 3793 CPUState *env; 3794 pthread_mutex_t mutex; 3795 pthread_cond_t cond; 3796 pthread_t thread; 3797 uint32_t tid; 3798 abi_ulong child_tidptr; 3799 abi_ulong parent_tidptr; 3800 sigset_t sigmask; 3801 } new_thread_info; 3802 3803 static void *clone_func(void *arg) 3804 { 3805 new_thread_info *info = arg; 3806 CPUState *env; 3807 TaskState *ts; 3808 3809 env = info->env; 3810 thread_env = env; 3811 ts = (TaskState *)thread_env->opaque; 3812 info->tid = gettid(); 3813 env->host_tid = info->tid; 3814 task_settid(ts); 3815 if (info->child_tidptr) 3816 put_user_u32(info->tid, info->child_tidptr); 3817 if (info->parent_tidptr) 3818 put_user_u32(info->tid, info->parent_tidptr); 3819 /* Enable signals. */ 3820 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 3821 /* Signal to the parent that we're ready. */ 3822 pthread_mutex_lock(&info->mutex); 3823 pthread_cond_broadcast(&info->cond); 3824 pthread_mutex_unlock(&info->mutex); 3825 /* Wait until the parent has finshed initializing the tls state. */ 3826 pthread_mutex_lock(&clone_lock); 3827 pthread_mutex_unlock(&clone_lock); 3828 cpu_loop(env); 3829 /* never exits */ 3830 return NULL; 3831 } 3832 #else 3833 3834 static int clone_func(void *arg) 3835 { 3836 CPUState *env = arg; 3837 cpu_loop(env); 3838 /* never exits */ 3839 return 0; 3840 } 3841 #endif 3842 3843 /* do_fork() Must return host values and target errnos (unlike most 3844 do_*() functions). */ 3845 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp, 3846 abi_ulong parent_tidptr, target_ulong newtls, 3847 abi_ulong child_tidptr) 3848 { 3849 int ret; 3850 TaskState *ts; 3851 CPUState *new_env; 3852 #if defined(CONFIG_USE_NPTL) 3853 unsigned int nptl_flags; 3854 sigset_t sigmask; 3855 #else 3856 uint8_t *new_stack; 3857 #endif 3858 3859 /* Emulate vfork() with fork() */ 3860 if (flags & CLONE_VFORK) 3861 flags &= ~(CLONE_VFORK | CLONE_VM); 3862 3863 if (flags & CLONE_VM) { 3864 TaskState *parent_ts = (TaskState *)env->opaque; 3865 #if defined(CONFIG_USE_NPTL) 3866 new_thread_info info; 3867 pthread_attr_t attr; 3868 #endif 3869 ts = qemu_mallocz(sizeof(TaskState)); 3870 init_task_state(ts); 3871 /* we create a new CPU instance. */ 3872 new_env = cpu_copy(env); 3873 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC) 3874 cpu_reset(new_env); 3875 #endif 3876 /* Init regs that differ from the parent. */ 3877 cpu_clone_regs(new_env, newsp); 3878 new_env->opaque = ts; 3879 ts->bprm = parent_ts->bprm; 3880 ts->info = parent_ts->info; 3881 #if defined(CONFIG_USE_NPTL) 3882 nptl_flags = flags; 3883 flags &= ~CLONE_NPTL_FLAGS2; 3884 3885 if (nptl_flags & CLONE_CHILD_CLEARTID) { 3886 ts->child_tidptr = child_tidptr; 3887 } 3888 3889 if (nptl_flags & CLONE_SETTLS) 3890 cpu_set_tls (new_env, newtls); 3891 3892 /* Grab a mutex so that thread setup appears atomic. 
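   clone_lock is held across the whole setup below; clone_func() acquires and
   immediately releases it before entering cpu_loop(), so the new thread cannot
   start running guest code until this parent-side setup is complete.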
*/ 3893 pthread_mutex_lock(&clone_lock); 3894 3895 memset(&info, 0, sizeof(info)); 3896 pthread_mutex_init(&info.mutex, NULL); 3897 pthread_mutex_lock(&info.mutex); 3898 pthread_cond_init(&info.cond, NULL); 3899 info.env = new_env; 3900 if (nptl_flags & CLONE_CHILD_SETTID) 3901 info.child_tidptr = child_tidptr; 3902 if (nptl_flags & CLONE_PARENT_SETTID) 3903 info.parent_tidptr = parent_tidptr; 3904 3905 ret = pthread_attr_init(&attr); 3906 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 3907 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 3908 /* It is not safe to deliver signals until the child has finished 3909 initializing, so temporarily block all signals. */ 3910 sigfillset(&sigmask); 3911 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 3912 3913 ret = pthread_create(&info.thread, &attr, clone_func, &info); 3914 /* TODO: Free new CPU state if thread creation failed. */ 3915 3916 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 3917 pthread_attr_destroy(&attr); 3918 if (ret == 0) { 3919 /* Wait for the child to initialize. */ 3920 pthread_cond_wait(&info.cond, &info.mutex); 3921 ret = info.tid; 3922 if (flags & CLONE_PARENT_SETTID) 3923 put_user_u32(ret, parent_tidptr); 3924 } else { 3925 ret = -1; 3926 } 3927 pthread_mutex_unlock(&info.mutex); 3928 pthread_cond_destroy(&info.cond); 3929 pthread_mutex_destroy(&info.mutex); 3930 pthread_mutex_unlock(&clone_lock); 3931 #else 3932 if (flags & CLONE_NPTL_FLAGS2) 3933 return -EINVAL; 3934 /* This is probably going to die very quickly, but do it anyway. */ 3935 new_stack = qemu_mallocz (NEW_STACK_SIZE); 3936 #ifdef __ia64__ 3937 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env); 3938 #else 3939 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env); 3940 #endif 3941 #endif 3942 } else { 3943 /* if no CLONE_VM, we consider it is a fork */ 3944 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 3945 return -EINVAL; 3946 fork_start(); 3947 ret = fork(); 3948 if (ret == 0) { 3949 /* Child Process. */ 3950 cpu_clone_regs(env, newsp); 3951 fork_end(1); 3952 #if defined(CONFIG_USE_NPTL) 3953 /* There is a race condition here. The parent process could 3954 theoretically read the TID in the child process before the child 3955 tid is set. This would require using either ptrace 3956 (not implemented) or having *_tidptr to point at a shared memory 3957 mapping. We can't repeat the spinlock hack used above because 3958 the child process gets its own copy of the lock. */ 3959 if (flags & CLONE_CHILD_SETTID) 3960 put_user_u32(gettid(), child_tidptr); 3961 if (flags & CLONE_PARENT_SETTID) 3962 put_user_u32(gettid(), parent_tidptr); 3963 ts = (TaskState *)env->opaque; 3964 if (flags & CLONE_SETTLS) 3965 cpu_set_tls (env, newtls); 3966 if (flags & CLONE_CHILD_CLEARTID) 3967 ts->child_tidptr = child_tidptr; 3968 #endif 3969 } else { 3970 fork_end(0); 3971 } 3972 } 3973 return ret; 3974 } 3975 3976 /* warning : doesn't handle linux specific flags... 
*/ 3977 static int target_to_host_fcntl_cmd(int cmd) 3978 { 3979 switch(cmd) { 3980 case TARGET_F_DUPFD: 3981 case TARGET_F_GETFD: 3982 case TARGET_F_SETFD: 3983 case TARGET_F_GETFL: 3984 case TARGET_F_SETFL: 3985 return cmd; 3986 case TARGET_F_GETLK: 3987 return F_GETLK; 3988 case TARGET_F_SETLK: 3989 return F_SETLK; 3990 case TARGET_F_SETLKW: 3991 return F_SETLKW; 3992 case TARGET_F_GETOWN: 3993 return F_GETOWN; 3994 case TARGET_F_SETOWN: 3995 return F_SETOWN; 3996 case TARGET_F_GETSIG: 3997 return F_GETSIG; 3998 case TARGET_F_SETSIG: 3999 return F_SETSIG; 4000 #if TARGET_ABI_BITS == 32 4001 case TARGET_F_GETLK64: 4002 return F_GETLK64; 4003 case TARGET_F_SETLK64: 4004 return F_SETLK64; 4005 case TARGET_F_SETLKW64: 4006 return F_SETLKW64; 4007 #endif 4008 case TARGET_F_SETLEASE: 4009 return F_SETLEASE; 4010 case TARGET_F_GETLEASE: 4011 return F_GETLEASE; 4012 #ifdef F_DUPFD_CLOEXEC 4013 case TARGET_F_DUPFD_CLOEXEC: 4014 return F_DUPFD_CLOEXEC; 4015 #endif 4016 case TARGET_F_NOTIFY: 4017 return F_NOTIFY; 4018 default: 4019 return -TARGET_EINVAL; 4020 } 4021 return -TARGET_EINVAL; 4022 } 4023 4024 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4025 { 4026 struct flock fl; 4027 struct target_flock *target_fl; 4028 struct flock64 fl64; 4029 struct target_flock64 *target_fl64; 4030 abi_long ret; 4031 int host_cmd = target_to_host_fcntl_cmd(cmd); 4032 4033 if (host_cmd == -TARGET_EINVAL) 4034 return host_cmd; 4035 4036 switch(cmd) { 4037 case TARGET_F_GETLK: 4038 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4039 return -TARGET_EFAULT; 4040 fl.l_type = tswap16(target_fl->l_type); 4041 fl.l_whence = tswap16(target_fl->l_whence); 4042 fl.l_start = tswapl(target_fl->l_start); 4043 fl.l_len = tswapl(target_fl->l_len); 4044 fl.l_pid = tswap32(target_fl->l_pid); 4045 unlock_user_struct(target_fl, arg, 0); 4046 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4047 if (ret == 0) { 4048 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4049 return -TARGET_EFAULT; 4050 target_fl->l_type = tswap16(fl.l_type); 4051 target_fl->l_whence = tswap16(fl.l_whence); 4052 target_fl->l_start = tswapl(fl.l_start); 4053 target_fl->l_len = tswapl(fl.l_len); 4054 target_fl->l_pid = tswap32(fl.l_pid); 4055 unlock_user_struct(target_fl, arg, 1); 4056 } 4057 break; 4058 4059 case TARGET_F_SETLK: 4060 case TARGET_F_SETLKW: 4061 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4062 return -TARGET_EFAULT; 4063 fl.l_type = tswap16(target_fl->l_type); 4064 fl.l_whence = tswap16(target_fl->l_whence); 4065 fl.l_start = tswapl(target_fl->l_start); 4066 fl.l_len = tswapl(target_fl->l_len); 4067 fl.l_pid = tswap32(target_fl->l_pid); 4068 unlock_user_struct(target_fl, arg, 0); 4069 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4070 break; 4071 4072 case TARGET_F_GETLK64: 4073 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4074 return -TARGET_EFAULT; 4075 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4076 fl64.l_whence = tswap16(target_fl64->l_whence); 4077 fl64.l_start = tswapl(target_fl64->l_start); 4078 fl64.l_len = tswapl(target_fl64->l_len); 4079 fl64.l_pid = tswap32(target_fl64->l_pid); 4080 unlock_user_struct(target_fl64, arg, 0); 4081 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4082 if (ret == 0) { 4083 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4084 return -TARGET_EFAULT; 4085 target_fl64->l_type = tswap16(fl64.l_type) >> 1; 4086 target_fl64->l_whence = tswap16(fl64.l_whence); 4087 target_fl64->l_start = tswapl(fl64.l_start); 4088 target_fl64->l_len = tswapl(fl64.l_len); 
4089 target_fl64->l_pid = tswap32(fl64.l_pid); 4090 unlock_user_struct(target_fl64, arg, 1); 4091 } 4092 break; 4093 case TARGET_F_SETLK64: 4094 case TARGET_F_SETLKW64: 4095 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4096 return -TARGET_EFAULT; 4097 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4098 fl64.l_whence = tswap16(target_fl64->l_whence); 4099 fl64.l_start = tswapl(target_fl64->l_start); 4100 fl64.l_len = tswapl(target_fl64->l_len); 4101 fl64.l_pid = tswap32(target_fl64->l_pid); 4102 unlock_user_struct(target_fl64, arg, 0); 4103 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4104 break; 4105 4106 case TARGET_F_GETFL: 4107 ret = get_errno(fcntl(fd, host_cmd, arg)); 4108 if (ret >= 0) { 4109 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4110 } 4111 break; 4112 4113 case TARGET_F_SETFL: 4114 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4115 break; 4116 4117 case TARGET_F_SETOWN: 4118 case TARGET_F_GETOWN: 4119 case TARGET_F_SETSIG: 4120 case TARGET_F_GETSIG: 4121 case TARGET_F_SETLEASE: 4122 case TARGET_F_GETLEASE: 4123 ret = get_errno(fcntl(fd, host_cmd, arg)); 4124 break; 4125 4126 default: 4127 ret = get_errno(fcntl(fd, cmd, arg)); 4128 break; 4129 } 4130 return ret; 4131 } 4132 4133 #ifdef USE_UID16 4134 4135 static inline int high2lowuid(int uid) 4136 { 4137 if (uid > 65535) 4138 return 65534; 4139 else 4140 return uid; 4141 } 4142 4143 static inline int high2lowgid(int gid) 4144 { 4145 if (gid > 65535) 4146 return 65534; 4147 else 4148 return gid; 4149 } 4150 4151 static inline int low2highuid(int uid) 4152 { 4153 if ((int16_t)uid == -1) 4154 return -1; 4155 else 4156 return uid; 4157 } 4158 4159 static inline int low2highgid(int gid) 4160 { 4161 if ((int16_t)gid == -1) 4162 return -1; 4163 else 4164 return gid; 4165 } 4166 4167 #endif /* USE_UID16 */ 4168 4169 void syscall_init(void) 4170 { 4171 IOCTLEntry *ie; 4172 const argtype *arg_type; 4173 int size; 4174 int i; 4175 4176 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4177 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4178 #include "syscall_types.h" 4179 #undef STRUCT 4180 #undef STRUCT_SPECIAL 4181 4182 /* we patch the ioctl size if necessary. We rely on the fact that 4183 no ioctl has all the bits at '1' in the size field */ 4184 ie = ioctl_entries; 4185 while (ie->target_cmd != 0) { 4186 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4187 TARGET_IOC_SIZEMASK) { 4188 arg_type = ie->arg_type; 4189 if (arg_type[0] != TYPE_PTR) { 4190 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4191 ie->target_cmd); 4192 exit(1); 4193 } 4194 arg_type++; 4195 size = thunk_type_size(arg_type, 0); 4196 ie->target_cmd = (ie->target_cmd & 4197 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4198 (size << TARGET_IOC_SIZESHIFT); 4199 } 4200 4201 /* Build target_to_host_errno_table[] table from 4202 * host_to_target_errno_table[]. 
*/ 4203 for (i=0; i < ERRNO_TABLE_SIZE; i++) 4204 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4205 4206 /* automatic consistency check if same arch */ 4207 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4208 (defined(__x86_64__) && defined(TARGET_X86_64)) 4209 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4210 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4211 ie->name, ie->target_cmd, ie->host_cmd); 4212 } 4213 #endif 4214 ie++; 4215 } 4216 } 4217 4218 #if TARGET_ABI_BITS == 32 4219 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4220 { 4221 #ifdef TARGET_WORDS_BIGENDIAN 4222 return ((uint64_t)word0 << 32) | word1; 4223 #else 4224 return ((uint64_t)word1 << 32) | word0; 4225 #endif 4226 } 4227 #else /* TARGET_ABI_BITS == 32 */ 4228 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4229 { 4230 return word0; 4231 } 4232 #endif /* TARGET_ABI_BITS != 32 */ 4233 4234 #ifdef TARGET_NR_truncate64 4235 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4236 abi_long arg2, 4237 abi_long arg3, 4238 abi_long arg4) 4239 { 4240 #ifdef TARGET_ARM 4241 if (((CPUARMState *)cpu_env)->eabi) 4242 { 4243 arg2 = arg3; 4244 arg3 = arg4; 4245 } 4246 #endif 4247 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4248 } 4249 #endif 4250 4251 #ifdef TARGET_NR_ftruncate64 4252 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4253 abi_long arg2, 4254 abi_long arg3, 4255 abi_long arg4) 4256 { 4257 #ifdef TARGET_ARM 4258 if (((CPUARMState *)cpu_env)->eabi) 4259 { 4260 arg2 = arg3; 4261 arg3 = arg4; 4262 } 4263 #endif 4264 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4265 } 4266 #endif 4267 4268 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4269 abi_ulong target_addr) 4270 { 4271 struct target_timespec *target_ts; 4272 4273 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4274 return -TARGET_EFAULT; 4275 host_ts->tv_sec = tswapl(target_ts->tv_sec); 4276 host_ts->tv_nsec = tswapl(target_ts->tv_nsec); 4277 unlock_user_struct(target_ts, target_addr, 0); 4278 return 0; 4279 } 4280 4281 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4282 struct timespec *host_ts) 4283 { 4284 struct target_timespec *target_ts; 4285 4286 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4287 return -TARGET_EFAULT; 4288 target_ts->tv_sec = tswapl(host_ts->tv_sec); 4289 target_ts->tv_nsec = tswapl(host_ts->tv_nsec); 4290 unlock_user_struct(target_ts, target_addr, 1); 4291 return 0; 4292 } 4293 4294 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4295 static inline abi_long host_to_target_stat64(void *cpu_env, 4296 abi_ulong target_addr, 4297 struct stat *host_st) 4298 { 4299 #ifdef TARGET_ARM 4300 if (((CPUARMState *)cpu_env)->eabi) { 4301 struct target_eabi_stat64 *target_st; 4302 4303 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4304 return -TARGET_EFAULT; 4305 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4306 __put_user(host_st->st_dev, &target_st->st_dev); 4307 __put_user(host_st->st_ino, &target_st->st_ino); 4308 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4309 __put_user(host_st->st_ino, &target_st->__st_ino); 4310 #endif 4311 __put_user(host_st->st_mode, &target_st->st_mode); 4312 __put_user(host_st->st_nlink, &target_st->st_nlink); 4313 __put_user(host_st->st_uid, &target_st->st_uid); 4314 __put_user(host_st->st_gid, 
&target_st->st_gid); 4315 __put_user(host_st->st_rdev, &target_st->st_rdev); 4316 __put_user(host_st->st_size, &target_st->st_size); 4317 __put_user(host_st->st_blksize, &target_st->st_blksize); 4318 __put_user(host_st->st_blocks, &target_st->st_blocks); 4319 __put_user(host_st->st_atime, &target_st->target_st_atime); 4320 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4321 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4322 unlock_user_struct(target_st, target_addr, 1); 4323 } else 4324 #endif 4325 { 4326 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA) 4327 struct target_stat *target_st; 4328 #else 4329 struct target_stat64 *target_st; 4330 #endif 4331 4332 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4333 return -TARGET_EFAULT; 4334 memset(target_st, 0, sizeof(*target_st)); 4335 __put_user(host_st->st_dev, &target_st->st_dev); 4336 __put_user(host_st->st_ino, &target_st->st_ino); 4337 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4338 __put_user(host_st->st_ino, &target_st->__st_ino); 4339 #endif 4340 __put_user(host_st->st_mode, &target_st->st_mode); 4341 __put_user(host_st->st_nlink, &target_st->st_nlink); 4342 __put_user(host_st->st_uid, &target_st->st_uid); 4343 __put_user(host_st->st_gid, &target_st->st_gid); 4344 __put_user(host_st->st_rdev, &target_st->st_rdev); 4345 /* XXX: better use of kernel struct */ 4346 __put_user(host_st->st_size, &target_st->st_size); 4347 __put_user(host_st->st_blksize, &target_st->st_blksize); 4348 __put_user(host_st->st_blocks, &target_st->st_blocks); 4349 __put_user(host_st->st_atime, &target_st->target_st_atime); 4350 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4351 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4352 unlock_user_struct(target_st, target_addr, 1); 4353 } 4354 4355 return 0; 4356 } 4357 #endif 4358 4359 #if defined(CONFIG_USE_NPTL) 4360 /* ??? Using host futex calls even when target atomic operations 4361 are not really atomic probably breaks things. However implementing 4362 futexes locally would make futexes shared between multiple processes 4363 tricky. However they're probably useless because guest atomic 4364 operations won't work either. */ 4365 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4366 target_ulong uaddr2, int val3) 4367 { 4368 struct timespec ts, *pts; 4369 int base_op; 4370 4371 /* ??? We assume FUTEX_* constants are the same on both host 4372 and target. */ 4373 #ifdef FUTEX_CMD_MASK 4374 base_op = op & FUTEX_CMD_MASK; 4375 #else 4376 base_op = op; 4377 #endif 4378 switch (base_op) { 4379 case FUTEX_WAIT: 4380 if (timeout) { 4381 pts = &ts; 4382 target_to_host_timespec(pts, timeout); 4383 } else { 4384 pts = NULL; 4385 } 4386 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4387 pts, NULL, 0)); 4388 case FUTEX_WAKE: 4389 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4390 case FUTEX_FD: 4391 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4392 case FUTEX_REQUEUE: 4393 case FUTEX_CMP_REQUEUE: 4394 case FUTEX_WAKE_OP: 4395 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4396 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4397 But the prototype takes a `struct timespec *'; insert casts 4398 to satisfy the compiler. We do not need to tswap TIMEOUT 4399 since it's not compared to guest memory. 
*/ 4400 pts = (struct timespec *)(uintptr_t) timeout; 4401 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4402 g2h(uaddr2), 4403 (base_op == FUTEX_CMP_REQUEUE 4404 ? tswap32(val3) 4405 : val3))); 4406 default: 4407 return -TARGET_ENOSYS; 4408 } 4409 } 4410 #endif 4411 4412 /* Map host to target signal numbers for the wait family of syscalls. 4413 Assume all other status bits are the same. */ 4414 static int host_to_target_waitstatus(int status) 4415 { 4416 if (WIFSIGNALED(status)) { 4417 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4418 } 4419 if (WIFSTOPPED(status)) { 4420 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4421 | (status & 0xff); 4422 } 4423 return status; 4424 } 4425 4426 int get_osversion(void) 4427 { 4428 static int osversion; 4429 struct new_utsname buf; 4430 const char *s; 4431 int i, n, tmp; 4432 if (osversion) 4433 return osversion; 4434 if (qemu_uname_release && *qemu_uname_release) { 4435 s = qemu_uname_release; 4436 } else { 4437 if (sys_uname(&buf)) 4438 return 0; 4439 s = buf.release; 4440 } 4441 tmp = 0; 4442 for (i = 0; i < 3; i++) { 4443 n = 0; 4444 while (*s >= '0' && *s <= '9') { 4445 n *= 10; 4446 n += *s - '0'; 4447 s++; 4448 } 4449 tmp = (tmp << 8) + n; 4450 if (*s == '.') 4451 s++; 4452 } 4453 osversion = tmp; 4454 return osversion; 4455 } 4456 4457 /* do_syscall() should always have a single exit point at the end so 4458 that actions, such as logging of syscall results, can be performed. 4459 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 4460 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 4461 abi_long arg2, abi_long arg3, abi_long arg4, 4462 abi_long arg5, abi_long arg6) 4463 { 4464 abi_long ret; 4465 struct stat st; 4466 struct statfs stfs; 4467 void *p; 4468 4469 #ifdef DEBUG 4470 gemu_log("syscall %d", num); 4471 #endif 4472 if(do_strace) 4473 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 4474 4475 switch(num) { 4476 case TARGET_NR_exit: 4477 #ifdef CONFIG_USE_NPTL 4478 /* In old applications this may be used to implement _exit(2). 4479 However in threaded applications it is used for thread termination, 4480 and _exit_group is used for application termination. 4481 Do thread termination if we have more than one thread. */ 4482 /* FIXME: This probably breaks if a signal arrives. We should probably 4483 be disabling signals. */ 4484 if (first_cpu->next_cpu) { 4485 TaskState *ts; 4486 CPUState **lastp; 4487 CPUState *p; 4488 4489 cpu_list_lock(); 4490 lastp = &first_cpu; 4491 p = first_cpu; 4492 while (p && p != (CPUState *)cpu_env) { 4493 lastp = &p->next_cpu; 4494 p = p->next_cpu; 4495 } 4496 /* If we didn't find the CPU for this thread then something is 4497 horribly wrong. */ 4498 if (!p) 4499 abort(); 4500 /* Remove the CPU from the list.
*/ 4501 *lastp = p->next_cpu; 4502 cpu_list_unlock(); 4503 ts = ((CPUState *)cpu_env)->opaque; 4504 if (ts->child_tidptr) { 4505 put_user_u32(0, ts->child_tidptr); 4506 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 4507 NULL, NULL, 0); 4508 } 4509 thread_env = NULL; 4510 qemu_free(cpu_env); 4511 qemu_free(ts); 4512 pthread_exit(NULL); 4513 } 4514 #endif 4515 #ifdef TARGET_GPROF 4516 _mcleanup(); 4517 #endif 4518 gdb_exit(cpu_env, arg1); 4519 _exit(arg1); 4520 ret = 0; /* avoid warning */ 4521 break; 4522 case TARGET_NR_read: 4523 if (arg3 == 0) 4524 ret = 0; 4525 else { 4526 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 4527 goto efault; 4528 ret = get_errno(read(arg1, p, arg3)); 4529 unlock_user(p, arg2, ret); 4530 } 4531 break; 4532 case TARGET_NR_write: 4533 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 4534 goto efault; 4535 ret = get_errno(write(arg1, p, arg3)); 4536 unlock_user(p, arg2, 0); 4537 break; 4538 case TARGET_NR_open: 4539 if (!(p = lock_user_string(arg1))) 4540 goto efault; 4541 ret = get_errno(open(path(p), 4542 target_to_host_bitmask(arg2, fcntl_flags_tbl), 4543 arg3)); 4544 unlock_user(p, arg1, 0); 4545 break; 4546 #if defined(TARGET_NR_openat) && defined(__NR_openat) 4547 case TARGET_NR_openat: 4548 if (!(p = lock_user_string(arg2))) 4549 goto efault; 4550 ret = get_errno(sys_openat(arg1, 4551 path(p), 4552 target_to_host_bitmask(arg3, fcntl_flags_tbl), 4553 arg4)); 4554 unlock_user(p, arg2, 0); 4555 break; 4556 #endif 4557 case TARGET_NR_close: 4558 ret = get_errno(close(arg1)); 4559 break; 4560 case TARGET_NR_brk: 4561 ret = do_brk(arg1); 4562 break; 4563 case TARGET_NR_fork: 4564 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 4565 break; 4566 #ifdef TARGET_NR_waitpid 4567 case TARGET_NR_waitpid: 4568 { 4569 int status; 4570 ret = get_errno(waitpid(arg1, &status, arg3)); 4571 if (!is_error(ret) && arg2 4572 && put_user_s32(host_to_target_waitstatus(status), arg2)) 4573 goto efault; 4574 } 4575 break; 4576 #endif 4577 #ifdef TARGET_NR_waitid 4578 case TARGET_NR_waitid: 4579 { 4580 siginfo_t info; 4581 info.si_pid = 0; 4582 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 4583 if (!is_error(ret) && arg3 && info.si_pid != 0) { 4584 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 4585 goto efault; 4586 host_to_target_siginfo(p, &info); 4587 unlock_user(p, arg3, sizeof(target_siginfo_t)); 4588 } 4589 } 4590 break; 4591 #endif 4592 #ifdef TARGET_NR_creat /* not on alpha */ 4593 case TARGET_NR_creat: 4594 if (!(p = lock_user_string(arg1))) 4595 goto efault; 4596 ret = get_errno(creat(p, arg2)); 4597 unlock_user(p, arg1, 0); 4598 break; 4599 #endif 4600 case TARGET_NR_link: 4601 { 4602 void * p2; 4603 p = lock_user_string(arg1); 4604 p2 = lock_user_string(arg2); 4605 if (!p || !p2) 4606 ret = -TARGET_EFAULT; 4607 else 4608 ret = get_errno(link(p, p2)); 4609 unlock_user(p2, arg2, 0); 4610 unlock_user(p, arg1, 0); 4611 } 4612 break; 4613 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 4614 case TARGET_NR_linkat: 4615 { 4616 void * p2 = NULL; 4617 if (!arg2 || !arg4) 4618 goto efault; 4619 p = lock_user_string(arg2); 4620 p2 = lock_user_string(arg4); 4621 if (!p || !p2) 4622 ret = -TARGET_EFAULT; 4623 else 4624 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); 4625 unlock_user(p, arg2, 0); 4626 unlock_user(p2, arg4, 0); 4627 } 4628 break; 4629 #endif 4630 case TARGET_NR_unlink: 4631 if (!(p = lock_user_string(arg1))) 4632 goto efault; 4633 ret = get_errno(unlink(p)); 4634 unlock_user(p, arg1, 0); 4635 break; 4636 #if 
defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 4637 case TARGET_NR_unlinkat: 4638 if (!(p = lock_user_string(arg2))) 4639 goto efault; 4640 ret = get_errno(sys_unlinkat(arg1, p, arg3)); 4641 unlock_user(p, arg2, 0); 4642 break; 4643 #endif 4644 case TARGET_NR_execve: 4645 { 4646 char **argp, **envp; 4647 int argc, envc; 4648 abi_ulong gp; 4649 abi_ulong guest_argp; 4650 abi_ulong guest_envp; 4651 abi_ulong addr; 4652 char **q; 4653 4654 argc = 0; 4655 guest_argp = arg2; 4656 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 4657 if (get_user_ual(addr, gp)) 4658 goto efault; 4659 if (!addr) 4660 break; 4661 argc++; 4662 } 4663 envc = 0; 4664 guest_envp = arg3; 4665 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 4666 if (get_user_ual(addr, gp)) 4667 goto efault; 4668 if (!addr) 4669 break; 4670 envc++; 4671 } 4672 4673 argp = alloca((argc + 1) * sizeof(void *)); 4674 envp = alloca((envc + 1) * sizeof(void *)); 4675 4676 for (gp = guest_argp, q = argp; gp; 4677 gp += sizeof(abi_ulong), q++) { 4678 if (get_user_ual(addr, gp)) 4679 goto execve_efault; 4680 if (!addr) 4681 break; 4682 if (!(*q = lock_user_string(addr))) 4683 goto execve_efault; 4684 } 4685 *q = NULL; 4686 4687 for (gp = guest_envp, q = envp; gp; 4688 gp += sizeof(abi_ulong), q++) { 4689 if (get_user_ual(addr, gp)) 4690 goto execve_efault; 4691 if (!addr) 4692 break; 4693 if (!(*q = lock_user_string(addr))) 4694 goto execve_efault; 4695 } 4696 *q = NULL; 4697 4698 if (!(p = lock_user_string(arg1))) 4699 goto execve_efault; 4700 ret = get_errno(execve(p, argp, envp)); 4701 unlock_user(p, arg1, 0); 4702 4703 goto execve_end; 4704 4705 execve_efault: 4706 ret = -TARGET_EFAULT; 4707 4708 execve_end: 4709 for (gp = guest_argp, q = argp; *q; 4710 gp += sizeof(abi_ulong), q++) { 4711 if (get_user_ual(addr, gp) 4712 || !addr) 4713 break; 4714 unlock_user(*q, addr, 0); 4715 } 4716 for (gp = guest_envp, q = envp; *q; 4717 gp += sizeof(abi_ulong), q++) { 4718 if (get_user_ual(addr, gp) 4719 || !addr) 4720 break; 4721 unlock_user(*q, addr, 0); 4722 } 4723 } 4724 break; 4725 case TARGET_NR_chdir: 4726 if (!(p = lock_user_string(arg1))) 4727 goto efault; 4728 ret = get_errno(chdir(p)); 4729 unlock_user(p, arg1, 0); 4730 break; 4731 #ifdef TARGET_NR_time 4732 case TARGET_NR_time: 4733 { 4734 time_t host_time; 4735 ret = get_errno(time(&host_time)); 4736 if (!is_error(ret) 4737 && arg1 4738 && put_user_sal(host_time, arg1)) 4739 goto efault; 4740 } 4741 break; 4742 #endif 4743 case TARGET_NR_mknod: 4744 if (!(p = lock_user_string(arg1))) 4745 goto efault; 4746 ret = get_errno(mknod(p, arg2, arg3)); 4747 unlock_user(p, arg1, 0); 4748 break; 4749 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 4750 case TARGET_NR_mknodat: 4751 if (!(p = lock_user_string(arg2))) 4752 goto efault; 4753 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); 4754 unlock_user(p, arg2, 0); 4755 break; 4756 #endif 4757 case TARGET_NR_chmod: 4758 if (!(p = lock_user_string(arg1))) 4759 goto efault; 4760 ret = get_errno(chmod(p, arg2)); 4761 unlock_user(p, arg1, 0); 4762 break; 4763 #ifdef TARGET_NR_break 4764 case TARGET_NR_break: 4765 goto unimplemented; 4766 #endif 4767 #ifdef TARGET_NR_oldstat 4768 case TARGET_NR_oldstat: 4769 goto unimplemented; 4770 #endif 4771 case TARGET_NR_lseek: 4772 ret = get_errno(lseek(arg1, arg2, arg3)); 4773 break; 4774 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 4775 /* Alpha specific */ 4776 case TARGET_NR_getxpid: 4777 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 4778 ret = get_errno(getpid()); 4779 
break; 4780 #endif 4781 #ifdef TARGET_NR_getpid 4782 case TARGET_NR_getpid: 4783 ret = get_errno(getpid()); 4784 break; 4785 #endif 4786 case TARGET_NR_mount: 4787 { 4788 /* need to look at the data field */ 4789 void *p2, *p3; 4790 p = lock_user_string(arg1); 4791 p2 = lock_user_string(arg2); 4792 p3 = lock_user_string(arg3); 4793 if (!p || !p2 || !p3) 4794 ret = -TARGET_EFAULT; 4795 else { 4796 /* FIXME - arg5 should be locked, but it isn't clear how to 4797 * do that since it's not guaranteed to be a NULL-terminated 4798 * string. 4799 */ 4800 if ( ! arg5 ) 4801 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 4802 else 4803 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 4804 } 4805 unlock_user(p, arg1, 0); 4806 unlock_user(p2, arg2, 0); 4807 unlock_user(p3, arg3, 0); 4808 break; 4809 } 4810 #ifdef TARGET_NR_umount 4811 case TARGET_NR_umount: 4812 if (!(p = lock_user_string(arg1))) 4813 goto efault; 4814 ret = get_errno(umount(p)); 4815 unlock_user(p, arg1, 0); 4816 break; 4817 #endif 4818 #ifdef TARGET_NR_stime /* not on alpha */ 4819 case TARGET_NR_stime: 4820 { 4821 time_t host_time; 4822 if (get_user_sal(host_time, arg1)) 4823 goto efault; 4824 ret = get_errno(stime(&host_time)); 4825 } 4826 break; 4827 #endif 4828 case TARGET_NR_ptrace: 4829 goto unimplemented; 4830 #ifdef TARGET_NR_alarm /* not on alpha */ 4831 case TARGET_NR_alarm: 4832 ret = alarm(arg1); 4833 break; 4834 #endif 4835 #ifdef TARGET_NR_oldfstat 4836 case TARGET_NR_oldfstat: 4837 goto unimplemented; 4838 #endif 4839 #ifdef TARGET_NR_pause /* not on alpha */ 4840 case TARGET_NR_pause: 4841 ret = get_errno(pause()); 4842 break; 4843 #endif 4844 #ifdef TARGET_NR_utime 4845 case TARGET_NR_utime: 4846 { 4847 struct utimbuf tbuf, *host_tbuf; 4848 struct target_utimbuf *target_tbuf; 4849 if (arg2) { 4850 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 4851 goto efault; 4852 tbuf.actime = tswapl(target_tbuf->actime); 4853 tbuf.modtime = tswapl(target_tbuf->modtime); 4854 unlock_user_struct(target_tbuf, arg2, 0); 4855 host_tbuf = &tbuf; 4856 } else { 4857 host_tbuf = NULL; 4858 } 4859 if (!(p = lock_user_string(arg1))) 4860 goto efault; 4861 ret = get_errno(utime(p, host_tbuf)); 4862 unlock_user(p, arg1, 0); 4863 } 4864 break; 4865 #endif 4866 case TARGET_NR_utimes: 4867 { 4868 struct timeval *tvp, tv[2]; 4869 if (arg2) { 4870 if (copy_from_user_timeval(&tv[0], arg2) 4871 || copy_from_user_timeval(&tv[1], 4872 arg2 + sizeof(struct target_timeval))) 4873 goto efault; 4874 tvp = tv; 4875 } else { 4876 tvp = NULL; 4877 } 4878 if (!(p = lock_user_string(arg1))) 4879 goto efault; 4880 ret = get_errno(utimes(p, tvp)); 4881 unlock_user(p, arg1, 0); 4882 } 4883 break; 4884 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 4885 case TARGET_NR_futimesat: 4886 { 4887 struct timeval *tvp, tv[2]; 4888 if (arg3) { 4889 if (copy_from_user_timeval(&tv[0], arg3) 4890 || copy_from_user_timeval(&tv[1], 4891 arg3 + sizeof(struct target_timeval))) 4892 goto efault; 4893 tvp = tv; 4894 } else { 4895 tvp = NULL; 4896 } 4897 if (!(p = lock_user_string(arg2))) 4898 goto efault; 4899 ret = get_errno(sys_futimesat(arg1, path(p), tvp)); 4900 unlock_user(p, arg2, 0); 4901 } 4902 break; 4903 #endif 4904 #ifdef TARGET_NR_stty 4905 case TARGET_NR_stty: 4906 goto unimplemented; 4907 #endif 4908 #ifdef TARGET_NR_gtty 4909 case TARGET_NR_gtty: 4910 goto unimplemented; 4911 #endif 4912 case TARGET_NR_access: 4913 if (!(p = lock_user_string(arg1))) 4914 goto efault; 4915 ret = get_errno(access(path(p), 
arg2)); 4916 unlock_user(p, arg1, 0); 4917 break; 4918 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 4919 case TARGET_NR_faccessat: 4920 if (!(p = lock_user_string(arg2))) 4921 goto efault; 4922 ret = get_errno(sys_faccessat(arg1, p, arg3)); 4923 unlock_user(p, arg2, 0); 4924 break; 4925 #endif 4926 #ifdef TARGET_NR_nice /* not on alpha */ 4927 case TARGET_NR_nice: 4928 ret = get_errno(nice(arg1)); 4929 break; 4930 #endif 4931 #ifdef TARGET_NR_ftime 4932 case TARGET_NR_ftime: 4933 goto unimplemented; 4934 #endif 4935 case TARGET_NR_sync: 4936 sync(); 4937 ret = 0; 4938 break; 4939 case TARGET_NR_kill: 4940 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 4941 break; 4942 case TARGET_NR_rename: 4943 { 4944 void *p2; 4945 p = lock_user_string(arg1); 4946 p2 = lock_user_string(arg2); 4947 if (!p || !p2) 4948 ret = -TARGET_EFAULT; 4949 else 4950 ret = get_errno(rename(p, p2)); 4951 unlock_user(p2, arg2, 0); 4952 unlock_user(p, arg1, 0); 4953 } 4954 break; 4955 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 4956 case TARGET_NR_renameat: 4957 { 4958 void *p2; 4959 p = lock_user_string(arg2); 4960 p2 = lock_user_string(arg4); 4961 if (!p || !p2) 4962 ret = -TARGET_EFAULT; 4963 else 4964 ret = get_errno(sys_renameat(arg1, p, arg3, p2)); 4965 unlock_user(p2, arg4, 0); 4966 unlock_user(p, arg2, 0); 4967 } 4968 break; 4969 #endif 4970 case TARGET_NR_mkdir: 4971 if (!(p = lock_user_string(arg1))) 4972 goto efault; 4973 ret = get_errno(mkdir(p, arg2)); 4974 unlock_user(p, arg1, 0); 4975 break; 4976 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 4977 case TARGET_NR_mkdirat: 4978 if (!(p = lock_user_string(arg2))) 4979 goto efault; 4980 ret = get_errno(sys_mkdirat(arg1, p, arg3)); 4981 unlock_user(p, arg2, 0); 4982 break; 4983 #endif 4984 case TARGET_NR_rmdir: 4985 if (!(p = lock_user_string(arg1))) 4986 goto efault; 4987 ret = get_errno(rmdir(p)); 4988 unlock_user(p, arg1, 0); 4989 break; 4990 case TARGET_NR_dup: 4991 ret = get_errno(dup(arg1)); 4992 break; 4993 case TARGET_NR_pipe: 4994 ret = do_pipe(cpu_env, arg1, 0, 0); 4995 break; 4996 #ifdef TARGET_NR_pipe2 4997 case TARGET_NR_pipe2: 4998 ret = do_pipe(cpu_env, arg1, arg2, 1); 4999 break; 5000 #endif 5001 case TARGET_NR_times: 5002 { 5003 struct target_tms *tmsp; 5004 struct tms tms; 5005 ret = get_errno(times(&tms)); 5006 if (arg1) { 5007 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5008 if (!tmsp) 5009 goto efault; 5010 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime)); 5011 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime)); 5012 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime)); 5013 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime)); 5014 } 5015 if (!is_error(ret)) 5016 ret = host_to_target_clock_t(ret); 5017 } 5018 break; 5019 #ifdef TARGET_NR_prof 5020 case TARGET_NR_prof: 5021 goto unimplemented; 5022 #endif 5023 #ifdef TARGET_NR_signal 5024 case TARGET_NR_signal: 5025 goto unimplemented; 5026 #endif 5027 case TARGET_NR_acct: 5028 if (arg1 == 0) { 5029 ret = get_errno(acct(NULL)); 5030 } else { 5031 if (!(p = lock_user_string(arg1))) 5032 goto efault; 5033 ret = get_errno(acct(path(p))); 5034 unlock_user(p, arg1, 0); 5035 } 5036 break; 5037 #ifdef TARGET_NR_umount2 /* not on alpha */ 5038 case TARGET_NR_umount2: 5039 if (!(p = lock_user_string(arg1))) 5040 goto efault; 5041 ret = get_errno(umount2(p, arg2)); 5042 unlock_user(p, arg1, 0); 5043 break; 5044 #endif 5045 #ifdef TARGET_NR_lock 5046 case TARGET_NR_lock: 5047 
goto unimplemented; 5048 #endif 5049 case TARGET_NR_ioctl: 5050 ret = do_ioctl(arg1, arg2, arg3); 5051 break; 5052 case TARGET_NR_fcntl: 5053 ret = do_fcntl(arg1, arg2, arg3); 5054 break; 5055 #ifdef TARGET_NR_mpx 5056 case TARGET_NR_mpx: 5057 goto unimplemented; 5058 #endif 5059 case TARGET_NR_setpgid: 5060 ret = get_errno(setpgid(arg1, arg2)); 5061 break; 5062 #ifdef TARGET_NR_ulimit 5063 case TARGET_NR_ulimit: 5064 goto unimplemented; 5065 #endif 5066 #ifdef TARGET_NR_oldolduname 5067 case TARGET_NR_oldolduname: 5068 goto unimplemented; 5069 #endif 5070 case TARGET_NR_umask: 5071 ret = get_errno(umask(arg1)); 5072 break; 5073 case TARGET_NR_chroot: 5074 if (!(p = lock_user_string(arg1))) 5075 goto efault; 5076 ret = get_errno(chroot(p)); 5077 unlock_user(p, arg1, 0); 5078 break; 5079 case TARGET_NR_ustat: 5080 goto unimplemented; 5081 case TARGET_NR_dup2: 5082 ret = get_errno(dup2(arg1, arg2)); 5083 break; 5084 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5085 case TARGET_NR_dup3: 5086 ret = get_errno(dup3(arg1, arg2, arg3)); 5087 break; 5088 #endif 5089 #ifdef TARGET_NR_getppid /* not on alpha */ 5090 case TARGET_NR_getppid: 5091 ret = get_errno(getppid()); 5092 break; 5093 #endif 5094 case TARGET_NR_getpgrp: 5095 ret = get_errno(getpgrp()); 5096 break; 5097 case TARGET_NR_setsid: 5098 ret = get_errno(setsid()); 5099 break; 5100 #ifdef TARGET_NR_sigaction 5101 case TARGET_NR_sigaction: 5102 { 5103 #if defined(TARGET_ALPHA) 5104 struct target_sigaction act, oact, *pact = 0; 5105 struct target_old_sigaction *old_act; 5106 if (arg2) { 5107 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5108 goto efault; 5109 act._sa_handler = old_act->_sa_handler; 5110 target_siginitset(&act.sa_mask, old_act->sa_mask); 5111 act.sa_flags = old_act->sa_flags; 5112 act.sa_restorer = 0; 5113 unlock_user_struct(old_act, arg2, 0); 5114 pact = &act; 5115 } 5116 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5117 if (!is_error(ret) && arg3) { 5118 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5119 goto efault; 5120 old_act->_sa_handler = oact._sa_handler; 5121 old_act->sa_mask = oact.sa_mask.sig[0]; 5122 old_act->sa_flags = oact.sa_flags; 5123 unlock_user_struct(old_act, arg3, 1); 5124 } 5125 #elif defined(TARGET_MIPS) 5126 struct target_sigaction act, oact, *pact, *old_act; 5127 5128 if (arg2) { 5129 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5130 goto efault; 5131 act._sa_handler = old_act->_sa_handler; 5132 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5133 act.sa_flags = old_act->sa_flags; 5134 unlock_user_struct(old_act, arg2, 0); 5135 pact = &act; 5136 } else { 5137 pact = NULL; 5138 } 5139 5140 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5141 5142 if (!is_error(ret) && arg3) { 5143 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5144 goto efault; 5145 old_act->_sa_handler = oact._sa_handler; 5146 old_act->sa_flags = oact.sa_flags; 5147 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5148 old_act->sa_mask.sig[1] = 0; 5149 old_act->sa_mask.sig[2] = 0; 5150 old_act->sa_mask.sig[3] = 0; 5151 unlock_user_struct(old_act, arg3, 1); 5152 } 5153 #else 5154 struct target_old_sigaction *old_act; 5155 struct target_sigaction act, oact, *pact; 5156 if (arg2) { 5157 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5158 goto efault; 5159 act._sa_handler = old_act->_sa_handler; 5160 target_siginitset(&act.sa_mask, old_act->sa_mask); 5161 act.sa_flags = old_act->sa_flags; 5162 act.sa_restorer = old_act->sa_restorer; 5163 unlock_user_struct(old_act, arg2, 0); 
5164 pact = &act; 5165 } else { 5166 pact = NULL; 5167 } 5168 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5169 if (!is_error(ret) && arg3) { 5170 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5171 goto efault; 5172 old_act->_sa_handler = oact._sa_handler; 5173 old_act->sa_mask = oact.sa_mask.sig[0]; 5174 old_act->sa_flags = oact.sa_flags; 5175 old_act->sa_restorer = oact.sa_restorer; 5176 unlock_user_struct(old_act, arg3, 1); 5177 } 5178 #endif 5179 } 5180 break; 5181 #endif 5182 case TARGET_NR_rt_sigaction: 5183 { 5184 #if defined(TARGET_ALPHA) 5185 struct target_sigaction act, oact, *pact = 0; 5186 struct target_rt_sigaction *rt_act; 5187 /* ??? arg4 == sizeof(sigset_t). */ 5188 if (arg2) { 5189 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5190 goto efault; 5191 act._sa_handler = rt_act->_sa_handler; 5192 act.sa_mask = rt_act->sa_mask; 5193 act.sa_flags = rt_act->sa_flags; 5194 act.sa_restorer = arg5; 5195 unlock_user_struct(rt_act, arg2, 0); 5196 pact = &act; 5197 } 5198 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5199 if (!is_error(ret) && arg3) { 5200 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5201 goto efault; 5202 rt_act->_sa_handler = oact._sa_handler; 5203 rt_act->sa_mask = oact.sa_mask; 5204 rt_act->sa_flags = oact.sa_flags; 5205 unlock_user_struct(rt_act, arg3, 1); 5206 } 5207 #else 5208 struct target_sigaction *act; 5209 struct target_sigaction *oact; 5210 5211 if (arg2) { 5212 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5213 goto efault; 5214 } else 5215 act = NULL; 5216 if (arg3) { 5217 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5218 ret = -TARGET_EFAULT; 5219 goto rt_sigaction_fail; 5220 } 5221 } else 5222 oact = NULL; 5223 ret = get_errno(do_sigaction(arg1, act, oact)); 5224 rt_sigaction_fail: 5225 if (act) 5226 unlock_user_struct(act, arg2, 0); 5227 if (oact) 5228 unlock_user_struct(oact, arg3, 1); 5229 #endif 5230 } 5231 break; 5232 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5233 case TARGET_NR_sgetmask: 5234 { 5235 sigset_t cur_set; 5236 abi_ulong target_set; 5237 sigprocmask(0, NULL, &cur_set); 5238 host_to_target_old_sigset(&target_set, &cur_set); 5239 ret = target_set; 5240 } 5241 break; 5242 #endif 5243 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5244 case TARGET_NR_ssetmask: 5245 { 5246 sigset_t set, oset, cur_set; 5247 abi_ulong target_set = arg1; 5248 sigprocmask(0, NULL, &cur_set); 5249 target_to_host_old_sigset(&set, &target_set); 5250 sigorset(&set, &set, &cur_set); 5251 sigprocmask(SIG_SETMASK, &set, &oset); 5252 host_to_target_old_sigset(&target_set, &oset); 5253 ret = target_set; 5254 } 5255 break; 5256 #endif 5257 #ifdef TARGET_NR_sigprocmask 5258 case TARGET_NR_sigprocmask: 5259 { 5260 #if defined(TARGET_ALPHA) 5261 sigset_t set, oldset; 5262 abi_ulong mask; 5263 int how; 5264 5265 switch (arg1) { 5266 case TARGET_SIG_BLOCK: 5267 how = SIG_BLOCK; 5268 break; 5269 case TARGET_SIG_UNBLOCK: 5270 how = SIG_UNBLOCK; 5271 break; 5272 case TARGET_SIG_SETMASK: 5273 how = SIG_SETMASK; 5274 break; 5275 default: 5276 ret = -TARGET_EINVAL; 5277 goto fail; 5278 } 5279 mask = arg2; 5280 target_to_host_old_sigset(&set, &mask); 5281 5282 ret = get_errno(sigprocmask(how, &set, &oldset)); 5283 5284 if (!is_error(ret)) { 5285 host_to_target_old_sigset(&mask, &oldset); 5286 ret = mask; 5287 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 5288 } 5289 #else 5290 sigset_t set, oldset, *set_ptr; 5291 int how; 5292 5293 if (arg2) { 5294 switch (arg1) { 5295 case TARGET_SIG_BLOCK: 5296 how = SIG_BLOCK; 5297
break; 5298 case TARGET_SIG_UNBLOCK: 5299 how = SIG_UNBLOCK; 5300 break; 5301 case TARGET_SIG_SETMASK: 5302 how = SIG_SETMASK; 5303 break; 5304 default: 5305 ret = -TARGET_EINVAL; 5306 goto fail; 5307 } 5308 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5309 goto efault; 5310 target_to_host_old_sigset(&set, p); 5311 unlock_user(p, arg2, 0); 5312 set_ptr = &set; 5313 } else { 5314 how = 0; 5315 set_ptr = NULL; 5316 } 5317 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 5318 if (!is_error(ret) && arg3) { 5319 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 5320 goto efault; 5321 host_to_target_old_sigset(p, &oldset); 5322 unlock_user(p, arg3, sizeof(target_sigset_t)); 5323 } 5324 #endif 5325 } 5326 break; 5327 #endif 5328 case TARGET_NR_rt_sigprocmask: 5329 { 5330 int how = arg1; 5331 sigset_t set, oldset, *set_ptr; 5332 5333 if (arg2) { 5334 switch(how) { 5335 case TARGET_SIG_BLOCK: 5336 how = SIG_BLOCK; 5337 break; 5338 case TARGET_SIG_UNBLOCK: 5339 how = SIG_UNBLOCK; 5340 break; 5341 case TARGET_SIG_SETMASK: 5342 how = SIG_SETMASK; 5343 break; 5344 default: 5345 ret = -TARGET_EINVAL; 5346 goto fail; 5347 } 5348 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5349 goto efault; 5350 target_to_host_sigset(&set, p); 5351 unlock_user(p, arg2, 0); 5352 set_ptr = &set; 5353 } else { 5354 how = 0; 5355 set_ptr = NULL; 5356 } 5357 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 5358 if (!is_error(ret) && arg3) { 5359 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 5360 goto efault; 5361 host_to_target_sigset(p, &oldset); 5362 unlock_user(p, arg3, sizeof(target_sigset_t)); 5363 } 5364 } 5365 break; 5366 #ifdef TARGET_NR_sigpending 5367 case TARGET_NR_sigpending: 5368 { 5369 sigset_t set; 5370 ret = get_errno(sigpending(&set)); 5371 if (!is_error(ret)) { 5372 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 5373 goto efault; 5374 host_to_target_old_sigset(p, &set); 5375 unlock_user(p, arg1, sizeof(target_sigset_t)); 5376 } 5377 } 5378 break; 5379 #endif 5380 case TARGET_NR_rt_sigpending: 5381 { 5382 sigset_t set; 5383 ret = get_errno(sigpending(&set)); 5384 if (!is_error(ret)) { 5385 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 5386 goto efault; 5387 host_to_target_sigset(p, &set); 5388 unlock_user(p, arg1, sizeof(target_sigset_t)); 5389 } 5390 } 5391 break; 5392 #ifdef TARGET_NR_sigsuspend 5393 case TARGET_NR_sigsuspend: 5394 { 5395 sigset_t set; 5396 #if defined(TARGET_ALPHA) 5397 abi_ulong mask = arg1; 5398 target_to_host_old_sigset(&set, &mask); 5399 #else 5400 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5401 goto efault; 5402 target_to_host_old_sigset(&set, p); 5403 unlock_user(p, arg1, 0); 5404 #endif 5405 ret = get_errno(sigsuspend(&set)); 5406 } 5407 break; 5408 #endif 5409 case TARGET_NR_rt_sigsuspend: 5410 { 5411 sigset_t set; 5412 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5413 goto efault; 5414 target_to_host_sigset(&set, p); 5415 unlock_user(p, arg1, 0); 5416 ret = get_errno(sigsuspend(&set)); 5417 } 5418 break; 5419 case TARGET_NR_rt_sigtimedwait: 5420 { 5421 sigset_t set; 5422 struct timespec uts, *puts; 5423 siginfo_t uinfo; 5424 5425 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5426 goto efault; 5427 target_to_host_sigset(&set, p); 5428 unlock_user(p, arg1, 0); 5429 if (arg3) { 5430 puts = &uts; 5431 target_to_host_timespec(puts, arg3); 5432 } else { 5433 puts = NULL; 
5434 } 5435 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 5436 if (!is_error(ret) && arg2) { 5437 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 5438 goto efault; 5439 host_to_target_siginfo(p, &uinfo); 5440 unlock_user(p, arg2, sizeof(target_siginfo_t)); 5441 } 5442 } 5443 break; 5444 case TARGET_NR_rt_sigqueueinfo: 5445 { 5446 siginfo_t uinfo; 5447 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 5448 goto efault; 5449 target_to_host_siginfo(&uinfo, p); 5450 unlock_user(p, arg1, 0); 5451 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 5452 } 5453 break; 5454 #ifdef TARGET_NR_sigreturn 5455 case TARGET_NR_sigreturn: 5456 /* NOTE: ret is eax, so no transcoding needs to be done */ 5457 ret = do_sigreturn(cpu_env); 5458 break; 5459 #endif 5460 case TARGET_NR_rt_sigreturn: 5461 /* NOTE: ret is eax, so no transcoding needs to be done */ 5462 ret = do_rt_sigreturn(cpu_env); 5463 break; 5464 case TARGET_NR_sethostname: 5465 if (!(p = lock_user_string(arg1))) 5466 goto efault; 5467 ret = get_errno(sethostname(p, arg2)); 5468 unlock_user(p, arg1, 0); 5469 break; 5470 case TARGET_NR_setrlimit: 5471 { 5472 int resource = arg1; 5473 struct target_rlimit *target_rlim; 5474 struct rlimit rlim; 5475 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 5476 goto efault; 5477 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 5478 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 5479 unlock_user_struct(target_rlim, arg2, 0); 5480 ret = get_errno(setrlimit(resource, &rlim)); 5481 } 5482 break; 5483 case TARGET_NR_getrlimit: 5484 { 5485 int resource = arg1; 5486 struct target_rlimit *target_rlim; 5487 struct rlimit rlim; 5488 5489 ret = get_errno(getrlimit(resource, &rlim)); 5490 if (!is_error(ret)) { 5491 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 5492 goto efault; 5493 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 5494 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 5495 unlock_user_struct(target_rlim, arg2, 1); 5496 } 5497 } 5498 break; 5499 case TARGET_NR_getrusage: 5500 { 5501 struct rusage rusage; 5502 ret = get_errno(getrusage(arg1, &rusage)); 5503 if (!is_error(ret)) { 5504 host_to_target_rusage(arg2, &rusage); 5505 } 5506 } 5507 break; 5508 case TARGET_NR_gettimeofday: 5509 { 5510 struct timeval tv; 5511 ret = get_errno(gettimeofday(&tv, NULL)); 5512 if (!is_error(ret)) { 5513 if (copy_to_user_timeval(arg1, &tv)) 5514 goto efault; 5515 } 5516 } 5517 break; 5518 case TARGET_NR_settimeofday: 5519 { 5520 struct timeval tv; 5521 if (copy_from_user_timeval(&tv, arg1)) 5522 goto efault; 5523 ret = get_errno(settimeofday(&tv, NULL)); 5524 } 5525 break; 5526 #ifdef TARGET_NR_select 5527 case TARGET_NR_select: 5528 { 5529 struct target_sel_arg_struct *sel; 5530 abi_ulong inp, outp, exp, tvp; 5531 long nsel; 5532 5533 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 5534 goto efault; 5535 nsel = tswapl(sel->n); 5536 inp = tswapl(sel->inp); 5537 outp = tswapl(sel->outp); 5538 exp = tswapl(sel->exp); 5539 tvp = tswapl(sel->tvp); 5540 unlock_user_struct(sel, arg1, 0); 5541 ret = do_select(nsel, inp, outp, exp, tvp); 5542 } 5543 break; 5544 #endif 5545 #ifdef TARGET_NR_pselect6 5546 case TARGET_NR_pselect6: 5547 goto unimplemented_nowarn; 5548 #endif 5549 case TARGET_NR_symlink: 5550 { 5551 void *p2; 5552 p = lock_user_string(arg1); 5553 p2 = lock_user_string(arg2); 5554 if (!p || !p2) 5555 ret = -TARGET_EFAULT; 5556 else 5557 ret = get_errno(symlink(p, p2)); 5558 unlock_user(p2, arg2, 0);
5559 unlock_user(p, arg1, 0); 5560 } 5561 break; 5562 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 5563 case TARGET_NR_symlinkat: 5564 { 5565 void *p2; 5566 p = lock_user_string(arg1); 5567 p2 = lock_user_string(arg3); 5568 if (!p || !p2) 5569 ret = -TARGET_EFAULT; 5570 else 5571 ret = get_errno(sys_symlinkat(p, arg2, p2)); 5572 unlock_user(p2, arg3, 0); 5573 unlock_user(p, arg1, 0); 5574 } 5575 break; 5576 #endif 5577 #ifdef TARGET_NR_oldlstat 5578 case TARGET_NR_oldlstat: 5579 goto unimplemented; 5580 #endif 5581 case TARGET_NR_readlink: 5582 { 5583 void *p2, *temp; 5584 p = lock_user_string(arg1); 5585 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 5586 if (!p || !p2) 5587 ret = -TARGET_EFAULT; 5588 else { 5589 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) { 5590 char real[PATH_MAX]; 5591 temp = realpath(exec_path,real); 5592 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ; 5593 snprintf((char *)p2, arg3, "%s", real); 5594 } 5595 else 5596 ret = get_errno(readlink(path(p), p2, arg3)); 5597 } 5598 unlock_user(p2, arg2, ret); 5599 unlock_user(p, arg1, 0); 5600 } 5601 break; 5602 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 5603 case TARGET_NR_readlinkat: 5604 { 5605 void *p2; 5606 p = lock_user_string(arg2); 5607 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 5608 if (!p || !p2) 5609 ret = -TARGET_EFAULT; 5610 else 5611 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); 5612 unlock_user(p2, arg3, ret); 5613 unlock_user(p, arg2, 0); 5614 } 5615 break; 5616 #endif 5617 #ifdef TARGET_NR_uselib 5618 case TARGET_NR_uselib: 5619 goto unimplemented; 5620 #endif 5621 #ifdef TARGET_NR_swapon 5622 case TARGET_NR_swapon: 5623 if (!(p = lock_user_string(arg1))) 5624 goto efault; 5625 ret = get_errno(swapon(p, arg2)); 5626 unlock_user(p, arg1, 0); 5627 break; 5628 #endif 5629 case TARGET_NR_reboot: 5630 goto unimplemented; 5631 #ifdef TARGET_NR_readdir 5632 case TARGET_NR_readdir: 5633 goto unimplemented; 5634 #endif 5635 #ifdef TARGET_NR_mmap 5636 case TARGET_NR_mmap: 5637 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) 5638 { 5639 abi_ulong *v; 5640 abi_ulong v1, v2, v3, v4, v5, v6; 5641 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 5642 goto efault; 5643 v1 = tswapl(v[0]); 5644 v2 = tswapl(v[1]); 5645 v3 = tswapl(v[2]); 5646 v4 = tswapl(v[3]); 5647 v5 = tswapl(v[4]); 5648 v6 = tswapl(v[5]); 5649 unlock_user(v, arg1, 0); 5650 ret = get_errno(target_mmap(v1, v2, v3, 5651 target_to_host_bitmask(v4, mmap_flags_tbl), 5652 v5, v6)); 5653 } 5654 #else 5655 ret = get_errno(target_mmap(arg1, arg2, arg3, 5656 target_to_host_bitmask(arg4, mmap_flags_tbl), 5657 arg5, 5658 arg6)); 5659 #endif 5660 break; 5661 #endif 5662 #ifdef TARGET_NR_mmap2 5663 case TARGET_NR_mmap2: 5664 #ifndef MMAP_SHIFT 5665 #define MMAP_SHIFT 12 5666 #endif 5667 ret = get_errno(target_mmap(arg1, arg2, arg3, 5668 target_to_host_bitmask(arg4, mmap_flags_tbl), 5669 arg5, 5670 arg6 << MMAP_SHIFT)); 5671 break; 5672 #endif 5673 case TARGET_NR_munmap: 5674 ret = get_errno(target_munmap(arg1, arg2)); 5675 break; 5676 case TARGET_NR_mprotect: 5677 { 5678 TaskState *ts = ((CPUState *)cpu_env)->opaque; 5679 /* Special hack to detect libc making the stack executable. 
*/ 5680 if ((arg3 & PROT_GROWSDOWN) 5681 && arg1 >= ts->info->stack_limit 5682 && arg1 <= ts->info->start_stack) { 5683 arg3 &= ~PROT_GROWSDOWN; 5684 arg2 = arg2 + arg1 - ts->info->stack_limit; 5685 arg1 = ts->info->stack_limit; 5686 } 5687 } 5688 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 5689 break; 5690 #ifdef TARGET_NR_mremap 5691 case TARGET_NR_mremap: 5692 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 5693 break; 5694 #endif 5695 /* ??? msync/mlock/munlock are broken for softmmu. */ 5696 #ifdef TARGET_NR_msync 5697 case TARGET_NR_msync: 5698 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 5699 break; 5700 #endif 5701 #ifdef TARGET_NR_mlock 5702 case TARGET_NR_mlock: 5703 ret = get_errno(mlock(g2h(arg1), arg2)); 5704 break; 5705 #endif 5706 #ifdef TARGET_NR_munlock 5707 case TARGET_NR_munlock: 5708 ret = get_errno(munlock(g2h(arg1), arg2)); 5709 break; 5710 #endif 5711 #ifdef TARGET_NR_mlockall 5712 case TARGET_NR_mlockall: 5713 ret = get_errno(mlockall(arg1)); 5714 break; 5715 #endif 5716 #ifdef TARGET_NR_munlockall 5717 case TARGET_NR_munlockall: 5718 ret = get_errno(munlockall()); 5719 break; 5720 #endif 5721 case TARGET_NR_truncate: 5722 if (!(p = lock_user_string(arg1))) 5723 goto efault; 5724 ret = get_errno(truncate(p, arg2)); 5725 unlock_user(p, arg1, 0); 5726 break; 5727 case TARGET_NR_ftruncate: 5728 ret = get_errno(ftruncate(arg1, arg2)); 5729 break; 5730 case TARGET_NR_fchmod: 5731 ret = get_errno(fchmod(arg1, arg2)); 5732 break; 5733 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 5734 case TARGET_NR_fchmodat: 5735 if (!(p = lock_user_string(arg2))) 5736 goto efault; 5737 ret = get_errno(sys_fchmodat(arg1, p, arg3)); 5738 unlock_user(p, arg2, 0); 5739 break; 5740 #endif 5741 case TARGET_NR_getpriority: 5742 /* libc does special remapping of the return value of 5743 * sys_getpriority() so it's just easiest to call 5744 * sys_getpriority() directly rather than through libc. 
*/ 5745 ret = get_errno(sys_getpriority(arg1, arg2)); 5746 break; 5747 case TARGET_NR_setpriority: 5748 ret = get_errno(setpriority(arg1, arg2, arg3)); 5749 break; 5750 #ifdef TARGET_NR_profil 5751 case TARGET_NR_profil: 5752 goto unimplemented; 5753 #endif 5754 case TARGET_NR_statfs: 5755 if (!(p = lock_user_string(arg1))) 5756 goto efault; 5757 ret = get_errno(statfs(path(p), &stfs)); 5758 unlock_user(p, arg1, 0); 5759 convert_statfs: 5760 if (!is_error(ret)) { 5761 struct target_statfs *target_stfs; 5762 5763 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 5764 goto efault; 5765 __put_user(stfs.f_type, &target_stfs->f_type); 5766 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 5767 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 5768 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 5769 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 5770 __put_user(stfs.f_files, &target_stfs->f_files); 5771 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 5772 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 5773 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 5774 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 5775 unlock_user_struct(target_stfs, arg2, 1); 5776 } 5777 break; 5778 case TARGET_NR_fstatfs: 5779 ret = get_errno(fstatfs(arg1, &stfs)); 5780 goto convert_statfs; 5781 #ifdef TARGET_NR_statfs64 5782 case TARGET_NR_statfs64: 5783 if (!(p = lock_user_string(arg1))) 5784 goto efault; 5785 ret = get_errno(statfs(path(p), &stfs)); 5786 unlock_user(p, arg1, 0); 5787 convert_statfs64: 5788 if (!is_error(ret)) { 5789 struct target_statfs64 *target_stfs; 5790 5791 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 5792 goto efault; 5793 __put_user(stfs.f_type, &target_stfs->f_type); 5794 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 5795 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 5796 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 5797 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 5798 __put_user(stfs.f_files, &target_stfs->f_files); 5799 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 5800 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 5801 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 5802 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 5803 unlock_user_struct(target_stfs, arg3, 1); 5804 } 5805 break; 5806 case TARGET_NR_fstatfs64: 5807 ret = get_errno(fstatfs(arg1, &stfs)); 5808 goto convert_statfs64; 5809 #endif 5810 #ifdef TARGET_NR_ioperm 5811 case TARGET_NR_ioperm: 5812 goto unimplemented; 5813 #endif 5814 #ifdef TARGET_NR_socketcall 5815 case TARGET_NR_socketcall: 5816 ret = do_socketcall(arg1, arg2); 5817 break; 5818 #endif 5819 #ifdef TARGET_NR_accept 5820 case TARGET_NR_accept: 5821 ret = do_accept(arg1, arg2, arg3); 5822 break; 5823 #endif 5824 #ifdef TARGET_NR_bind 5825 case TARGET_NR_bind: 5826 ret = do_bind(arg1, arg2, arg3); 5827 break; 5828 #endif 5829 #ifdef TARGET_NR_connect 5830 case TARGET_NR_connect: 5831 ret = do_connect(arg1, arg2, arg3); 5832 break; 5833 #endif 5834 #ifdef TARGET_NR_getpeername 5835 case TARGET_NR_getpeername: 5836 ret = do_getpeername(arg1, arg2, arg3); 5837 break; 5838 #endif 5839 #ifdef TARGET_NR_getsockname 5840 case TARGET_NR_getsockname: 5841 ret = do_getsockname(arg1, arg2, arg3); 5842 break; 5843 #endif 5844 #ifdef TARGET_NR_getsockopt 5845 case TARGET_NR_getsockopt: 5846 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 5847 break; 5848 #endif 5849 #ifdef TARGET_NR_listen 5850 case TARGET_NR_listen: 5851 ret = get_errno(listen(arg1, 
arg2)); 5852 break; 5853 #endif 5854 #ifdef TARGET_NR_recv 5855 case TARGET_NR_recv: 5856 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 5857 break; 5858 #endif 5859 #ifdef TARGET_NR_recvfrom 5860 case TARGET_NR_recvfrom: 5861 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 5862 break; 5863 #endif 5864 #ifdef TARGET_NR_recvmsg 5865 case TARGET_NR_recvmsg: 5866 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 5867 break; 5868 #endif 5869 #ifdef TARGET_NR_send 5870 case TARGET_NR_send: 5871 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 5872 break; 5873 #endif 5874 #ifdef TARGET_NR_sendmsg 5875 case TARGET_NR_sendmsg: 5876 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 5877 break; 5878 #endif 5879 #ifdef TARGET_NR_sendto 5880 case TARGET_NR_sendto: 5881 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 5882 break; 5883 #endif 5884 #ifdef TARGET_NR_shutdown 5885 case TARGET_NR_shutdown: 5886 ret = get_errno(shutdown(arg1, arg2)); 5887 break; 5888 #endif 5889 #ifdef TARGET_NR_socket 5890 case TARGET_NR_socket: 5891 ret = do_socket(arg1, arg2, arg3); 5892 break; 5893 #endif 5894 #ifdef TARGET_NR_socketpair 5895 case TARGET_NR_socketpair: 5896 ret = do_socketpair(arg1, arg2, arg3, arg4); 5897 break; 5898 #endif 5899 #ifdef TARGET_NR_setsockopt 5900 case TARGET_NR_setsockopt: 5901 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 5902 break; 5903 #endif 5904 5905 case TARGET_NR_syslog: 5906 if (!(p = lock_user_string(arg2))) 5907 goto efault; 5908 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 5909 unlock_user(p, arg2, 0); 5910 break; 5911 5912 case TARGET_NR_setitimer: 5913 { 5914 struct itimerval value, ovalue, *pvalue; 5915 5916 if (arg2) { 5917 pvalue = &value; 5918 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 5919 || copy_from_user_timeval(&pvalue->it_value, 5920 arg2 + sizeof(struct target_timeval))) 5921 goto efault; 5922 } else { 5923 pvalue = NULL; 5924 } 5925 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 5926 if (!is_error(ret) && arg3) { 5927 if (copy_to_user_timeval(arg3, 5928 &ovalue.it_interval) 5929 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 5930 &ovalue.it_value)) 5931 goto efault; 5932 } 5933 } 5934 break; 5935 case TARGET_NR_getitimer: 5936 { 5937 struct itimerval value; 5938 5939 ret = get_errno(getitimer(arg1, &value)); 5940 if (!is_error(ret) && arg2) { 5941 if (copy_to_user_timeval(arg2, 5942 &value.it_interval) 5943 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 5944 &value.it_value)) 5945 goto efault; 5946 } 5947 } 5948 break; 5949 case TARGET_NR_stat: 5950 if (!(p = lock_user_string(arg1))) 5951 goto efault; 5952 ret = get_errno(stat(path(p), &st)); 5953 unlock_user(p, arg1, 0); 5954 goto do_stat; 5955 case TARGET_NR_lstat: 5956 if (!(p = lock_user_string(arg1))) 5957 goto efault; 5958 ret = get_errno(lstat(path(p), &st)); 5959 unlock_user(p, arg1, 0); 5960 goto do_stat; 5961 case TARGET_NR_fstat: 5962 { 5963 ret = get_errno(fstat(arg1, &st)); 5964 do_stat: 5965 if (!is_error(ret)) { 5966 struct target_stat *target_st; 5967 5968 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 5969 goto efault; 5970 memset(target_st, 0, sizeof(*target_st)); 5971 __put_user(st.st_dev, &target_st->st_dev); 5972 __put_user(st.st_ino, &target_st->st_ino); 5973 __put_user(st.st_mode, &target_st->st_mode); 5974 __put_user(st.st_uid, &target_st->st_uid); 5975 __put_user(st.st_gid, &target_st->st_gid); 5976 __put_user(st.st_nlink, &target_st->st_nlink); 5977 __put_user(st.st_rdev, &target_st->st_rdev); 5978 
__put_user(st.st_size, &target_st->st_size); 5979 __put_user(st.st_blksize, &target_st->st_blksize); 5980 __put_user(st.st_blocks, &target_st->st_blocks); 5981 __put_user(st.st_atime, &target_st->target_st_atime); 5982 __put_user(st.st_mtime, &target_st->target_st_mtime); 5983 __put_user(st.st_ctime, &target_st->target_st_ctime); 5984 unlock_user_struct(target_st, arg2, 1); 5985 } 5986 } 5987 break; 5988 #ifdef TARGET_NR_olduname 5989 case TARGET_NR_olduname: 5990 goto unimplemented; 5991 #endif 5992 #ifdef TARGET_NR_iopl 5993 case TARGET_NR_iopl: 5994 goto unimplemented; 5995 #endif 5996 case TARGET_NR_vhangup: 5997 ret = get_errno(vhangup()); 5998 break; 5999 #ifdef TARGET_NR_idle 6000 case TARGET_NR_idle: 6001 goto unimplemented; 6002 #endif 6003 #ifdef TARGET_NR_syscall 6004 case TARGET_NR_syscall: 6005 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0); 6006 break; 6007 #endif 6008 case TARGET_NR_wait4: 6009 { 6010 int status; 6011 abi_long status_ptr = arg2; 6012 struct rusage rusage, *rusage_ptr; 6013 abi_ulong target_rusage = arg4; 6014 if (target_rusage) 6015 rusage_ptr = &rusage; 6016 else 6017 rusage_ptr = NULL; 6018 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6019 if (!is_error(ret)) { 6020 if (status_ptr) { 6021 status = host_to_target_waitstatus(status); 6022 if (put_user_s32(status, status_ptr)) 6023 goto efault; 6024 } 6025 if (target_rusage) 6026 host_to_target_rusage(target_rusage, &rusage); 6027 } 6028 } 6029 break; 6030 #ifdef TARGET_NR_swapoff 6031 case TARGET_NR_swapoff: 6032 if (!(p = lock_user_string(arg1))) 6033 goto efault; 6034 ret = get_errno(swapoff(p)); 6035 unlock_user(p, arg1, 0); 6036 break; 6037 #endif 6038 case TARGET_NR_sysinfo: 6039 { 6040 struct target_sysinfo *target_value; 6041 struct sysinfo value; 6042 ret = get_errno(sysinfo(&value)); 6043 if (!is_error(ret) && arg1) 6044 { 6045 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6046 goto efault; 6047 __put_user(value.uptime, &target_value->uptime); 6048 __put_user(value.loads[0], &target_value->loads[0]); 6049 __put_user(value.loads[1], &target_value->loads[1]); 6050 __put_user(value.loads[2], &target_value->loads[2]); 6051 __put_user(value.totalram, &target_value->totalram); 6052 __put_user(value.freeram, &target_value->freeram); 6053 __put_user(value.sharedram, &target_value->sharedram); 6054 __put_user(value.bufferram, &target_value->bufferram); 6055 __put_user(value.totalswap, &target_value->totalswap); 6056 __put_user(value.freeswap, &target_value->freeswap); 6057 __put_user(value.procs, &target_value->procs); 6058 __put_user(value.totalhigh, &target_value->totalhigh); 6059 __put_user(value.freehigh, &target_value->freehigh); 6060 __put_user(value.mem_unit, &target_value->mem_unit); 6061 unlock_user_struct(target_value, arg1, 1); 6062 } 6063 } 6064 break; 6065 #ifdef TARGET_NR_ipc 6066 case TARGET_NR_ipc: 6067 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6068 break; 6069 #endif 6070 #ifdef TARGET_NR_semget 6071 case TARGET_NR_semget: 6072 ret = get_errno(semget(arg1, arg2, arg3)); 6073 break; 6074 #endif 6075 #ifdef TARGET_NR_semop 6076 case TARGET_NR_semop: 6077 ret = get_errno(do_semop(arg1, arg2, arg3)); 6078 break; 6079 #endif 6080 #ifdef TARGET_NR_semctl 6081 case TARGET_NR_semctl: 6082 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 6083 break; 6084 #endif 6085 #ifdef TARGET_NR_msgctl 6086 case TARGET_NR_msgctl: 6087 ret = do_msgctl(arg1, arg2, arg3); 6088 break; 6089 #endif 6090 #ifdef TARGET_NR_msgget 6091 case 
TARGET_NR_msgget: 6092 ret = get_errno(msgget(arg1, arg2)); 6093 break; 6094 #endif 6095 #ifdef TARGET_NR_msgrcv 6096 case TARGET_NR_msgrcv: 6097 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 6098 break; 6099 #endif 6100 #ifdef TARGET_NR_msgsnd 6101 case TARGET_NR_msgsnd: 6102 ret = do_msgsnd(arg1, arg2, arg3, arg4); 6103 break; 6104 #endif 6105 #ifdef TARGET_NR_shmget 6106 case TARGET_NR_shmget: 6107 ret = get_errno(shmget(arg1, arg2, arg3)); 6108 break; 6109 #endif 6110 #ifdef TARGET_NR_shmctl 6111 case TARGET_NR_shmctl: 6112 ret = do_shmctl(arg1, arg2, arg3); 6113 break; 6114 #endif 6115 #ifdef TARGET_NR_shmat 6116 case TARGET_NR_shmat: 6117 ret = do_shmat(arg1, arg2, arg3); 6118 break; 6119 #endif 6120 #ifdef TARGET_NR_shmdt 6121 case TARGET_NR_shmdt: 6122 ret = do_shmdt(arg1); 6123 break; 6124 #endif 6125 case TARGET_NR_fsync: 6126 ret = get_errno(fsync(arg1)); 6127 break; 6128 case TARGET_NR_clone: 6129 #if defined(TARGET_SH4) || defined(TARGET_ALPHA) 6130 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 6131 #elif defined(TARGET_CRIS) 6132 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); 6133 #else 6134 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 6135 #endif 6136 break; 6137 #ifdef __NR_exit_group 6138 /* new thread calls */ 6139 case TARGET_NR_exit_group: 6140 #ifdef TARGET_GPROF 6141 _mcleanup(); 6142 #endif 6143 gdb_exit(cpu_env, arg1); 6144 ret = get_errno(exit_group(arg1)); 6145 break; 6146 #endif 6147 case TARGET_NR_setdomainname: 6148 if (!(p = lock_user_string(arg1))) 6149 goto efault; 6150 ret = get_errno(setdomainname(p, arg2)); 6151 unlock_user(p, arg1, 0); 6152 break; 6153 case TARGET_NR_uname: 6154 /* no need to transcode because we use the linux syscall */ 6155 { 6156 struct new_utsname * buf; 6157 6158 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 6159 goto efault; 6160 ret = get_errno(sys_uname(buf)); 6161 if (!is_error(ret)) { 6162 /* Overwrite the native machine name with whatever is being 6163 emulated. */ 6164 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 6165 /* Allow the user to override the reported release.
*/ 6166 if (qemu_uname_release && *qemu_uname_release) 6167 strcpy (buf->release, qemu_uname_release); 6168 } 6169 unlock_user_struct(buf, arg1, 1); 6170 } 6171 break; 6172 #ifdef TARGET_I386 6173 case TARGET_NR_modify_ldt: 6174 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 6175 break; 6176 #if !defined(TARGET_X86_64) 6177 case TARGET_NR_vm86old: 6178 goto unimplemented; 6179 case TARGET_NR_vm86: 6180 ret = do_vm86(cpu_env, arg1, arg2); 6181 break; 6182 #endif 6183 #endif 6184 case TARGET_NR_adjtimex: 6185 goto unimplemented; 6186 #ifdef TARGET_NR_create_module 6187 case TARGET_NR_create_module: 6188 #endif 6189 case TARGET_NR_init_module: 6190 case TARGET_NR_delete_module: 6191 #ifdef TARGET_NR_get_kernel_syms 6192 case TARGET_NR_get_kernel_syms: 6193 #endif 6194 goto unimplemented; 6195 case TARGET_NR_quotactl: 6196 goto unimplemented; 6197 case TARGET_NR_getpgid: 6198 ret = get_errno(getpgid(arg1)); 6199 break; 6200 case TARGET_NR_fchdir: 6201 ret = get_errno(fchdir(arg1)); 6202 break; 6203 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 6204 case TARGET_NR_bdflush: 6205 goto unimplemented; 6206 #endif 6207 #ifdef TARGET_NR_sysfs 6208 case TARGET_NR_sysfs: 6209 goto unimplemented; 6210 #endif 6211 case TARGET_NR_personality: 6212 ret = get_errno(personality(arg1)); 6213 break; 6214 #ifdef TARGET_NR_afs_syscall 6215 case TARGET_NR_afs_syscall: 6216 goto unimplemented; 6217 #endif 6218 #ifdef TARGET_NR__llseek /* Not on alpha */ 6219 case TARGET_NR__llseek: 6220 { 6221 int64_t res; 6222 #if !defined(__NR_llseek) 6223 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 6224 if (res == -1) { 6225 ret = get_errno(res); 6226 } else { 6227 ret = 0; 6228 } 6229 #else 6230 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 6231 #endif 6232 if ((ret == 0) && put_user_s64(res, arg4)) { 6233 goto efault; 6234 } 6235 } 6236 break; 6237 #endif 6238 case TARGET_NR_getdents: 6239 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 6240 { 6241 struct target_dirent *target_dirp; 6242 struct linux_dirent *dirp; 6243 abi_long count = arg3; 6244 6245 dirp = malloc(count); 6246 if (!dirp) { 6247 ret = -TARGET_ENOMEM; 6248 goto fail; 6249 } 6250 6251 ret = get_errno(sys_getdents(arg1, dirp, count)); 6252 if (!is_error(ret)) { 6253 struct linux_dirent *de; 6254 struct target_dirent *tde; 6255 int len = ret; 6256 int reclen, treclen; 6257 int count1, tnamelen; 6258 6259 count1 = 0; 6260 de = dirp; 6261 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6262 goto efault; 6263 tde = target_dirp; 6264 while (len > 0) { 6265 reclen = de->d_reclen; 6266 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long))); 6267 tde->d_reclen = tswap16(treclen); 6268 tde->d_ino = tswapl(de->d_ino); 6269 tde->d_off = tswapl(de->d_off); 6270 tnamelen = treclen - (2 * sizeof(abi_long) + 2); 6271 if (tnamelen > 256) 6272 tnamelen = 256; 6273 /* XXX: may not be correct */ 6274 pstrcpy(tde->d_name, tnamelen, de->d_name); 6275 de = (struct linux_dirent *)((char *)de + reclen); 6276 len -= reclen; 6277 tde = (struct target_dirent *)((char *)tde + treclen); 6278 count1 += treclen; 6279 } 6280 ret = count1; 6281 unlock_user(target_dirp, arg2, ret); 6282 } 6283 free(dirp); 6284 } 6285 #else 6286 { 6287 struct linux_dirent *dirp; 6288 abi_long count = arg3; 6289 6290 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6291 goto efault; 6292 ret = get_errno(sys_getdents(arg1, dirp, count)); 6293 if (!is_error(ret)) { 6294 struct linux_dirent *de; 6295 int len = ret; 6296 int reclen; 6297 de = dirp; 6298 while (len > 0) { 
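                /* In-place conversion: this branch is used when abi_long and the
                   host long are the same width, so host and target struct
                   linux_dirent share a layout; only the scalar fields (d_ino,
                   d_off, d_reclen) need byte-swapping and the name bytes are
                   left untouched. */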
6299 reclen = de->d_reclen; 6300 if (reclen > len) 6301 break; 6302 de->d_reclen = tswap16(reclen); 6303 tswapls(&de->d_ino); 6304 tswapls(&de->d_off); 6305 de = (struct linux_dirent *)((char *)de + reclen); 6306 len -= reclen; 6307 } 6308 } 6309 unlock_user(dirp, arg2, ret); 6310 } 6311 #endif 6312 break; 6313 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 6314 case TARGET_NR_getdents64: 6315 { 6316 struct linux_dirent64 *dirp; 6317 abi_long count = arg3; 6318 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6319 goto efault; 6320 ret = get_errno(sys_getdents64(arg1, dirp, count)); 6321 if (!is_error(ret)) { 6322 struct linux_dirent64 *de; 6323 int len = ret; 6324 int reclen; 6325 de = dirp; 6326 while (len > 0) { 6327 reclen = de->d_reclen; 6328 if (reclen > len) 6329 break; 6330 de->d_reclen = tswap16(reclen); 6331 tswap64s((uint64_t *)&de->d_ino); 6332 tswap64s((uint64_t *)&de->d_off); 6333 de = (struct linux_dirent64 *)((char *)de + reclen); 6334 len -= reclen; 6335 } 6336 } 6337 unlock_user(dirp, arg2, ret); 6338 } 6339 break; 6340 #endif /* TARGET_NR_getdents64 */ 6341 #ifdef TARGET_NR__newselect 6342 case TARGET_NR__newselect: 6343 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6344 break; 6345 #endif 6346 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 6347 # ifdef TARGET_NR_poll 6348 case TARGET_NR_poll: 6349 # endif 6350 # ifdef TARGET_NR_ppoll 6351 case TARGET_NR_ppoll: 6352 # endif 6353 { 6354 struct target_pollfd *target_pfd; 6355 unsigned int nfds = arg2; 6356 int timeout = arg3; 6357 struct pollfd *pfd; 6358 unsigned int i; 6359 6360 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 6361 if (!target_pfd) 6362 goto efault; 6363 6364 pfd = alloca(sizeof(struct pollfd) * nfds); 6365 for(i = 0; i < nfds; i++) { 6366 pfd[i].fd = tswap32(target_pfd[i].fd); 6367 pfd[i].events = tswap16(target_pfd[i].events); 6368 } 6369 6370 # ifdef TARGET_NR_ppoll 6371 if (num == TARGET_NR_ppoll) { 6372 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 6373 target_sigset_t *target_set; 6374 sigset_t _set, *set = &_set; 6375 6376 if (arg3) { 6377 if (target_to_host_timespec(timeout_ts, arg3)) { 6378 unlock_user(target_pfd, arg1, 0); 6379 goto efault; 6380 } 6381 } else { 6382 timeout_ts = NULL; 6383 } 6384 6385 if (arg4) { 6386 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 6387 if (!target_set) { 6388 unlock_user(target_pfd, arg1, 0); 6389 goto efault; 6390 } 6391 target_to_host_sigset(set, target_set); 6392 } else { 6393 set = NULL; 6394 } 6395 6396 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 6397 6398 if (!is_error(ret) && arg3) { 6399 host_to_target_timespec(arg3, timeout_ts); 6400 } 6401 if (arg4) { 6402 unlock_user(target_set, arg4, 0); 6403 } 6404 } else 6405 # endif 6406 ret = get_errno(poll(pfd, nfds, timeout)); 6407 6408 if (!is_error(ret)) { 6409 for(i = 0; i < nfds; i++) { 6410 target_pfd[i].revents = tswap16(pfd[i].revents); 6411 } 6412 } 6413 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 6414 } 6415 break; 6416 #endif 6417 case TARGET_NR_flock: 6418 /* NOTE: the flock constant seems to be the same for every 6419 Linux platform */ 6420 ret = get_errno(flock(arg1, arg2)); 6421 break; 6422 case TARGET_NR_readv: 6423 { 6424 int count = arg3; 6425 struct iovec *vec; 6426 6427 vec = alloca(count * sizeof(struct iovec)); 6428 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0) 6429 goto efault; 6430 ret = get_errno(readv(arg1, vec, count)); 6431 
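            /* The final unlock_iovec() argument selects copy-back: 1 writes the
               host buffers back to guest memory (needed here, since readv has
               just filled them), while the writev path below passes 0 and simply
               releases them. */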
unlock_iovec(vec, arg2, count, 1); 6432 } 6433 break; 6434 case TARGET_NR_writev: 6435 { 6436 int count = arg3; 6437 struct iovec *vec; 6438 6439 vec = alloca(count * sizeof(struct iovec)); 6440 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) 6441 goto efault; 6442 ret = get_errno(writev(arg1, vec, count)); 6443 unlock_iovec(vec, arg2, count, 0); 6444 } 6445 break; 6446 case TARGET_NR_getsid: 6447 ret = get_errno(getsid(arg1)); 6448 break; 6449 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 6450 case TARGET_NR_fdatasync: 6451 ret = get_errno(fdatasync(arg1)); 6452 break; 6453 #endif 6454 case TARGET_NR__sysctl: 6455 /* We don't implement this, but ENOTDIR is always a safe 6456 return value. */ 6457 ret = -TARGET_ENOTDIR; 6458 break; 6459 case TARGET_NR_sched_getaffinity: 6460 { 6461 unsigned int mask_size; 6462 unsigned long *mask; 6463 6464 /* 6465 * sched_getaffinity needs multiples of ulong, so need to take 6466 * care of mismatches between target ulong and host ulong sizes. 6467 */ 6468 if (arg2 & (sizeof(abi_ulong) - 1)) { 6469 ret = -TARGET_EINVAL; 6470 break; 6471 } 6472 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 6473 6474 mask = alloca(mask_size); 6475 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 6476 6477 if (!is_error(ret)) { 6478 if (arg2 > ret) { 6479 /* Zero out any extra space kernel didn't fill */ 6480 unsigned long zero = arg2 - ret; 6481 p = alloca(zero); 6482 memset(p, 0, zero); 6483 if (copy_to_user(arg3 + ret, p, zero)) { 6484 goto efault; 6485 } 6486 arg2 = ret; 6487 } 6488 if (copy_to_user(arg3, mask, arg2)) { 6489 goto efault; 6490 } 6491 ret = arg2; 6492 } 6493 } 6494 break; 6495 case TARGET_NR_sched_setaffinity: 6496 { 6497 unsigned int mask_size; 6498 unsigned long *mask; 6499 6500 /* 6501 * sched_setaffinity needs multiples of ulong, so need to take 6502 * care of mismatches between target ulong and host ulong sizes.
6503 */ 6504 if (arg2 & (sizeof(abi_ulong) - 1)) { 6505 ret = -TARGET_EINVAL; 6506 break; 6507 } 6508 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 6509 6510 mask = alloca(mask_size); 6511 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 6512 goto efault; 6513 } 6514 memcpy(mask, p, arg2); 6515 unlock_user_struct(p, arg2, 0); 6516 6517 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 6518 } 6519 break; 6520 case TARGET_NR_sched_setparam: 6521 { 6522 struct sched_param *target_schp; 6523 struct sched_param schp; 6524 6525 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 6526 goto efault; 6527 schp.sched_priority = tswap32(target_schp->sched_priority); 6528 unlock_user_struct(target_schp, arg2, 0); 6529 ret = get_errno(sched_setparam(arg1, &schp)); 6530 } 6531 break; 6532 case TARGET_NR_sched_getparam: 6533 { 6534 struct sched_param *target_schp; 6535 struct sched_param schp; 6536 ret = get_errno(sched_getparam(arg1, &schp)); 6537 if (!is_error(ret)) { 6538 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 6539 goto efault; 6540 target_schp->sched_priority = tswap32(schp.sched_priority); 6541 unlock_user_struct(target_schp, arg2, 1); 6542 } 6543 } 6544 break; 6545 case TARGET_NR_sched_setscheduler: 6546 { 6547 struct sched_param *target_schp; 6548 struct sched_param schp; 6549 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 6550 goto efault; 6551 schp.sched_priority = tswap32(target_schp->sched_priority); 6552 unlock_user_struct(target_schp, arg3, 0); 6553 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 6554 } 6555 break; 6556 case TARGET_NR_sched_getscheduler: 6557 ret = get_errno(sched_getscheduler(arg1)); 6558 break; 6559 case TARGET_NR_sched_yield: 6560 ret = get_errno(sched_yield()); 6561 break; 6562 case TARGET_NR_sched_get_priority_max: 6563 ret = get_errno(sched_get_priority_max(arg1)); 6564 break; 6565 case TARGET_NR_sched_get_priority_min: 6566 ret = get_errno(sched_get_priority_min(arg1)); 6567 break; 6568 case TARGET_NR_sched_rr_get_interval: 6569 { 6570 struct timespec ts; 6571 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 6572 if (!is_error(ret)) { 6573 host_to_target_timespec(arg2, &ts); 6574 } 6575 } 6576 break; 6577 case TARGET_NR_nanosleep: 6578 { 6579 struct timespec req, rem; 6580 target_to_host_timespec(&req, arg1); 6581 ret = get_errno(nanosleep(&req, &rem)); 6582 if (is_error(ret) && arg2) { 6583 host_to_target_timespec(arg2, &rem); 6584 } 6585 } 6586 break; 6587 #ifdef TARGET_NR_query_module 6588 case TARGET_NR_query_module: 6589 goto unimplemented; 6590 #endif 6591 #ifdef TARGET_NR_nfsservctl 6592 case TARGET_NR_nfsservctl: 6593 goto unimplemented; 6594 #endif 6595 case TARGET_NR_prctl: 6596 switch (arg1) 6597 { 6598 case PR_GET_PDEATHSIG: 6599 { 6600 int deathsig; 6601 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 6602 if (!is_error(ret) && arg2 6603 && put_user_ual(deathsig, arg2)) 6604 goto efault; 6605 } 6606 break; 6607 default: 6608 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 6609 break; 6610 } 6611 break; 6612 #ifdef TARGET_NR_arch_prctl 6613 case TARGET_NR_arch_prctl: 6614 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 6615 ret = do_arch_prctl(cpu_env, arg1, arg2); 6616 break; 6617 #else 6618 goto unimplemented; 6619 #endif 6620 #endif 6621 #ifdef TARGET_NR_pread 6622 case TARGET_NR_pread: 6623 #ifdef TARGET_ARM 6624 if (((CPUARMState *)cpu_env)->eabi) 6625 arg4 = arg5; 6626 #endif 6627 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 6628 goto efault; 6629 
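            /* The ARM EABI adjustment above (arg4 = arg5) accounts for the
               padding slot the EABI calling convention inserts before 64-bit
               arguments, which pushes the file offset one argument later. */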
ret = get_errno(pread(arg1, p, arg3, arg4)); 6630 unlock_user(p, arg2, ret); 6631 break; 6632 case TARGET_NR_pwrite: 6633 #ifdef TARGET_ARM 6634 if (((CPUARMState *)cpu_env)->eabi) 6635 arg4 = arg5; 6636 #endif 6637 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 6638 goto efault; 6639 ret = get_errno(pwrite(arg1, p, arg3, arg4)); 6640 unlock_user(p, arg2, 0); 6641 break; 6642 #endif 6643 #ifdef TARGET_NR_pread64 6644 case TARGET_NR_pread64: 6645 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 6646 goto efault; 6647 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 6648 unlock_user(p, arg2, ret); 6649 break; 6650 case TARGET_NR_pwrite64: 6651 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 6652 goto efault; 6653 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 6654 unlock_user(p, arg2, 0); 6655 break; 6656 #endif 6657 case TARGET_NR_getcwd: 6658 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 6659 goto efault; 6660 ret = get_errno(sys_getcwd1(p, arg2)); 6661 unlock_user(p, arg1, ret); 6662 break; 6663 case TARGET_NR_capget: 6664 goto unimplemented; 6665 case TARGET_NR_capset: 6666 goto unimplemented; 6667 case TARGET_NR_sigaltstack: 6668 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 6669 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 6670 defined(TARGET_M68K) 6671 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env)); 6672 break; 6673 #else 6674 goto unimplemented; 6675 #endif 6676 case TARGET_NR_sendfile: 6677 goto unimplemented; 6678 #ifdef TARGET_NR_getpmsg 6679 case TARGET_NR_getpmsg: 6680 goto unimplemented; 6681 #endif 6682 #ifdef TARGET_NR_putpmsg 6683 case TARGET_NR_putpmsg: 6684 goto unimplemented; 6685 #endif 6686 #ifdef TARGET_NR_vfork 6687 case TARGET_NR_vfork: 6688 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 6689 0, 0, 0, 0)); 6690 break; 6691 #endif 6692 #ifdef TARGET_NR_ugetrlimit 6693 case TARGET_NR_ugetrlimit: 6694 { 6695 struct rlimit rlim; 6696 ret = get_errno(getrlimit(arg1, &rlim)); 6697 if (!is_error(ret)) { 6698 struct target_rlimit *target_rlim; 6699 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6700 goto efault; 6701 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6702 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6703 unlock_user_struct(target_rlim, arg2, 1); 6704 } 6705 break; 6706 } 6707 #endif 6708 #ifdef TARGET_NR_truncate64 6709 case TARGET_NR_truncate64: 6710 if (!(p = lock_user_string(arg1))) 6711 goto efault; 6712 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 6713 unlock_user(p, arg1, 0); 6714 break; 6715 #endif 6716 #ifdef TARGET_NR_ftruncate64 6717 case TARGET_NR_ftruncate64: 6718 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 6719 break; 6720 #endif 6721 #ifdef TARGET_NR_stat64 6722 case TARGET_NR_stat64: 6723 if (!(p = lock_user_string(arg1))) 6724 goto efault; 6725 ret = get_errno(stat(path(p), &st)); 6726 unlock_user(p, arg1, 0); 6727 if (!is_error(ret)) 6728 ret = host_to_target_stat64(cpu_env, arg2, &st); 6729 break; 6730 #endif 6731 #ifdef TARGET_NR_lstat64 6732 case TARGET_NR_lstat64: 6733 if (!(p = lock_user_string(arg1))) 6734 goto efault; 6735 ret = get_errno(lstat(path(p), &st)); 6736 unlock_user(p, arg1, 0); 6737 if (!is_error(ret)) 6738 ret = host_to_target_stat64(cpu_env, arg2, &st); 6739 break; 6740 #endif 6741 #ifdef TARGET_NR_fstat64 6742 case TARGET_NR_fstat64: 6743 ret = get_errno(fstat(arg1, &st)); 6744 if 
(!is_error(ret)) 6745 ret = host_to_target_stat64(cpu_env, arg2, &st); 6746 break; 6747 #endif 6748 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 6749 (defined(__NR_fstatat64) || defined(__NR_newfstatat)) 6750 #ifdef TARGET_NR_fstatat64 6751 case TARGET_NR_fstatat64: 6752 #endif 6753 #ifdef TARGET_NR_newfstatat 6754 case TARGET_NR_newfstatat: 6755 #endif 6756 if (!(p = lock_user_string(arg2))) 6757 goto efault; 6758 #ifdef __NR_fstatat64 6759 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); 6760 #else 6761 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); 6762 #endif 6763 if (!is_error(ret)) 6764 ret = host_to_target_stat64(cpu_env, arg3, &st); 6765 break; 6766 #endif 6767 #ifdef USE_UID16 6768 case TARGET_NR_lchown: 6769 if (!(p = lock_user_string(arg1))) 6770 goto efault; 6771 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 6772 unlock_user(p, arg1, 0); 6773 break; 6774 case TARGET_NR_getuid: 6775 ret = get_errno(high2lowuid(getuid())); 6776 break; 6777 case TARGET_NR_getgid: 6778 ret = get_errno(high2lowgid(getgid())); 6779 break; 6780 case TARGET_NR_geteuid: 6781 ret = get_errno(high2lowuid(geteuid())); 6782 break; 6783 case TARGET_NR_getegid: 6784 ret = get_errno(high2lowgid(getegid())); 6785 break; 6786 case TARGET_NR_setreuid: 6787 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 6788 break; 6789 case TARGET_NR_setregid: 6790 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 6791 break; 6792 case TARGET_NR_getgroups: 6793 { 6794 int gidsetsize = arg1; 6795 uint16_t *target_grouplist; 6796 gid_t *grouplist; 6797 int i; 6798 6799 grouplist = alloca(gidsetsize * sizeof(gid_t)); 6800 ret = get_errno(getgroups(gidsetsize, grouplist)); 6801 if (gidsetsize == 0) 6802 break; 6803 if (!is_error(ret)) { 6804 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0); 6805 if (!target_grouplist) 6806 goto efault; 6807 for(i = 0;i < ret; i++) 6808 target_grouplist[i] = tswap16(grouplist[i]); 6809 unlock_user(target_grouplist, arg2, gidsetsize * 2); 6810 } 6811 } 6812 break; 6813 case TARGET_NR_setgroups: 6814 { 6815 int gidsetsize = arg1; 6816 uint16_t *target_grouplist; 6817 gid_t *grouplist; 6818 int i; 6819 6820 grouplist = alloca(gidsetsize * sizeof(gid_t)); 6821 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1); 6822 if (!target_grouplist) { 6823 ret = -TARGET_EFAULT; 6824 goto fail; 6825 } 6826 for(i = 0;i < gidsetsize; i++) 6827 grouplist[i] = tswap16(target_grouplist[i]); 6828 unlock_user(target_grouplist, arg2, 0); 6829 ret = get_errno(setgroups(gidsetsize, grouplist)); 6830 } 6831 break; 6832 case TARGET_NR_fchown: 6833 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 6834 break; 6835 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 6836 case TARGET_NR_fchownat: 6837 if (!(p = lock_user_string(arg2))) 6838 goto efault; 6839 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); 6840 unlock_user(p, arg2, 0); 6841 break; 6842 #endif 6843 #ifdef TARGET_NR_setresuid 6844 case TARGET_NR_setresuid: 6845 ret = get_errno(setresuid(low2highuid(arg1), 6846 low2highuid(arg2), 6847 low2highuid(arg3))); 6848 break; 6849 #endif 6850 #ifdef TARGET_NR_getresuid 6851 case TARGET_NR_getresuid: 6852 { 6853 uid_t ruid, euid, suid; 6854 ret = get_errno(getresuid(&ruid, &euid, &suid)); 6855 if (!is_error(ret)) { 6856 if (put_user_u16(high2lowuid(ruid), arg1) 6857 || put_user_u16(high2lowuid(euid), arg2) 6858 || 
put_user_u16(high2lowuid(suid), arg3)) 6859 goto efault; 6860 } 6861 } 6862 break; 6863 #endif 6864 #ifdef TARGET_NR_getresgid 6865 case TARGET_NR_setresgid: 6866 ret = get_errno(setresgid(low2highgid(arg1), 6867 low2highgid(arg2), 6868 low2highgid(arg3))); 6869 break; 6870 #endif 6871 #ifdef TARGET_NR_getresgid 6872 case TARGET_NR_getresgid: 6873 { 6874 gid_t rgid, egid, sgid; 6875 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 6876 if (!is_error(ret)) { 6877 if (put_user_u16(high2lowgid(rgid), arg1) 6878 || put_user_u16(high2lowgid(egid), arg2) 6879 || put_user_u16(high2lowgid(sgid), arg3)) 6880 goto efault; 6881 } 6882 } 6883 break; 6884 #endif 6885 case TARGET_NR_chown: 6886 if (!(p = lock_user_string(arg1))) 6887 goto efault; 6888 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 6889 unlock_user(p, arg1, 0); 6890 break; 6891 case TARGET_NR_setuid: 6892 ret = get_errno(setuid(low2highuid(arg1))); 6893 break; 6894 case TARGET_NR_setgid: 6895 ret = get_errno(setgid(low2highgid(arg1))); 6896 break; 6897 case TARGET_NR_setfsuid: 6898 ret = get_errno(setfsuid(arg1)); 6899 break; 6900 case TARGET_NR_setfsgid: 6901 ret = get_errno(setfsgid(arg1)); 6902 break; 6903 #endif /* USE_UID16 */ 6904 6905 #ifdef TARGET_NR_lchown32 6906 case TARGET_NR_lchown32: 6907 if (!(p = lock_user_string(arg1))) 6908 goto efault; 6909 ret = get_errno(lchown(p, arg2, arg3)); 6910 unlock_user(p, arg1, 0); 6911 break; 6912 #endif 6913 #ifdef TARGET_NR_getuid32 6914 case TARGET_NR_getuid32: 6915 ret = get_errno(getuid()); 6916 break; 6917 #endif 6918 6919 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 6920 /* Alpha specific */ 6921 case TARGET_NR_getxuid: 6922 { 6923 uid_t euid; 6924 euid=geteuid(); 6925 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 6926 } 6927 ret = get_errno(getuid()); 6928 break; 6929 #endif 6930 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 6931 /* Alpha specific */ 6932 case TARGET_NR_getxgid: 6933 { 6934 uid_t egid; 6935 egid=getegid(); 6936 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 6937 } 6938 ret = get_errno(getgid()); 6939 break; 6940 #endif 6941 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 6942 /* Alpha specific */ 6943 case TARGET_NR_osf_getsysinfo: 6944 ret = -TARGET_EOPNOTSUPP; 6945 switch (arg1) { 6946 case TARGET_GSI_IEEE_FP_CONTROL: 6947 { 6948 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 6949 6950 /* Copied from linux ieee_fpcr_to_swcr. */ 6951 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 6952 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 6953 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 6954 | SWCR_TRAP_ENABLE_DZE 6955 | SWCR_TRAP_ENABLE_OVF); 6956 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 6957 | SWCR_TRAP_ENABLE_INE); 6958 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 6959 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 6960 6961 if (put_user_u64 (swcr, arg2)) 6962 goto efault; 6963 ret = 0; 6964 } 6965 break; 6966 6967 /* case GSI_IEEE_STATE_AT_SIGNAL: 6968 -- Not implemented in linux kernel. 6969 case GSI_UACPROC: 6970 -- Retrieves current unaligned access state; not much used. 6971 case GSI_PROC_TYPE: 6972 -- Retrieves implver information; surely not used. 6973 case GSI_GET_HWRPB: 6974 -- Grabs a copy of the HWRPB; surely not used. 
6975 */ 6976 } 6977 break; 6978 #endif 6979 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 6980 /* Alpha specific */ 6981 case TARGET_NR_osf_setsysinfo: 6982 ret = -TARGET_EOPNOTSUPP; 6983 switch (arg1) { 6984 case TARGET_SSI_IEEE_FP_CONTROL: 6985 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 6986 { 6987 uint64_t swcr, fpcr, orig_fpcr; 6988 6989 if (get_user_u64 (swcr, arg2)) 6990 goto efault; 6991 orig_fpcr = cpu_alpha_load_fpcr (cpu_env); 6992 fpcr = orig_fpcr & FPCR_DYN_MASK; 6993 6994 /* Copied from linux ieee_swcr_to_fpcr. */ 6995 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 6996 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 6997 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 6998 | SWCR_TRAP_ENABLE_DZE 6999 | SWCR_TRAP_ENABLE_OVF)) << 48; 7000 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 7001 | SWCR_TRAP_ENABLE_INE)) << 57; 7002 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 7003 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 7004 7005 cpu_alpha_store_fpcr (cpu_env, fpcr); 7006 ret = 0; 7007 7008 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) { 7009 /* Old exceptions are not signaled. */ 7010 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 7011 7012 /* If any exceptions set by this call, and are unmasked, 7013 send a signal. */ 7014 /* ??? FIXME */ 7015 } 7016 } 7017 break; 7018 7019 /* case SSI_NVPAIRS: 7020 -- Used with SSIN_UACPROC to enable unaligned accesses. 7021 case SSI_IEEE_STATE_AT_SIGNAL: 7022 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 7023 -- Not implemented in linux kernel 7024 */ 7025 } 7026 break; 7027 #endif 7028 #ifdef TARGET_NR_osf_sigprocmask 7029 /* Alpha specific. */ 7030 case TARGET_NR_osf_sigprocmask: 7031 { 7032 abi_ulong mask; 7033 int how = arg1; 7034 sigset_t set, oldset; 7035 7036 switch(arg1) { 7037 case TARGET_SIG_BLOCK: 7038 how = SIG_BLOCK; 7039 break; 7040 case TARGET_SIG_UNBLOCK: 7041 how = SIG_UNBLOCK; 7042 break; 7043 case TARGET_SIG_SETMASK: 7044 how = SIG_SETMASK; 7045 break; 7046 default: 7047 ret = -TARGET_EINVAL; 7048 goto fail; 7049 } 7050 mask = arg2; 7051 target_to_host_old_sigset(&set, &mask); 7052 sigprocmask(arg1, &set, &oldset); 7053 host_to_target_old_sigset(&mask, &oldset); 7054 ret = mask; 7055 } 7056 break; 7057 #endif 7058 7059 #ifdef TARGET_NR_getgid32 7060 case TARGET_NR_getgid32: 7061 ret = get_errno(getgid()); 7062 break; 7063 #endif 7064 #ifdef TARGET_NR_geteuid32 7065 case TARGET_NR_geteuid32: 7066 ret = get_errno(geteuid()); 7067 break; 7068 #endif 7069 #ifdef TARGET_NR_getegid32 7070 case TARGET_NR_getegid32: 7071 ret = get_errno(getegid()); 7072 break; 7073 #endif 7074 #ifdef TARGET_NR_setreuid32 7075 case TARGET_NR_setreuid32: 7076 ret = get_errno(setreuid(arg1, arg2)); 7077 break; 7078 #endif 7079 #ifdef TARGET_NR_setregid32 7080 case TARGET_NR_setregid32: 7081 ret = get_errno(setregid(arg1, arg2)); 7082 break; 7083 #endif 7084 #ifdef TARGET_NR_getgroups32 7085 case TARGET_NR_getgroups32: 7086 { 7087 int gidsetsize = arg1; 7088 uint32_t *target_grouplist; 7089 gid_t *grouplist; 7090 int i; 7091 7092 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7093 ret = get_errno(getgroups(gidsetsize, grouplist)); 7094 if (gidsetsize == 0) 7095 break; 7096 if (!is_error(ret)) { 7097 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 7098 if (!target_grouplist) { 7099 ret = -TARGET_EFAULT; 7100 goto fail; 7101 } 7102 for(i = 0;i < ret; i++) 7103 target_grouplist[i] = tswap32(grouplist[i]); 7104 unlock_user(target_grouplist, arg2, gidsetsize * 4); 7105 } 7106 } 7107 break; 7108 #endif 7109 #ifdef TARGET_NR_setgroups32 7110 case 
TARGET_NR_setgroups32: 7111 { 7112 int gidsetsize = arg1; 7113 uint32_t *target_grouplist; 7114 gid_t *grouplist; 7115 int i; 7116 7117 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7118 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 7119 if (!target_grouplist) { 7120 ret = -TARGET_EFAULT; 7121 goto fail; 7122 } 7123 for(i = 0;i < gidsetsize; i++) 7124 grouplist[i] = tswap32(target_grouplist[i]); 7125 unlock_user(target_grouplist, arg2, 0); 7126 ret = get_errno(setgroups(gidsetsize, grouplist)); 7127 } 7128 break; 7129 #endif 7130 #ifdef TARGET_NR_fchown32 7131 case TARGET_NR_fchown32: 7132 ret = get_errno(fchown(arg1, arg2, arg3)); 7133 break; 7134 #endif 7135 #ifdef TARGET_NR_setresuid32 7136 case TARGET_NR_setresuid32: 7137 ret = get_errno(setresuid(arg1, arg2, arg3)); 7138 break; 7139 #endif 7140 #ifdef TARGET_NR_getresuid32 7141 case TARGET_NR_getresuid32: 7142 { 7143 uid_t ruid, euid, suid; 7144 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7145 if (!is_error(ret)) { 7146 if (put_user_u32(ruid, arg1) 7147 || put_user_u32(euid, arg2) 7148 || put_user_u32(suid, arg3)) 7149 goto efault; 7150 } 7151 } 7152 break; 7153 #endif 7154 #ifdef TARGET_NR_setresgid32 7155 case TARGET_NR_setresgid32: 7156 ret = get_errno(setresgid(arg1, arg2, arg3)); 7157 break; 7158 #endif 7159 #ifdef TARGET_NR_getresgid32 7160 case TARGET_NR_getresgid32: 7161 { 7162 gid_t rgid, egid, sgid; 7163 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7164 if (!is_error(ret)) { 7165 if (put_user_u32(rgid, arg1) 7166 || put_user_u32(egid, arg2) 7167 || put_user_u32(sgid, arg3)) 7168 goto efault; 7169 } 7170 } 7171 break; 7172 #endif 7173 #ifdef TARGET_NR_chown32 7174 case TARGET_NR_chown32: 7175 if (!(p = lock_user_string(arg1))) 7176 goto efault; 7177 ret = get_errno(chown(p, arg2, arg3)); 7178 unlock_user(p, arg1, 0); 7179 break; 7180 #endif 7181 #ifdef TARGET_NR_setuid32 7182 case TARGET_NR_setuid32: 7183 ret = get_errno(setuid(arg1)); 7184 break; 7185 #endif 7186 #ifdef TARGET_NR_setgid32 7187 case TARGET_NR_setgid32: 7188 ret = get_errno(setgid(arg1)); 7189 break; 7190 #endif 7191 #ifdef TARGET_NR_setfsuid32 7192 case TARGET_NR_setfsuid32: 7193 ret = get_errno(setfsuid(arg1)); 7194 break; 7195 #endif 7196 #ifdef TARGET_NR_setfsgid32 7197 case TARGET_NR_setfsgid32: 7198 ret = get_errno(setfsgid(arg1)); 7199 break; 7200 #endif 7201 7202 case TARGET_NR_pivot_root: 7203 goto unimplemented; 7204 #ifdef TARGET_NR_mincore 7205 case TARGET_NR_mincore: 7206 { 7207 void *a; 7208 ret = -TARGET_EFAULT; 7209 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 7210 goto efault; 7211 if (!(p = lock_user_string(arg3))) 7212 goto mincore_fail; 7213 ret = get_errno(mincore(a, arg2, p)); 7214 unlock_user(p, arg3, ret); 7215 mincore_fail: 7216 unlock_user(a, arg1, 0); 7217 } 7218 break; 7219 #endif 7220 #ifdef TARGET_NR_arm_fadvise64_64 7221 case TARGET_NR_arm_fadvise64_64: 7222 { 7223 /* 7224 * arm_fadvise64_64 looks like fadvise64_64 but 7225 * with different argument order 7226 */ 7227 abi_long temp; 7228 temp = arg3; 7229 arg3 = arg4; 7230 arg4 = temp; 7231 } 7232 #endif 7233 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 7234 #ifdef TARGET_NR_fadvise64_64 7235 case TARGET_NR_fadvise64_64: 7236 #endif 7237 #ifdef TARGET_NR_fadvise64 7238 case TARGET_NR_fadvise64: 7239 #endif 7240 #ifdef TARGET_S390X 7241 switch (arg4) { 7242 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 7243 case 5: arg4 = POSIX_FADV_NOREUSE + 2; 
break; /* ditto */ 7244 case 6: arg4 = POSIX_FADV_DONTNEED; break; 7245 case 7: arg4 = POSIX_FADV_NOREUSE; break; 7246 default: break; 7247 } 7248 #endif 7249 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 7250 break; 7251 #endif 7252 #ifdef TARGET_NR_madvise 7253 case TARGET_NR_madvise: 7254 /* A straight passthrough may not be safe because qemu sometimes 7255 turns private flie-backed mappings into anonymous mappings. 7256 This will break MADV_DONTNEED. 7257 This is a hint, so ignoring and returning success is ok. */ 7258 ret = get_errno(0); 7259 break; 7260 #endif 7261 #if TARGET_ABI_BITS == 32 7262 case TARGET_NR_fcntl64: 7263 { 7264 int cmd; 7265 struct flock64 fl; 7266 struct target_flock64 *target_fl; 7267 #ifdef TARGET_ARM 7268 struct target_eabi_flock64 *target_efl; 7269 #endif 7270 7271 cmd = target_to_host_fcntl_cmd(arg2); 7272 if (cmd == -TARGET_EINVAL) 7273 return cmd; 7274 7275 switch(arg2) { 7276 case TARGET_F_GETLK64: 7277 #ifdef TARGET_ARM 7278 if (((CPUARMState *)cpu_env)->eabi) { 7279 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 7280 goto efault; 7281 fl.l_type = tswap16(target_efl->l_type); 7282 fl.l_whence = tswap16(target_efl->l_whence); 7283 fl.l_start = tswap64(target_efl->l_start); 7284 fl.l_len = tswap64(target_efl->l_len); 7285 fl.l_pid = tswap32(target_efl->l_pid); 7286 unlock_user_struct(target_efl, arg3, 0); 7287 } else 7288 #endif 7289 { 7290 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 7291 goto efault; 7292 fl.l_type = tswap16(target_fl->l_type); 7293 fl.l_whence = tswap16(target_fl->l_whence); 7294 fl.l_start = tswap64(target_fl->l_start); 7295 fl.l_len = tswap64(target_fl->l_len); 7296 fl.l_pid = tswap32(target_fl->l_pid); 7297 unlock_user_struct(target_fl, arg3, 0); 7298 } 7299 ret = get_errno(fcntl(arg1, cmd, &fl)); 7300 if (ret == 0) { 7301 #ifdef TARGET_ARM 7302 if (((CPUARMState *)cpu_env)->eabi) { 7303 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 7304 goto efault; 7305 target_efl->l_type = tswap16(fl.l_type); 7306 target_efl->l_whence = tswap16(fl.l_whence); 7307 target_efl->l_start = tswap64(fl.l_start); 7308 target_efl->l_len = tswap64(fl.l_len); 7309 target_efl->l_pid = tswap32(fl.l_pid); 7310 unlock_user_struct(target_efl, arg3, 1); 7311 } else 7312 #endif 7313 { 7314 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 7315 goto efault; 7316 target_fl->l_type = tswap16(fl.l_type); 7317 target_fl->l_whence = tswap16(fl.l_whence); 7318 target_fl->l_start = tswap64(fl.l_start); 7319 target_fl->l_len = tswap64(fl.l_len); 7320 target_fl->l_pid = tswap32(fl.l_pid); 7321 unlock_user_struct(target_fl, arg3, 1); 7322 } 7323 } 7324 break; 7325 7326 case TARGET_F_SETLK64: 7327 case TARGET_F_SETLKW64: 7328 #ifdef TARGET_ARM 7329 if (((CPUARMState *)cpu_env)->eabi) { 7330 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 7331 goto efault; 7332 fl.l_type = tswap16(target_efl->l_type); 7333 fl.l_whence = tswap16(target_efl->l_whence); 7334 fl.l_start = tswap64(target_efl->l_start); 7335 fl.l_len = tswap64(target_efl->l_len); 7336 fl.l_pid = tswap32(target_efl->l_pid); 7337 unlock_user_struct(target_efl, arg3, 0); 7338 } else 7339 #endif 7340 { 7341 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 7342 goto efault; 7343 fl.l_type = tswap16(target_fl->l_type); 7344 fl.l_whence = tswap16(target_fl->l_whence); 7345 fl.l_start = tswap64(target_fl->l_start); 7346 fl.l_len = tswap64(target_fl->l_len); 7347 fl.l_pid = tswap32(target_fl->l_pid); 7348 unlock_user_struct(target_fl, arg3, 0); 7349 } 7350 ret = 
get_errno(fcntl(arg1, cmd, &fl)); 7351 break; 7352 default: 7353 ret = do_fcntl(arg1, arg2, arg3); 7354 break; 7355 } 7356 break; 7357 } 7358 #endif 7359 #ifdef TARGET_NR_cacheflush 7360 case TARGET_NR_cacheflush: 7361 /* self-modifying code is handled automatically, so nothing needed */ 7362 ret = 0; 7363 break; 7364 #endif 7365 #ifdef TARGET_NR_security 7366 case TARGET_NR_security: 7367 goto unimplemented; 7368 #endif 7369 #ifdef TARGET_NR_getpagesize 7370 case TARGET_NR_getpagesize: 7371 ret = TARGET_PAGE_SIZE; 7372 break; 7373 #endif 7374 case TARGET_NR_gettid: 7375 ret = get_errno(gettid()); 7376 break; 7377 #ifdef TARGET_NR_readahead 7378 case TARGET_NR_readahead: 7379 #if TARGET_ABI_BITS == 32 7380 #ifdef TARGET_ARM 7381 if (((CPUARMState *)cpu_env)->eabi) 7382 { 7383 arg2 = arg3; 7384 arg3 = arg4; 7385 arg4 = arg5; 7386 } 7387 #endif 7388 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 7389 #else 7390 ret = get_errno(readahead(arg1, arg2, arg3)); 7391 #endif 7392 break; 7393 #endif 7394 #ifdef TARGET_NR_setxattr 7395 case TARGET_NR_setxattr: 7396 case TARGET_NR_lsetxattr: 7397 case TARGET_NR_fsetxattr: 7398 case TARGET_NR_getxattr: 7399 case TARGET_NR_lgetxattr: 7400 case TARGET_NR_fgetxattr: 7401 case TARGET_NR_listxattr: 7402 case TARGET_NR_llistxattr: 7403 case TARGET_NR_flistxattr: 7404 case TARGET_NR_removexattr: 7405 case TARGET_NR_lremovexattr: 7406 case TARGET_NR_fremovexattr: 7407 ret = -TARGET_EOPNOTSUPP; 7408 break; 7409 #endif 7410 #ifdef TARGET_NR_set_thread_area 7411 case TARGET_NR_set_thread_area: 7412 #if defined(TARGET_MIPS) 7413 ((CPUMIPSState *) cpu_env)->tls_value = arg1; 7414 ret = 0; 7415 break; 7416 #elif defined(TARGET_CRIS) 7417 if (arg1 & 0xff) 7418 ret = -TARGET_EINVAL; 7419 else { 7420 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 7421 ret = 0; 7422 } 7423 break; 7424 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 7425 ret = do_set_thread_area(cpu_env, arg1); 7426 break; 7427 #else 7428 goto unimplemented_nowarn; 7429 #endif 7430 #endif 7431 #ifdef TARGET_NR_get_thread_area 7432 case TARGET_NR_get_thread_area: 7433 #if defined(TARGET_I386) && defined(TARGET_ABI32) 7434 ret = do_get_thread_area(cpu_env, arg1); 7435 #else 7436 goto unimplemented_nowarn; 7437 #endif 7438 #endif 7439 #ifdef TARGET_NR_getdomainname 7440 case TARGET_NR_getdomainname: 7441 goto unimplemented_nowarn; 7442 #endif 7443 7444 #ifdef TARGET_NR_clock_gettime 7445 case TARGET_NR_clock_gettime: 7446 { 7447 struct timespec ts; 7448 ret = get_errno(clock_gettime(arg1, &ts)); 7449 if (!is_error(ret)) { 7450 host_to_target_timespec(arg2, &ts); 7451 } 7452 break; 7453 } 7454 #endif 7455 #ifdef TARGET_NR_clock_getres 7456 case TARGET_NR_clock_getres: 7457 { 7458 struct timespec ts; 7459 ret = get_errno(clock_getres(arg1, &ts)); 7460 if (!is_error(ret)) { 7461 host_to_target_timespec(arg2, &ts); 7462 } 7463 break; 7464 } 7465 #endif 7466 #ifdef TARGET_NR_clock_nanosleep 7467 case TARGET_NR_clock_nanosleep: 7468 { 7469 struct timespec ts; 7470 target_to_host_timespec(&ts, arg3); 7471 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); 7472 if (arg4) 7473 host_to_target_timespec(arg4, &ts); 7474 break; 7475 } 7476 #endif 7477 7478 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 7479 case TARGET_NR_set_tid_address: 7480 ret = get_errno(set_tid_address((int *)g2h(arg1))); 7481 break; 7482 #endif 7483 7484 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 7485 case TARGET_NR_tkill: 7486 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 7487 break; 7488 #endif 7489 7490 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 7491 case TARGET_NR_tgkill: 7492 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 7493 target_to_host_signal(arg3))); 7494 break; 7495 #endif 7496 7497 #ifdef TARGET_NR_set_robust_list 7498 case TARGET_NR_set_robust_list: 7499 goto unimplemented_nowarn; 7500 #endif 7501 7502 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat) 7503 case TARGET_NR_utimensat: 7504 { 7505 struct timespec *tsp, ts[2]; 7506 if (!arg3) { 7507 tsp = NULL; 7508 } else { 7509 target_to_host_timespec(ts, arg3); 7510 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 7511 tsp = ts; 7512 } 7513 if (!arg2) 7514 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 7515 else { 7516 if (!(p = lock_user_string(arg2))) { 7517 ret = -TARGET_EFAULT; 7518 goto fail; 7519 } 7520 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 7521 unlock_user(p, arg2, 0); 7522 } 7523 } 7524 break; 7525 #endif 7526 #if defined(CONFIG_USE_NPTL) 7527 case TARGET_NR_futex: 7528 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 7529 break; 7530 #endif 7531 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 7532 case TARGET_NR_inotify_init: 7533 ret = get_errno(sys_inotify_init()); 7534 break; 7535 #endif 7536 #ifdef CONFIG_INOTIFY1 7537 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 7538 case TARGET_NR_inotify_init1: 7539 ret = get_errno(sys_inotify_init1(arg1)); 7540 break; 7541 #endif 7542 #endif 7543 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 7544 case TARGET_NR_inotify_add_watch: 7545 p = lock_user_string(arg2); 7546 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 7547 unlock_user(p, arg2, 0); 7548 break; 7549 #endif 7550 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 7551 case TARGET_NR_inotify_rm_watch: 7552 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 7553 break; 7554 #endif 7555 7556 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 7557 case TARGET_NR_mq_open: 7558 { 7559 struct mq_attr posix_mq_attr; 7560 7561 p = lock_user_string(arg1 - 1); 7562 if (arg4 != 0) 7563 copy_from_user_mq_attr (&posix_mq_attr, arg4); 7564 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 7565 unlock_user (p, arg1, 0); 7566 } 7567 break; 7568 7569 case TARGET_NR_mq_unlink: 7570 p = lock_user_string(arg1 - 1); 7571 ret = get_errno(mq_unlink(p)); 7572 unlock_user (p, arg1, 0); 7573 break; 7574 7575 case TARGET_NR_mq_timedsend: 7576 { 7577 struct timespec ts; 7578 7579 p = lock_user (VERIFY_READ, arg2, arg3, 1); 7580 if (arg5 != 0) { 7581 target_to_host_timespec(&ts, arg5); 7582 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 7583 host_to_target_timespec(arg5, &ts); 7584 } 7585 else 7586 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 7587 unlock_user (p, arg2, arg3); 7588 } 7589 break; 7590 7591 case TARGET_NR_mq_timedreceive: 7592 { 7593 struct timespec ts; 7594 unsigned int prio; 7595 7596 p = lock_user (VERIFY_READ, arg2, arg3, 1); 7597 if (arg5 != 
0) { 7598 target_to_host_timespec(&ts, arg5); 7599 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); 7600 host_to_target_timespec(arg5, &ts); 7601 } 7602 else 7603 ret = get_errno(mq_receive(arg1, p, arg3, &prio)); 7604 unlock_user (p, arg2, arg3); 7605 if (arg4 != 0) 7606 put_user_u32(prio, arg4); 7607 } 7608 break; 7609 7610 /* Not implemented for now... */ 7611 /* case TARGET_NR_mq_notify: */ 7612 /* break; */ 7613 7614 case TARGET_NR_mq_getsetattr: 7615 { 7616 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 7617 ret = 0; 7618 if (arg3 != 0) { 7619 ret = mq_getattr(arg1, &posix_mq_attr_out); 7620 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 7621 } 7622 if (arg2 != 0) { 7623 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 7624 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 7625 } 7626 7627 } 7628 break; 7629 #endif 7630 7631 #ifdef CONFIG_SPLICE 7632 #ifdef TARGET_NR_tee 7633 case TARGET_NR_tee: 7634 { 7635 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 7636 } 7637 break; 7638 #endif 7639 #ifdef TARGET_NR_splice 7640 case TARGET_NR_splice: 7641 { 7642 loff_t loff_in, loff_out; 7643 loff_t *ploff_in = NULL, *ploff_out = NULL; 7644 if(arg2) { 7645 get_user_u64(loff_in, arg2); 7646 ploff_in = &loff_in; 7647 } 7648 if(arg4) { 7649 get_user_u64(loff_out, arg4); 7650 ploff_out = &loff_out; 7651 } 7652 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 7653 } 7654 break; 7655 #endif 7656 #ifdef TARGET_NR_vmsplice 7657 case TARGET_NR_vmsplice: 7658 { 7659 int count = arg3; 7660 struct iovec *vec; 7661 7662 vec = alloca(count * sizeof(struct iovec)); 7663 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) 7664 goto efault; 7665 ret = get_errno(vmsplice(arg1, vec, count, arg4)); 7666 unlock_iovec(vec, arg2, count, 0); 7667 } 7668 break; 7669 #endif 7670 #endif /* CONFIG_SPLICE */ 7671 #ifdef CONFIG_EVENTFD 7672 #if defined(TARGET_NR_eventfd) 7673 case TARGET_NR_eventfd: 7674 ret = get_errno(eventfd(arg1, 0)); 7675 break; 7676 #endif 7677 #if defined(TARGET_NR_eventfd2) 7678 case TARGET_NR_eventfd2: 7679 ret = get_errno(eventfd(arg1, arg2)); 7680 break; 7681 #endif 7682 #endif /* CONFIG_EVENTFD */ 7683 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 7684 case TARGET_NR_fallocate: 7685 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 7686 break; 7687 #endif 7688 #if defined(CONFIG_SYNC_FILE_RANGE) 7689 #if defined(TARGET_NR_sync_file_range) 7690 case TARGET_NR_sync_file_range: 7691 #if TARGET_ABI_BITS == 32 7692 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 7693 target_offset64(arg4, arg5), arg6)); 7694 #else 7695 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 7696 #endif 7697 break; 7698 #endif 7699 #if defined(TARGET_NR_sync_file_range2) 7700 case TARGET_NR_sync_file_range2: 7701 /* This is like sync_file_range but the arguments are reordered */ 7702 #if TARGET_ABI_BITS == 32 7703 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 7704 target_offset64(arg5, arg6), arg2)); 7705 #else 7706 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 7707 #endif 7708 break; 7709 #endif 7710 #endif 7711 #if defined(CONFIG_EPOLL) 7712 #if defined(TARGET_NR_epoll_create) 7713 case TARGET_NR_epoll_create: 7714 ret = get_errno(epoll_create(arg1)); 7715 break; 7716 #endif 7717 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 7718 case TARGET_NR_epoll_create1: 7719 ret = get_errno(epoll_create1(arg1)); 7720 break; 7721 #endif 7722 #if defined(TARGET_NR_epoll_ctl) 7723 case
TARGET_NR_epoll_ctl: 7724 { 7725 struct epoll_event ep; 7726 struct epoll_event *epp = 0; 7727 if (arg4) { 7728 struct target_epoll_event *target_ep; 7729 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 7730 goto efault; 7731 } 7732 ep.events = tswap32(target_ep->events); 7733 /* The epoll_data_t union is just opaque data to the kernel, 7734 * so we transfer all 64 bits across and need not worry what 7735 * actual data type it is. 7736 */ 7737 ep.data.u64 = tswap64(target_ep->data.u64); 7738 unlock_user_struct(target_ep, arg4, 0); 7739 epp = &ep; 7740 } 7741 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 7742 break; 7743 } 7744 #endif 7745 7746 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 7747 #define IMPLEMENT_EPOLL_PWAIT 7748 #endif 7749 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 7750 #if defined(TARGET_NR_epoll_wait) 7751 case TARGET_NR_epoll_wait: 7752 #endif 7753 #if defined(IMPLEMENT_EPOLL_PWAIT) 7754 case TARGET_NR_epoll_pwait: 7755 #endif 7756 { 7757 struct target_epoll_event *target_ep; 7758 struct epoll_event *ep; 7759 int epfd = arg1; 7760 int maxevents = arg3; 7761 int timeout = arg4; 7762 7763 target_ep = lock_user(VERIFY_WRITE, arg2, 7764 maxevents * sizeof(struct target_epoll_event), 1); 7765 if (!target_ep) { 7766 goto efault; 7767 } 7768 7769 ep = alloca(maxevents * sizeof(struct epoll_event)); 7770 7771 switch (num) { 7772 #if defined(IMPLEMENT_EPOLL_PWAIT) 7773 case TARGET_NR_epoll_pwait: 7774 { 7775 target_sigset_t *target_set; 7776 sigset_t _set, *set = &_set; 7777 7778 if (arg5) { 7779 target_set = lock_user(VERIFY_READ, arg5, 7780 sizeof(target_sigset_t), 1); 7781 if (!target_set) { 7782 unlock_user(target_ep, arg2, 0); 7783 goto efault; 7784 } 7785 target_to_host_sigset(set, target_set); 7786 unlock_user(target_set, arg5, 0); 7787 } else { 7788 set = NULL; 7789 } 7790 7791 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 7792 break; 7793 } 7794 #endif 7795 #if defined(TARGET_NR_epoll_wait) 7796 case TARGET_NR_epoll_wait: 7797 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 7798 break; 7799 #endif 7800 default: 7801 ret = -TARGET_ENOSYS; 7802 } 7803 if (!is_error(ret)) { 7804 int i; 7805 for (i = 0; i < ret; i++) { 7806 target_ep[i].events = tswap32(ep[i].events); 7807 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 7808 } 7809 } 7810 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 7811 break; 7812 } 7813 #endif 7814 #endif 7815 default: 7816 unimplemented: 7817 gemu_log("qemu: Unsupported syscall: %d\n", num); 7818 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 7819 unimplemented_nowarn: 7820 #endif 7821 ret = -TARGET_ENOSYS; 7822 break; 7823 } 7824 fail: 7825 #ifdef DEBUG 7826 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 7827 #endif 7828 if(do_strace) 7829 print_syscall_ret(num, ret); 7830 return ret; 7831 efault: 7832 ret = -TARGET_EFAULT; 7833 goto fail; 7834 } 7835