1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <stdarg.h> 23 #include <string.h> 24 #include <elf.h> 25 #include <endian.h> 26 #include <errno.h> 27 #include <unistd.h> 28 #include <fcntl.h> 29 #include <time.h> 30 #include <limits.h> 31 #include <sys/types.h> 32 #include <sys/ipc.h> 33 #include <sys/msg.h> 34 #include <sys/wait.h> 35 #include <sys/time.h> 36 #include <sys/stat.h> 37 #include <sys/mount.h> 38 #include <sys/prctl.h> 39 #include <sys/resource.h> 40 #include <sys/mman.h> 41 #include <sys/swap.h> 42 #include <signal.h> 43 #include <sched.h> 44 #ifdef __ia64__ 45 int __clone2(int (*fn)(void *), void *child_stack_base, 46 size_t stack_size, int flags, void *arg, ...); 47 #endif 48 #include <sys/socket.h> 49 #include <sys/un.h> 50 #include <sys/uio.h> 51 #include <sys/poll.h> 52 #include <sys/times.h> 53 #include <sys/shm.h> 54 #include <sys/sem.h> 55 #include <sys/statfs.h> 56 #include <utime.h> 57 #include <sys/sysinfo.h> 58 #include <sys/utsname.h> 59 //#include <sys/user.h> 60 #include <netinet/ip.h> 61 #include <netinet/tcp.h> 62 #include <linux/wireless.h> 63 #include "qemu-common.h" 64 #ifdef TARGET_GPROF 65 #include <sys/gmon.h> 66 #endif 67 #ifdef CONFIG_EVENTFD 68 #include <sys/eventfd.h> 69 #endif 70 #ifdef CONFIG_EPOLL 71 #include <sys/epoll.h> 72 #endif 73 #ifdef CONFIG_ATTR 74 #include "qemu-xattr.h" 75 #endif 76 77 #define termios host_termios 78 #define winsize host_winsize 79 #define termio host_termio 80 #define sgttyb host_sgttyb /* same as target */ 81 #define tchars host_tchars /* same as target */ 82 #define ltchars host_ltchars /* same as target */ 83 84 #include <linux/termios.h> 85 #include <linux/unistd.h> 86 #include <linux/utsname.h> 87 #include <linux/cdrom.h> 88 #include <linux/hdreg.h> 89 #include <linux/soundcard.h> 90 #include <linux/kd.h> 91 #include <linux/mtio.h> 92 #include <linux/fs.h> 93 #if defined(CONFIG_FIEMAP) 94 #include <linux/fiemap.h> 95 #endif 96 #include <linux/fb.h> 97 #include <linux/vt.h> 98 #include <linux/dm-ioctl.h> 99 #include "linux_loop.h" 100 #include "cpu-uname.h" 101 102 #include "qemu.h" 103 104 #if defined(CONFIG_USE_NPTL) 105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ 106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) 107 #else 108 /* XXX: Hardcode the above values. 
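   For now we simply fall back to 0, i.e. none of the NPTL-specific clone
   flags are treated as handled.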
*/ 109 #define CLONE_NPTL_FLAGS2 0 110 #endif 111 112 //#define DEBUG 113 114 //#include <linux/msdos_fs.h> 115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 117 118 119 #undef _syscall0 120 #undef _syscall1 121 #undef _syscall2 122 #undef _syscall3 123 #undef _syscall4 124 #undef _syscall5 125 #undef _syscall6 126 127 #define _syscall0(type,name) \ 128 static type name (void) \ 129 { \ 130 return syscall(__NR_##name); \ 131 } 132 133 #define _syscall1(type,name,type1,arg1) \ 134 static type name (type1 arg1) \ 135 { \ 136 return syscall(__NR_##name, arg1); \ 137 } 138 139 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 140 static type name (type1 arg1,type2 arg2) \ 141 { \ 142 return syscall(__NR_##name, arg1, arg2); \ 143 } 144 145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 146 static type name (type1 arg1,type2 arg2,type3 arg3) \ 147 { \ 148 return syscall(__NR_##name, arg1, arg2, arg3); \ 149 } 150 151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 153 { \ 154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 155 } 156 157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 158 type5,arg5) \ 159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 160 { \ 161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 162 } 163 164 165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 166 type5,arg5,type6,arg6) \ 167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 168 type6 arg6) \ 169 { \ 170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 171 } 172 173 174 #define __NR_sys_uname __NR_uname 175 #define __NR_sys_faccessat __NR_faccessat 176 #define __NR_sys_fchmodat __NR_fchmodat 177 #define __NR_sys_fchownat __NR_fchownat 178 #define __NR_sys_fstatat64 __NR_fstatat64 179 #define __NR_sys_futimesat __NR_futimesat 180 #define __NR_sys_getcwd1 __NR_getcwd 181 #define __NR_sys_getdents __NR_getdents 182 #define __NR_sys_getdents64 __NR_getdents64 183 #define __NR_sys_getpriority __NR_getpriority 184 #define __NR_sys_linkat __NR_linkat 185 #define __NR_sys_mkdirat __NR_mkdirat 186 #define __NR_sys_mknodat __NR_mknodat 187 #define __NR_sys_newfstatat __NR_newfstatat 188 #define __NR_sys_openat __NR_openat 189 #define __NR_sys_readlinkat __NR_readlinkat 190 #define __NR_sys_renameat __NR_renameat 191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 192 #define __NR_sys_symlinkat __NR_symlinkat 193 #define __NR_sys_syslog __NR_syslog 194 #define __NR_sys_tgkill __NR_tgkill 195 #define __NR_sys_tkill __NR_tkill 196 #define __NR_sys_unlinkat __NR_unlinkat 197 #define __NR_sys_utimensat __NR_utimensat 198 #define __NR_sys_futex __NR_futex 199 #define __NR_sys_inotify_init __NR_inotify_init 200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 202 203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 204 defined(__s390x__) 205 #define __NR__llseek __NR_lseek 206 #endif 207 208 #ifdef __NR_gettid 209 _syscall0(int, gettid) 210 #else 211 /* This is a replacement for the host gettid() and must return a host 212 errno. 
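   Returning -ENOSYS mimics what a failing host syscall would report when
   __NR_gettid is not available.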
*/ 213 static int gettid(void) { 214 return -ENOSYS; 215 } 216 #endif 217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 220 #endif 221 _syscall2(int, sys_getpriority, int, which, int, who); 222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 223 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 224 loff_t *, res, uint, wh); 225 #endif 226 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo) 227 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 229 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig) 230 #endif 231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 232 _syscall2(int,sys_tkill,int,tid,int,sig) 233 #endif 234 #ifdef __NR_exit_group 235 _syscall1(int,exit_group,int,error_code) 236 #endif 237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 238 _syscall1(int,set_tid_address,int *,tidptr) 239 #endif 240 #if defined(CONFIG_USE_NPTL) 241 #if defined(TARGET_NR_futex) && defined(__NR_futex) 242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 243 const struct timespec *,timeout,int *,uaddr2,int,val3) 244 #endif 245 #endif 246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 248 unsigned long *, user_mask_ptr); 249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 251 unsigned long *, user_mask_ptr); 252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 253 void *, arg); 254 255 static bitmask_transtbl fcntl_flags_tbl[] = { 256 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 257 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 258 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 259 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 260 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 261 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 262 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 263 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 264 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 265 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 266 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 267 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 268 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 269 #if defined(O_DIRECT) 270 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 271 #endif 272 { 0, 0, 0, 0 } 273 }; 274 275 #define COPY_UTSNAME_FIELD(dest, src) \ 276 do { \ 277 /* __NEW_UTS_LEN doesn't include terminating null */ \ 278 (void) strncpy((dest), (src), __NEW_UTS_LEN); \ 279 (dest)[__NEW_UTS_LEN] = '\0'; \ 280 } while (0) 281 282 static int sys_uname(struct new_utsname *buf) 283 { 284 struct utsname uts_buf; 285 286 if (uname(&uts_buf) < 0) 287 return (-1); 288 289 /* 290 * Just in case these have some differences, we 291 * translate utsname to new_utsname (which is the 292 * struct linux kernel uses). 
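 * Every field is truncated to __NEW_UTS_LEN characters and explicitly
 * NUL-terminated by COPY_UTSNAME_FIELD().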
293 */ 294 295 memset(buf, 0, sizeof(*buf)); 296 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname); 297 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename); 298 COPY_UTSNAME_FIELD(buf->release, uts_buf.release); 299 COPY_UTSNAME_FIELD(buf->version, uts_buf.version); 300 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine); 301 #ifdef _GNU_SOURCE 302 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname); 303 #endif 304 return (0); 305 306 #undef COPY_UTSNAME_FIELD 307 } 308 309 static int sys_getcwd1(char *buf, size_t size) 310 { 311 if (getcwd(buf, size) == NULL) { 312 /* getcwd() sets errno */ 313 return (-1); 314 } 315 return strlen(buf)+1; 316 } 317 318 #ifdef CONFIG_ATFILE 319 /* 320 * Host system seems to have atfile syscall stubs available. We 321 * now enable them one by one as specified by target syscall_nr.h. 322 */ 323 324 #ifdef TARGET_NR_faccessat 325 static int sys_faccessat(int dirfd, const char *pathname, int mode) 326 { 327 return (faccessat(dirfd, pathname, mode, 0)); 328 } 329 #endif 330 #ifdef TARGET_NR_fchmodat 331 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode) 332 { 333 return (fchmodat(dirfd, pathname, mode, 0)); 334 } 335 #endif 336 #if defined(TARGET_NR_fchownat) 337 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner, 338 gid_t group, int flags) 339 { 340 return (fchownat(dirfd, pathname, owner, group, flags)); 341 } 342 #endif 343 #ifdef __NR_fstatat64 344 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf, 345 int flags) 346 { 347 return (fstatat(dirfd, pathname, buf, flags)); 348 } 349 #endif 350 #ifdef __NR_newfstatat 351 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf, 352 int flags) 353 { 354 return (fstatat(dirfd, pathname, buf, flags)); 355 } 356 #endif 357 #ifdef TARGET_NR_futimesat 358 static int sys_futimesat(int dirfd, const char *pathname, 359 const struct timeval times[2]) 360 { 361 return (futimesat(dirfd, pathname, times)); 362 } 363 #endif 364 #ifdef TARGET_NR_linkat 365 static int sys_linkat(int olddirfd, const char *oldpath, 366 int newdirfd, const char *newpath, int flags) 367 { 368 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags)); 369 } 370 #endif 371 #ifdef TARGET_NR_mkdirat 372 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode) 373 { 374 return (mkdirat(dirfd, pathname, mode)); 375 } 376 #endif 377 #ifdef TARGET_NR_mknodat 378 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode, 379 dev_t dev) 380 { 381 return (mknodat(dirfd, pathname, mode, dev)); 382 } 383 #endif 384 #ifdef TARGET_NR_openat 385 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode) 386 { 387 /* 388 * open(2) has extra parameter 'mode' when called with 389 * flag O_CREAT. 
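 * Only forward 'mode' to the host openat() when O_CREAT is set, so an
 * unused mode argument is never passed through.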
390 */ 391 if ((flags & O_CREAT) != 0) { 392 return (openat(dirfd, pathname, flags, mode)); 393 } 394 return (openat(dirfd, pathname, flags)); 395 } 396 #endif 397 #ifdef TARGET_NR_readlinkat 398 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz) 399 { 400 return (readlinkat(dirfd, pathname, buf, bufsiz)); 401 } 402 #endif 403 #ifdef TARGET_NR_renameat 404 static int sys_renameat(int olddirfd, const char *oldpath, 405 int newdirfd, const char *newpath) 406 { 407 return (renameat(olddirfd, oldpath, newdirfd, newpath)); 408 } 409 #endif 410 #ifdef TARGET_NR_symlinkat 411 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath) 412 { 413 return (symlinkat(oldpath, newdirfd, newpath)); 414 } 415 #endif 416 #ifdef TARGET_NR_unlinkat 417 static int sys_unlinkat(int dirfd, const char *pathname, int flags) 418 { 419 return (unlinkat(dirfd, pathname, flags)); 420 } 421 #endif 422 #else /* !CONFIG_ATFILE */ 423 424 /* 425 * Try direct syscalls instead 426 */ 427 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 428 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode) 429 #endif 430 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 431 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode) 432 #endif 433 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 434 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname, 435 uid_t,owner,gid_t,group,int,flags) 436 #endif 437 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 438 defined(__NR_fstatat64) 439 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname, 440 struct stat *,buf,int,flags) 441 #endif 442 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 443 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname, 444 const struct timeval *,times) 445 #endif 446 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \ 447 defined(__NR_newfstatat) 448 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname, 449 struct stat *,buf,int,flags) 450 #endif 451 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 452 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath, 453 int,newdirfd,const char *,newpath,int,flags) 454 #endif 455 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 456 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode) 457 #endif 458 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 459 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname, 460 mode_t,mode,dev_t,dev) 461 #endif 462 #if defined(TARGET_NR_openat) && defined(__NR_openat) 463 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode) 464 #endif 465 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 466 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname, 467 char *,buf,size_t,bufsize) 468 #endif 469 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 470 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath, 471 int,newdirfd,const char *,newpath) 472 #endif 473 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 474 _syscall3(int,sys_symlinkat,const char *,oldpath, 475 int,newdirfd,const char *,newpath) 476 #endif 477 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 478 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags) 479 #endif 480 481 #endif /* CONFIG_ATFILE */ 482 483 #ifdef CONFIG_UTIMENSAT 484 static int sys_utimensat(int dirfd, const char *pathname, 485 const 
struct timespec times[2], int flags) 486 { 487 if (pathname == NULL) 488 return futimens(dirfd, times); 489 else 490 return utimensat(dirfd, pathname, times, flags); 491 } 492 #else 493 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat) 494 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 495 const struct timespec *,tsp,int,flags) 496 #endif 497 #endif /* CONFIG_UTIMENSAT */ 498 499 #ifdef CONFIG_INOTIFY 500 #include <sys/inotify.h> 501 502 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 503 static int sys_inotify_init(void) 504 { 505 return (inotify_init()); 506 } 507 #endif 508 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 509 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 510 { 511 return (inotify_add_watch(fd, pathname, mask)); 512 } 513 #endif 514 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 515 static int sys_inotify_rm_watch(int fd, int32_t wd) 516 { 517 return (inotify_rm_watch(fd, wd)); 518 } 519 #endif 520 #ifdef CONFIG_INOTIFY1 521 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 522 static int sys_inotify_init1(int flags) 523 { 524 return (inotify_init1(flags)); 525 } 526 #endif 527 #endif 528 #else 529 /* Userspace can usually survive runtime without inotify */ 530 #undef TARGET_NR_inotify_init 531 #undef TARGET_NR_inotify_init1 532 #undef TARGET_NR_inotify_add_watch 533 #undef TARGET_NR_inotify_rm_watch 534 #endif /* CONFIG_INOTIFY */ 535 536 #if defined(TARGET_NR_ppoll) 537 #ifndef __NR_ppoll 538 # define __NR_ppoll -1 539 #endif 540 #define __NR_sys_ppoll __NR_ppoll 541 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds, 542 struct timespec *, timeout, const __sigset_t *, sigmask, 543 size_t, sigsetsize) 544 #endif 545 546 #if defined(TARGET_NR_pselect6) 547 #ifndef __NR_pselect6 548 # define __NR_pselect6 -1 549 #endif 550 #define __NR_sys_pselect6 __NR_pselect6 551 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, 552 fd_set *, exceptfds, struct timespec *, timeout, void *, sig); 553 #endif 554 555 #if defined(TARGET_NR_prlimit64) 556 #ifndef __NR_prlimit64 557 # define __NR_prlimit64 -1 558 #endif 559 #define __NR_sys_prlimit64 __NR_prlimit64 560 /* The glibc rlimit structure may not be that used by the underlying syscall */ 561 struct host_rlimit64 { 562 uint64_t rlim_cur; 563 uint64_t rlim_max; 564 }; 565 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 566 const struct host_rlimit64 *, new_limit, 567 struct host_rlimit64 *, old_limit) 568 #endif 569 570 extern int personality(int); 571 extern int flock(int, int); 572 extern int setfsuid(int); 573 extern int setfsgid(int); 574 extern int setgroups(int, gid_t *); 575 576 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ 577 #ifdef TARGET_ARM 578 static inline int regpairs_aligned(void *cpu_env) { 579 return ((((CPUARMState *)cpu_env)->eabi) == 1) ; 580 } 581 #elif defined(TARGET_MIPS) 582 static inline int regpairs_aligned(void *cpu_env) { return 1; } 583 #else 584 static inline int regpairs_aligned(void *cpu_env) { return 0; } 585 #endif 586 587 #define ERRNO_TABLE_SIZE 1200 588 589 /* target_to_host_errno_table[] is initialized from 590 * host_to_target_errno_table[] in syscall_init(). 
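 * Entries left at zero mean "no translation needed": the helpers below
 * then return the errno value unchanged.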
*/ 591 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 592 }; 593 594 /* 595 * This list is the union of errno values overridden in asm-<arch>/errno.h 596 * minus the errnos that are not actually generic to all archs. 597 */ 598 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 599 [EIDRM] = TARGET_EIDRM, 600 [ECHRNG] = TARGET_ECHRNG, 601 [EL2NSYNC] = TARGET_EL2NSYNC, 602 [EL3HLT] = TARGET_EL3HLT, 603 [EL3RST] = TARGET_EL3RST, 604 [ELNRNG] = TARGET_ELNRNG, 605 [EUNATCH] = TARGET_EUNATCH, 606 [ENOCSI] = TARGET_ENOCSI, 607 [EL2HLT] = TARGET_EL2HLT, 608 [EDEADLK] = TARGET_EDEADLK, 609 [ENOLCK] = TARGET_ENOLCK, 610 [EBADE] = TARGET_EBADE, 611 [EBADR] = TARGET_EBADR, 612 [EXFULL] = TARGET_EXFULL, 613 [ENOANO] = TARGET_ENOANO, 614 [EBADRQC] = TARGET_EBADRQC, 615 [EBADSLT] = TARGET_EBADSLT, 616 [EBFONT] = TARGET_EBFONT, 617 [ENOSTR] = TARGET_ENOSTR, 618 [ENODATA] = TARGET_ENODATA, 619 [ETIME] = TARGET_ETIME, 620 [ENOSR] = TARGET_ENOSR, 621 [ENONET] = TARGET_ENONET, 622 [ENOPKG] = TARGET_ENOPKG, 623 [EREMOTE] = TARGET_EREMOTE, 624 [ENOLINK] = TARGET_ENOLINK, 625 [EADV] = TARGET_EADV, 626 [ESRMNT] = TARGET_ESRMNT, 627 [ECOMM] = TARGET_ECOMM, 628 [EPROTO] = TARGET_EPROTO, 629 [EDOTDOT] = TARGET_EDOTDOT, 630 [EMULTIHOP] = TARGET_EMULTIHOP, 631 [EBADMSG] = TARGET_EBADMSG, 632 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 633 [EOVERFLOW] = TARGET_EOVERFLOW, 634 [ENOTUNIQ] = TARGET_ENOTUNIQ, 635 [EBADFD] = TARGET_EBADFD, 636 [EREMCHG] = TARGET_EREMCHG, 637 [ELIBACC] = TARGET_ELIBACC, 638 [ELIBBAD] = TARGET_ELIBBAD, 639 [ELIBSCN] = TARGET_ELIBSCN, 640 [ELIBMAX] = TARGET_ELIBMAX, 641 [ELIBEXEC] = TARGET_ELIBEXEC, 642 [EILSEQ] = TARGET_EILSEQ, 643 [ENOSYS] = TARGET_ENOSYS, 644 [ELOOP] = TARGET_ELOOP, 645 [ERESTART] = TARGET_ERESTART, 646 [ESTRPIPE] = TARGET_ESTRPIPE, 647 [ENOTEMPTY] = TARGET_ENOTEMPTY, 648 [EUSERS] = TARGET_EUSERS, 649 [ENOTSOCK] = TARGET_ENOTSOCK, 650 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 651 [EMSGSIZE] = TARGET_EMSGSIZE, 652 [EPROTOTYPE] = TARGET_EPROTOTYPE, 653 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 654 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 655 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 656 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 657 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 658 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 659 [EADDRINUSE] = TARGET_EADDRINUSE, 660 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 661 [ENETDOWN] = TARGET_ENETDOWN, 662 [ENETUNREACH] = TARGET_ENETUNREACH, 663 [ENETRESET] = TARGET_ENETRESET, 664 [ECONNABORTED] = TARGET_ECONNABORTED, 665 [ECONNRESET] = TARGET_ECONNRESET, 666 [ENOBUFS] = TARGET_ENOBUFS, 667 [EISCONN] = TARGET_EISCONN, 668 [ENOTCONN] = TARGET_ENOTCONN, 669 [EUCLEAN] = TARGET_EUCLEAN, 670 [ENOTNAM] = TARGET_ENOTNAM, 671 [ENAVAIL] = TARGET_ENAVAIL, 672 [EISNAM] = TARGET_EISNAM, 673 [EREMOTEIO] = TARGET_EREMOTEIO, 674 [ESHUTDOWN] = TARGET_ESHUTDOWN, 675 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 676 [ETIMEDOUT] = TARGET_ETIMEDOUT, 677 [ECONNREFUSED] = TARGET_ECONNREFUSED, 678 [EHOSTDOWN] = TARGET_EHOSTDOWN, 679 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 680 [EALREADY] = TARGET_EALREADY, 681 [EINPROGRESS] = TARGET_EINPROGRESS, 682 [ESTALE] = TARGET_ESTALE, 683 [ECANCELED] = TARGET_ECANCELED, 684 [ENOMEDIUM] = TARGET_ENOMEDIUM, 685 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 686 #ifdef ENOKEY 687 [ENOKEY] = TARGET_ENOKEY, 688 #endif 689 #ifdef EKEYEXPIRED 690 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 691 #endif 692 #ifdef EKEYREVOKED 693 [EKEYREVOKED] = TARGET_EKEYREVOKED, 694 #endif 695 #ifdef EKEYREJECTED 696 [EKEYREJECTED] = TARGET_EKEYREJECTED, 697 #endif 698 #ifdef 
EOWNERDEAD 699 [EOWNERDEAD] = TARGET_EOWNERDEAD, 700 #endif 701 #ifdef ENOTRECOVERABLE 702 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 703 #endif 704 }; 705 706 static inline int host_to_target_errno(int err) 707 { 708 if(host_to_target_errno_table[err]) 709 return host_to_target_errno_table[err]; 710 return err; 711 } 712 713 static inline int target_to_host_errno(int err) 714 { 715 if (target_to_host_errno_table[err]) 716 return target_to_host_errno_table[err]; 717 return err; 718 } 719 720 static inline abi_long get_errno(abi_long ret) 721 { 722 if (ret == -1) 723 return -host_to_target_errno(errno); 724 else 725 return ret; 726 } 727 728 static inline int is_error(abi_long ret) 729 { 730 return (abi_ulong)ret >= (abi_ulong)(-4096); 731 } 732 733 char *target_strerror(int err) 734 { 735 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) { 736 return NULL; 737 } 738 return strerror(target_to_host_errno(err)); 739 } 740 741 static abi_ulong target_brk; 742 static abi_ulong target_original_brk; 743 static abi_ulong brk_page; 744 745 void target_set_brk(abi_ulong new_brk) 746 { 747 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); 748 brk_page = HOST_PAGE_ALIGN(target_brk); 749 } 750 751 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) 752 #define DEBUGF_BRK(message, args...) 753 754 /* do_brk() must return target values and target errnos. */ 755 abi_long do_brk(abi_ulong new_brk) 756 { 757 abi_long mapped_addr; 758 int new_alloc_size; 759 760 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk); 761 762 if (!new_brk) { 763 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk); 764 return target_brk; 765 } 766 if (new_brk < target_original_brk) { 767 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n", 768 target_brk); 769 return target_brk; 770 } 771 772 /* If the new brk is less than the highest page reserved to the 773 * target heap allocation, set it and we're almost done... */ 774 if (new_brk <= brk_page) { 775 /* Heap contents are initialized to zero, as for anonymous 776 * mapped pages. */ 777 if (new_brk > target_brk) { 778 memset(g2h(target_brk), 0, new_brk - target_brk); 779 } 780 target_brk = new_brk; 781 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk); 782 return target_brk; 783 } 784 785 /* We need to allocate more memory after the brk... Note that 786 * we don't use MAP_FIXED because that will map over the top of 787 * any existing mapping (like the one with the host libc or qemu 788 * itself); instead we treat "mapped but at wrong address" as 789 * a failure and unmap again. 790 */ 791 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page); 792 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, 793 PROT_READ|PROT_WRITE, 794 MAP_ANON|MAP_PRIVATE, 0, 0)); 795 796 if (mapped_addr == brk_page) { 797 /* Heap contents are initialized to zero, as for anonymous 798 * mapped pages. Technically the new pages are already 799 * initialized to zero since they *are* anonymous mapped 800 * pages, however we have to take care with the contents that 801 * come from the remaining part of the previous page: it may 802 * contains garbage data due to a previous heap usage (grown 803 * then shrunken). 
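 * That is why the memset() below clears everything from the old
 * target_brk up to brk_page before the new break is published.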
*/ 804 memset(g2h(target_brk), 0, brk_page - target_brk); 805 806 target_brk = new_brk; 807 brk_page = HOST_PAGE_ALIGN(target_brk); 808 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n", 809 target_brk); 810 return target_brk; 811 } else if (mapped_addr != -1) { 812 /* Mapped but at wrong address, meaning there wasn't actually 813 * enough space for this brk. 814 */ 815 target_munmap(mapped_addr, new_alloc_size); 816 mapped_addr = -1; 817 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk); 818 } 819 else { 820 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk); 821 } 822 823 #if defined(TARGET_ALPHA) 824 /* We (partially) emulate OSF/1 on Alpha, which requires we 825 return a proper errno, not an unchanged brk value. */ 826 return -TARGET_ENOMEM; 827 #endif 828 /* For everything else, return the previous break. */ 829 return target_brk; 830 } 831 832 static inline abi_long copy_from_user_fdset(fd_set *fds, 833 abi_ulong target_fds_addr, 834 int n) 835 { 836 int i, nw, j, k; 837 abi_ulong b, *target_fds; 838 839 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 840 if (!(target_fds = lock_user(VERIFY_READ, 841 target_fds_addr, 842 sizeof(abi_ulong) * nw, 843 1))) 844 return -TARGET_EFAULT; 845 846 FD_ZERO(fds); 847 k = 0; 848 for (i = 0; i < nw; i++) { 849 /* grab the abi_ulong */ 850 __get_user(b, &target_fds[i]); 851 for (j = 0; j < TARGET_ABI_BITS; j++) { 852 /* check the bit inside the abi_ulong */ 853 if ((b >> j) & 1) 854 FD_SET(k, fds); 855 k++; 856 } 857 } 858 859 unlock_user(target_fds, target_fds_addr, 0); 860 861 return 0; 862 } 863 864 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr, 865 abi_ulong target_fds_addr, 866 int n) 867 { 868 if (target_fds_addr) { 869 if (copy_from_user_fdset(fds, target_fds_addr, n)) 870 return -TARGET_EFAULT; 871 *fds_ptr = fds; 872 } else { 873 *fds_ptr = NULL; 874 } 875 return 0; 876 } 877 878 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr, 879 const fd_set *fds, 880 int n) 881 { 882 int i, nw, j, k; 883 abi_long v; 884 abi_ulong *target_fds; 885 886 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 887 if (!(target_fds = lock_user(VERIFY_WRITE, 888 target_fds_addr, 889 sizeof(abi_ulong) * nw, 890 0))) 891 return -TARGET_EFAULT; 892 893 k = 0; 894 for (i = 0; i < nw; i++) { 895 v = 0; 896 for (j = 0; j < TARGET_ABI_BITS; j++) { 897 v |= ((FD_ISSET(k, fds) != 0) << j); 898 k++; 899 } 900 __put_user(v, &target_fds[i]); 901 } 902 903 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw); 904 905 return 0; 906 } 907 908 #if defined(__alpha__) 909 #define HOST_HZ 1024 910 #else 911 #define HOST_HZ 100 912 #endif 913 914 static inline abi_long host_to_target_clock_t(long ticks) 915 { 916 #if HOST_HZ == TARGET_HZ 917 return ticks; 918 #else 919 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ; 920 #endif 921 } 922 923 static inline abi_long host_to_target_rusage(abi_ulong target_addr, 924 const struct rusage *rusage) 925 { 926 struct target_rusage *target_rusage; 927 928 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) 929 return -TARGET_EFAULT; 930 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec); 931 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec); 932 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec); 933 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec); 934 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss); 935 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss); 
936 target_rusage->ru_idrss = tswapal(rusage->ru_idrss); 937 target_rusage->ru_isrss = tswapal(rusage->ru_isrss); 938 target_rusage->ru_minflt = tswapal(rusage->ru_minflt); 939 target_rusage->ru_majflt = tswapal(rusage->ru_majflt); 940 target_rusage->ru_nswap = tswapal(rusage->ru_nswap); 941 target_rusage->ru_inblock = tswapal(rusage->ru_inblock); 942 target_rusage->ru_oublock = tswapal(rusage->ru_oublock); 943 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd); 944 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv); 945 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals); 946 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw); 947 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw); 948 unlock_user_struct(target_rusage, target_addr, 1); 949 950 return 0; 951 } 952 953 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim) 954 { 955 abi_ulong target_rlim_swap; 956 rlim_t result; 957 958 target_rlim_swap = tswapal(target_rlim); 959 if (target_rlim_swap == TARGET_RLIM_INFINITY) 960 return RLIM_INFINITY; 961 962 result = target_rlim_swap; 963 if (target_rlim_swap != (rlim_t)result) 964 return RLIM_INFINITY; 965 966 return result; 967 } 968 969 static inline abi_ulong host_to_target_rlim(rlim_t rlim) 970 { 971 abi_ulong target_rlim_swap; 972 abi_ulong result; 973 974 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) 975 target_rlim_swap = TARGET_RLIM_INFINITY; 976 else 977 target_rlim_swap = rlim; 978 result = tswapal(target_rlim_swap); 979 980 return result; 981 } 982 983 static inline int target_to_host_resource(int code) 984 { 985 switch (code) { 986 case TARGET_RLIMIT_AS: 987 return RLIMIT_AS; 988 case TARGET_RLIMIT_CORE: 989 return RLIMIT_CORE; 990 case TARGET_RLIMIT_CPU: 991 return RLIMIT_CPU; 992 case TARGET_RLIMIT_DATA: 993 return RLIMIT_DATA; 994 case TARGET_RLIMIT_FSIZE: 995 return RLIMIT_FSIZE; 996 case TARGET_RLIMIT_LOCKS: 997 return RLIMIT_LOCKS; 998 case TARGET_RLIMIT_MEMLOCK: 999 return RLIMIT_MEMLOCK; 1000 case TARGET_RLIMIT_MSGQUEUE: 1001 return RLIMIT_MSGQUEUE; 1002 case TARGET_RLIMIT_NICE: 1003 return RLIMIT_NICE; 1004 case TARGET_RLIMIT_NOFILE: 1005 return RLIMIT_NOFILE; 1006 case TARGET_RLIMIT_NPROC: 1007 return RLIMIT_NPROC; 1008 case TARGET_RLIMIT_RSS: 1009 return RLIMIT_RSS; 1010 case TARGET_RLIMIT_RTPRIO: 1011 return RLIMIT_RTPRIO; 1012 case TARGET_RLIMIT_SIGPENDING: 1013 return RLIMIT_SIGPENDING; 1014 case TARGET_RLIMIT_STACK: 1015 return RLIMIT_STACK; 1016 default: 1017 return code; 1018 } 1019 } 1020 1021 static inline abi_long copy_from_user_timeval(struct timeval *tv, 1022 abi_ulong target_tv_addr) 1023 { 1024 struct target_timeval *target_tv; 1025 1026 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) 1027 return -TARGET_EFAULT; 1028 1029 __get_user(tv->tv_sec, &target_tv->tv_sec); 1030 __get_user(tv->tv_usec, &target_tv->tv_usec); 1031 1032 unlock_user_struct(target_tv, target_tv_addr, 0); 1033 1034 return 0; 1035 } 1036 1037 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 1038 const struct timeval *tv) 1039 { 1040 struct target_timeval *target_tv; 1041 1042 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) 1043 return -TARGET_EFAULT; 1044 1045 __put_user(tv->tv_sec, &target_tv->tv_sec); 1046 __put_user(tv->tv_usec, &target_tv->tv_usec); 1047 1048 unlock_user_struct(target_tv, target_tv_addr, 1); 1049 1050 return 0; 1051 } 1052 1053 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 1054 #include <mqueue.h> 1055 1056 static inline abi_long copy_from_user_mq_attr(struct mq_attr 
*attr, 1057 abi_ulong target_mq_attr_addr) 1058 { 1059 struct target_mq_attr *target_mq_attr; 1060 1061 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 1062 target_mq_attr_addr, 1)) 1063 return -TARGET_EFAULT; 1064 1065 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 1066 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1067 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1068 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1069 1070 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 1071 1072 return 0; 1073 } 1074 1075 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 1076 const struct mq_attr *attr) 1077 { 1078 struct target_mq_attr *target_mq_attr; 1079 1080 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 1081 target_mq_attr_addr, 0)) 1082 return -TARGET_EFAULT; 1083 1084 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 1085 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1086 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1087 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1088 1089 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 1090 1091 return 0; 1092 } 1093 #endif 1094 1095 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 1096 /* do_select() must return target values and target errnos. */ 1097 static abi_long do_select(int n, 1098 abi_ulong rfd_addr, abi_ulong wfd_addr, 1099 abi_ulong efd_addr, abi_ulong target_tv_addr) 1100 { 1101 fd_set rfds, wfds, efds; 1102 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 1103 struct timeval tv, *tv_ptr; 1104 abi_long ret; 1105 1106 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 1107 if (ret) { 1108 return ret; 1109 } 1110 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 1111 if (ret) { 1112 return ret; 1113 } 1114 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 1115 if (ret) { 1116 return ret; 1117 } 1118 1119 if (target_tv_addr) { 1120 if (copy_from_user_timeval(&tv, target_tv_addr)) 1121 return -TARGET_EFAULT; 1122 tv_ptr = &tv; 1123 } else { 1124 tv_ptr = NULL; 1125 } 1126 1127 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr)); 1128 1129 if (!is_error(ret)) { 1130 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1131 return -TARGET_EFAULT; 1132 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1133 return -TARGET_EFAULT; 1134 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1135 return -TARGET_EFAULT; 1136 1137 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv)) 1138 return -TARGET_EFAULT; 1139 } 1140 1141 return ret; 1142 } 1143 #endif 1144 1145 static abi_long do_pipe2(int host_pipe[], int flags) 1146 { 1147 #ifdef CONFIG_PIPE2 1148 return pipe2(host_pipe, flags); 1149 #else 1150 return -ENOSYS; 1151 #endif 1152 } 1153 1154 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, 1155 int flags, int is_pipe2) 1156 { 1157 int host_pipe[2]; 1158 abi_long ret; 1159 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); 1160 1161 if (is_error(ret)) 1162 return get_errno(ret); 1163 1164 /* Several targets have special calling conventions for the original 1165 pipe syscall, but didn't replicate this into the pipe2 syscall. 
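   On Alpha, MIPS and SH4 the second descriptor is returned in a CPU
   register and the first one becomes the syscall return value, so nothing
   is written to the guest pipedes buffer in that case.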
*/ 1166 if (!is_pipe2) { 1167 #if defined(TARGET_ALPHA) 1168 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1]; 1169 return host_pipe[0]; 1170 #elif defined(TARGET_MIPS) 1171 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; 1172 return host_pipe[0]; 1173 #elif defined(TARGET_SH4) 1174 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; 1175 return host_pipe[0]; 1176 #endif 1177 } 1178 1179 if (put_user_s32(host_pipe[0], pipedes) 1180 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0]))) 1181 return -TARGET_EFAULT; 1182 return get_errno(ret); 1183 } 1184 1185 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn, 1186 abi_ulong target_addr, 1187 socklen_t len) 1188 { 1189 struct target_ip_mreqn *target_smreqn; 1190 1191 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1); 1192 if (!target_smreqn) 1193 return -TARGET_EFAULT; 1194 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr; 1195 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr; 1196 if (len == sizeof(struct target_ip_mreqn)) 1197 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex); 1198 unlock_user(target_smreqn, target_addr, 0); 1199 1200 return 0; 1201 } 1202 1203 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr, 1204 abi_ulong target_addr, 1205 socklen_t len) 1206 { 1207 const socklen_t unix_maxlen = sizeof (struct sockaddr_un); 1208 sa_family_t sa_family; 1209 struct target_sockaddr *target_saddr; 1210 1211 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 1212 if (!target_saddr) 1213 return -TARGET_EFAULT; 1214 1215 sa_family = tswap16(target_saddr->sa_family); 1216 1217 /* Oops. The caller might send a incomplete sun_path; sun_path 1218 * must be terminated by \0 (see the manual page), but 1219 * unfortunately it is quite common to specify sockaddr_un 1220 * length as "strlen(x->sun_path)" while it should be 1221 * "strlen(...) + 1". We'll fix that here if needed. 1222 * Linux kernel has a similar feature. 1223 */ 1224 1225 if (sa_family == AF_UNIX) { 1226 if (len < unix_maxlen && len > 0) { 1227 char *cp = (char*)target_saddr; 1228 1229 if ( cp[len-1] && !cp[len] ) 1230 len++; 1231 } 1232 if (len > unix_maxlen) 1233 len = unix_maxlen; 1234 } 1235 1236 memcpy(addr, target_saddr, len); 1237 addr->sa_family = sa_family; 1238 unlock_user(target_saddr, target_addr, 0); 1239 1240 return 0; 1241 } 1242 1243 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1244 struct sockaddr *addr, 1245 socklen_t len) 1246 { 1247 struct target_sockaddr *target_saddr; 1248 1249 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1250 if (!target_saddr) 1251 return -TARGET_EFAULT; 1252 memcpy(target_saddr, addr, len); 1253 target_saddr->sa_family = tswap16(addr->sa_family); 1254 unlock_user(target_saddr, target_addr, len); 1255 1256 return 0; 1257 } 1258 1259 /* ??? Should this also swap msgh->name? 
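   Currently only cmsg_level, cmsg_type, cmsg_len and SCM_RIGHTS file
   descriptor payloads are byte-swapped; other ancillary data is copied
   verbatim after logging a warning.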
*/ 1260 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1261 struct target_msghdr *target_msgh) 1262 { 1263 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1264 abi_long msg_controllen; 1265 abi_ulong target_cmsg_addr; 1266 struct target_cmsghdr *target_cmsg; 1267 socklen_t space = 0; 1268 1269 msg_controllen = tswapal(target_msgh->msg_controllen); 1270 if (msg_controllen < sizeof (struct target_cmsghdr)) 1271 goto the_end; 1272 target_cmsg_addr = tswapal(target_msgh->msg_control); 1273 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1274 if (!target_cmsg) 1275 return -TARGET_EFAULT; 1276 1277 while (cmsg && target_cmsg) { 1278 void *data = CMSG_DATA(cmsg); 1279 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1280 1281 int len = tswapal(target_cmsg->cmsg_len) 1282 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr)); 1283 1284 space += CMSG_SPACE(len); 1285 if (space > msgh->msg_controllen) { 1286 space -= CMSG_SPACE(len); 1287 gemu_log("Host cmsg overflow\n"); 1288 break; 1289 } 1290 1291 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1292 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1293 cmsg->cmsg_len = CMSG_LEN(len); 1294 1295 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1296 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1297 memcpy(data, target_data, len); 1298 } else { 1299 int *fd = (int *)data; 1300 int *target_fd = (int *)target_data; 1301 int i, numfds = len / sizeof(int); 1302 1303 for (i = 0; i < numfds; i++) 1304 fd[i] = tswap32(target_fd[i]); 1305 } 1306 1307 cmsg = CMSG_NXTHDR(msgh, cmsg); 1308 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1309 } 1310 unlock_user(target_cmsg, target_cmsg_addr, 0); 1311 the_end: 1312 msgh->msg_controllen = space; 1313 return 0; 1314 } 1315 1316 /* ??? Should this also swap msgh->name? 
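   This is the mirror of target_to_host_cmsg() for data returned by
   recvmsg(): lengths are recomputed with TARGET_CMSG_LEN()/TARGET_CMSG_SPACE()
   and SCM_RIGHTS descriptors are converted to target byte order.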
*/ 1317 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1318 struct msghdr *msgh) 1319 { 1320 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1321 abi_long msg_controllen; 1322 abi_ulong target_cmsg_addr; 1323 struct target_cmsghdr *target_cmsg; 1324 socklen_t space = 0; 1325 1326 msg_controllen = tswapal(target_msgh->msg_controllen); 1327 if (msg_controllen < sizeof (struct target_cmsghdr)) 1328 goto the_end; 1329 target_cmsg_addr = tswapal(target_msgh->msg_control); 1330 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1331 if (!target_cmsg) 1332 return -TARGET_EFAULT; 1333 1334 while (cmsg && target_cmsg) { 1335 void *data = CMSG_DATA(cmsg); 1336 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1337 1338 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr)); 1339 1340 space += TARGET_CMSG_SPACE(len); 1341 if (space > msg_controllen) { 1342 space -= TARGET_CMSG_SPACE(len); 1343 gemu_log("Target cmsg overflow\n"); 1344 break; 1345 } 1346 1347 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1348 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1349 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len)); 1350 1351 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1352 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1353 memcpy(target_data, data, len); 1354 } else { 1355 int *fd = (int *)data; 1356 int *target_fd = (int *)target_data; 1357 int i, numfds = len / sizeof(int); 1358 1359 for (i = 0; i < numfds; i++) 1360 target_fd[i] = tswap32(fd[i]); 1361 } 1362 1363 cmsg = CMSG_NXTHDR(msgh, cmsg); 1364 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1365 } 1366 unlock_user(target_cmsg, target_cmsg_addr, space); 1367 the_end: 1368 target_msgh->msg_controllen = tswapal(space); 1369 return 0; 1370 } 1371 1372 /* do_setsockopt() Must return target values and target errnos. */ 1373 static abi_long do_setsockopt(int sockfd, int level, int optname, 1374 abi_ulong optval_addr, socklen_t optlen) 1375 { 1376 abi_long ret; 1377 int val; 1378 struct ip_mreqn *ip_mreq; 1379 struct ip_mreq_source *ip_mreq_source; 1380 1381 switch(level) { 1382 case SOL_TCP: 1383 /* TCP options all take an 'int' value. 
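   We therefore insist on at least 32 bits of option data and always pass
   a host int to setsockopt(), whatever size the guest supplied.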
*/ 1384 if (optlen < sizeof(uint32_t)) 1385 return -TARGET_EINVAL; 1386 1387 if (get_user_u32(val, optval_addr)) 1388 return -TARGET_EFAULT; 1389 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1390 break; 1391 case SOL_IP: 1392 switch(optname) { 1393 case IP_TOS: 1394 case IP_TTL: 1395 case IP_HDRINCL: 1396 case IP_ROUTER_ALERT: 1397 case IP_RECVOPTS: 1398 case IP_RETOPTS: 1399 case IP_PKTINFO: 1400 case IP_MTU_DISCOVER: 1401 case IP_RECVERR: 1402 case IP_RECVTOS: 1403 #ifdef IP_FREEBIND 1404 case IP_FREEBIND: 1405 #endif 1406 case IP_MULTICAST_TTL: 1407 case IP_MULTICAST_LOOP: 1408 val = 0; 1409 if (optlen >= sizeof(uint32_t)) { 1410 if (get_user_u32(val, optval_addr)) 1411 return -TARGET_EFAULT; 1412 } else if (optlen >= 1) { 1413 if (get_user_u8(val, optval_addr)) 1414 return -TARGET_EFAULT; 1415 } 1416 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1417 break; 1418 case IP_ADD_MEMBERSHIP: 1419 case IP_DROP_MEMBERSHIP: 1420 if (optlen < sizeof (struct target_ip_mreq) || 1421 optlen > sizeof (struct target_ip_mreqn)) 1422 return -TARGET_EINVAL; 1423 1424 ip_mreq = (struct ip_mreqn *) alloca(optlen); 1425 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1426 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1427 break; 1428 1429 case IP_BLOCK_SOURCE: 1430 case IP_UNBLOCK_SOURCE: 1431 case IP_ADD_SOURCE_MEMBERSHIP: 1432 case IP_DROP_SOURCE_MEMBERSHIP: 1433 if (optlen != sizeof (struct target_ip_mreq_source)) 1434 return -TARGET_EINVAL; 1435 1436 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1437 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1438 unlock_user (ip_mreq_source, optval_addr, 0); 1439 break; 1440 1441 default: 1442 goto unimplemented; 1443 } 1444 break; 1445 case TARGET_SOL_SOCKET: 1446 switch (optname) { 1447 /* Options with 'int' argument. 
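   Each case below only translates the TARGET_SO_* constant into the host
   SO_* value; the option value itself is read once after the switch.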
         */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch (level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.
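   These map the TARGET_SO_* name to the host constant and then share the
   generic int_case path with SOL_TCP below.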
*/ 1575 case TARGET_SO_DEBUG: 1576 optname = SO_DEBUG; 1577 goto int_case; 1578 case TARGET_SO_REUSEADDR: 1579 optname = SO_REUSEADDR; 1580 goto int_case; 1581 case TARGET_SO_TYPE: 1582 optname = SO_TYPE; 1583 goto int_case; 1584 case TARGET_SO_ERROR: 1585 optname = SO_ERROR; 1586 goto int_case; 1587 case TARGET_SO_DONTROUTE: 1588 optname = SO_DONTROUTE; 1589 goto int_case; 1590 case TARGET_SO_BROADCAST: 1591 optname = SO_BROADCAST; 1592 goto int_case; 1593 case TARGET_SO_SNDBUF: 1594 optname = SO_SNDBUF; 1595 goto int_case; 1596 case TARGET_SO_RCVBUF: 1597 optname = SO_RCVBUF; 1598 goto int_case; 1599 case TARGET_SO_KEEPALIVE: 1600 optname = SO_KEEPALIVE; 1601 goto int_case; 1602 case TARGET_SO_OOBINLINE: 1603 optname = SO_OOBINLINE; 1604 goto int_case; 1605 case TARGET_SO_NO_CHECK: 1606 optname = SO_NO_CHECK; 1607 goto int_case; 1608 case TARGET_SO_PRIORITY: 1609 optname = SO_PRIORITY; 1610 goto int_case; 1611 #ifdef SO_BSDCOMPAT 1612 case TARGET_SO_BSDCOMPAT: 1613 optname = SO_BSDCOMPAT; 1614 goto int_case; 1615 #endif 1616 case TARGET_SO_PASSCRED: 1617 optname = SO_PASSCRED; 1618 goto int_case; 1619 case TARGET_SO_TIMESTAMP: 1620 optname = SO_TIMESTAMP; 1621 goto int_case; 1622 case TARGET_SO_RCVLOWAT: 1623 optname = SO_RCVLOWAT; 1624 goto int_case; 1625 default: 1626 goto int_case; 1627 } 1628 break; 1629 case SOL_TCP: 1630 /* TCP options all take an 'int' value. */ 1631 int_case: 1632 if (get_user_u32(len, optlen)) 1633 return -TARGET_EFAULT; 1634 if (len < 0) 1635 return -TARGET_EINVAL; 1636 lv = sizeof(lv); 1637 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1638 if (ret < 0) 1639 return ret; 1640 if (len > lv) 1641 len = lv; 1642 if (len == 4) { 1643 if (put_user_u32(val, optval_addr)) 1644 return -TARGET_EFAULT; 1645 } else { 1646 if (put_user_u8(val, optval_addr)) 1647 return -TARGET_EFAULT; 1648 } 1649 if (put_user_u32(len, optlen)) 1650 return -TARGET_EFAULT; 1651 break; 1652 case SOL_IP: 1653 switch(optname) { 1654 case IP_TOS: 1655 case IP_TTL: 1656 case IP_HDRINCL: 1657 case IP_ROUTER_ALERT: 1658 case IP_RECVOPTS: 1659 case IP_RETOPTS: 1660 case IP_PKTINFO: 1661 case IP_MTU_DISCOVER: 1662 case IP_RECVERR: 1663 case IP_RECVTOS: 1664 #ifdef IP_FREEBIND 1665 case IP_FREEBIND: 1666 #endif 1667 case IP_MULTICAST_TTL: 1668 case IP_MULTICAST_LOOP: 1669 if (get_user_u32(len, optlen)) 1670 return -TARGET_EFAULT; 1671 if (len < 0) 1672 return -TARGET_EINVAL; 1673 lv = sizeof(lv); 1674 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1675 if (ret < 0) 1676 return ret; 1677 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 1678 len = 1; 1679 if (put_user_u32(len, optlen) 1680 || put_user_u8(val, optval_addr)) 1681 return -TARGET_EFAULT; 1682 } else { 1683 if (len > sizeof(int)) 1684 len = sizeof(int); 1685 if (put_user_u32(len, optlen) 1686 || put_user_u32(val, optval_addr)) 1687 return -TARGET_EFAULT; 1688 } 1689 break; 1690 default: 1691 ret = -TARGET_ENOPROTOOPT; 1692 break; 1693 } 1694 break; 1695 default: 1696 unimplemented: 1697 gemu_log("getsockopt level=%d optname=%d not yet supported\n", 1698 level, optname); 1699 ret = -TARGET_EOPNOTSUPP; 1700 break; 1701 } 1702 return ret; 1703 } 1704 1705 /* FIXME 1706 * lock_iovec()/unlock_iovec() have a return code of 0 for success where 1707 * other lock functions have a return code of 0 for failure. 
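 * Callers must therefore not treat a zero return from lock_iovec() or
 * unlock_iovec() as an error.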
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for (i = 0; i < count; i++) {
        base = tswapal(target_vec[i].iov_base);
        vec[i].iov_len = tswapal(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check the lock_user() return value: we must call writev()
               even if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user(target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for (i = 0; i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapal(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);

    return 0;
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch (type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg() Must return target values and target errnos.
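   For send, ancillary data is converted target-to-host before sendmsg();
   for recv, the host control data is converted back and the received byte
   count is preserved as the return value.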
*/ 1827 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1828 int flags, int send) 1829 { 1830 abi_long ret, len; 1831 struct target_msghdr *msgp; 1832 struct msghdr msg; 1833 int count; 1834 struct iovec *vec; 1835 abi_ulong target_vec; 1836 1837 /* FIXME */ 1838 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1839 msgp, 1840 target_msg, 1841 send ? 1 : 0)) 1842 return -TARGET_EFAULT; 1843 if (msgp->msg_name) { 1844 msg.msg_namelen = tswap32(msgp->msg_namelen); 1845 msg.msg_name = alloca(msg.msg_namelen); 1846 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1847 msg.msg_namelen); 1848 if (ret) { 1849 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 1850 return ret; 1851 } 1852 } else { 1853 msg.msg_name = NULL; 1854 msg.msg_namelen = 0; 1855 } 1856 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1857 msg.msg_control = alloca(msg.msg_controllen); 1858 msg.msg_flags = tswap32(msgp->msg_flags); 1859 1860 count = tswapal(msgp->msg_iovlen); 1861 vec = alloca(count * sizeof(struct iovec)); 1862 target_vec = tswapal(msgp->msg_iov); 1863 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send); 1864 msg.msg_iovlen = count; 1865 msg.msg_iov = vec; 1866 1867 if (send) { 1868 ret = target_to_host_cmsg(&msg, msgp); 1869 if (ret == 0) 1870 ret = get_errno(sendmsg(fd, &msg, flags)); 1871 } else { 1872 ret = get_errno(recvmsg(fd, &msg, flags)); 1873 if (!is_error(ret)) { 1874 len = ret; 1875 ret = host_to_target_cmsg(msgp, &msg); 1876 if (!is_error(ret)) 1877 ret = len; 1878 } 1879 } 1880 unlock_iovec(vec, target_vec, count, !send); 1881 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 1882 return ret; 1883 } 1884 1885 /* do_accept() Must return target values and target errnos. */ 1886 static abi_long do_accept(int fd, abi_ulong target_addr, 1887 abi_ulong target_addrlen_addr) 1888 { 1889 socklen_t addrlen; 1890 void *addr; 1891 abi_long ret; 1892 1893 if (target_addr == 0) 1894 return get_errno(accept(fd, NULL, NULL)); 1895 1896 /* linux returns EINVAL if addrlen pointer is invalid */ 1897 if (get_user_u32(addrlen, target_addrlen_addr)) 1898 return -TARGET_EINVAL; 1899 1900 if ((int)addrlen < 0) { 1901 return -TARGET_EINVAL; 1902 } 1903 1904 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 1905 return -TARGET_EINVAL; 1906 1907 addr = alloca(addrlen); 1908 1909 ret = get_errno(accept(fd, addr, &addrlen)); 1910 if (!is_error(ret)) { 1911 host_to_target_sockaddr(target_addr, addr, addrlen); 1912 if (put_user_u32(addrlen, target_addrlen_addr)) 1913 ret = -TARGET_EFAULT; 1914 } 1915 return ret; 1916 } 1917 1918 /* do_getpeername() Must return target values and target errnos. */ 1919 static abi_long do_getpeername(int fd, abi_ulong target_addr, 1920 abi_ulong target_addrlen_addr) 1921 { 1922 socklen_t addrlen; 1923 void *addr; 1924 abi_long ret; 1925 1926 if (get_user_u32(addrlen, target_addrlen_addr)) 1927 return -TARGET_EFAULT; 1928 1929 if ((int)addrlen < 0) { 1930 return -TARGET_EINVAL; 1931 } 1932 1933 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 1934 return -TARGET_EFAULT; 1935 1936 addr = alloca(addrlen); 1937 1938 ret = get_errno(getpeername(fd, addr, &addrlen)); 1939 if (!is_error(ret)) { 1940 host_to_target_sockaddr(target_addr, addr, addrlen); 1941 if (put_user_u32(addrlen, target_addrlen_addr)) 1942 ret = -TARGET_EFAULT; 1943 } 1944 return ret; 1945 } 1946 1947 /* do_getsockname() Must return target values and target errnos. 
*/ 1948 static abi_long do_getsockname(int fd, abi_ulong target_addr, 1949 abi_ulong target_addrlen_addr) 1950 { 1951 socklen_t addrlen; 1952 void *addr; 1953 abi_long ret; 1954 1955 if (get_user_u32(addrlen, target_addrlen_addr)) 1956 return -TARGET_EFAULT; 1957 1958 if ((int)addrlen < 0) { 1959 return -TARGET_EINVAL; 1960 } 1961 1962 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 1963 return -TARGET_EFAULT; 1964 1965 addr = alloca(addrlen); 1966 1967 ret = get_errno(getsockname(fd, addr, &addrlen)); 1968 if (!is_error(ret)) { 1969 host_to_target_sockaddr(target_addr, addr, addrlen); 1970 if (put_user_u32(addrlen, target_addrlen_addr)) 1971 ret = -TARGET_EFAULT; 1972 } 1973 return ret; 1974 } 1975 1976 /* do_socketpair() Must return target values and target errnos. */ 1977 static abi_long do_socketpair(int domain, int type, int protocol, 1978 abi_ulong target_tab_addr) 1979 { 1980 int tab[2]; 1981 abi_long ret; 1982 1983 ret = get_errno(socketpair(domain, type, protocol, tab)); 1984 if (!is_error(ret)) { 1985 if (put_user_s32(tab[0], target_tab_addr) 1986 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 1987 ret = -TARGET_EFAULT; 1988 } 1989 return ret; 1990 } 1991 1992 /* do_sendto() Must return target values and target errnos. */ 1993 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 1994 abi_ulong target_addr, socklen_t addrlen) 1995 { 1996 void *addr; 1997 void *host_msg; 1998 abi_long ret; 1999 2000 if ((int)addrlen < 0) { 2001 return -TARGET_EINVAL; 2002 } 2003 2004 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2005 if (!host_msg) 2006 return -TARGET_EFAULT; 2007 if (target_addr) { 2008 addr = alloca(addrlen); 2009 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2010 if (ret) { 2011 unlock_user(host_msg, msg, 0); 2012 return ret; 2013 } 2014 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2015 } else { 2016 ret = get_errno(send(fd, host_msg, len, flags)); 2017 } 2018 unlock_user(host_msg, msg, 0); 2019 return ret; 2020 } 2021 2022 /* do_recvfrom() Must return target values and target errnos. */ 2023 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2024 abi_ulong target_addr, 2025 abi_ulong target_addrlen) 2026 { 2027 socklen_t addrlen; 2028 void *addr; 2029 void *host_msg; 2030 abi_long ret; 2031 2032 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2033 if (!host_msg) 2034 return -TARGET_EFAULT; 2035 if (target_addr) { 2036 if (get_user_u32(addrlen, target_addrlen)) { 2037 ret = -TARGET_EFAULT; 2038 goto fail; 2039 } 2040 if ((int)addrlen < 0) { 2041 ret = -TARGET_EINVAL; 2042 goto fail; 2043 } 2044 addr = alloca(addrlen); 2045 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2046 } else { 2047 addr = NULL; /* To keep compiler quiet. */ 2048 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2049 } 2050 if (!is_error(ret)) { 2051 if (target_addr) { 2052 host_to_target_sockaddr(target_addr, addr, addrlen); 2053 if (put_user_u32(addrlen, target_addrlen)) { 2054 ret = -TARGET_EFAULT; 2055 goto fail; 2056 } 2057 } 2058 unlock_user(host_msg, msg, len); 2059 } else { 2060 fail: 2061 unlock_user(host_msg, msg, 0); 2062 } 2063 return ret; 2064 } 2065 2066 #ifdef TARGET_NR_socketcall 2067 /* do_socketcall() Must return target values and target errnos. 
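 * socketcall(2) multiplexes all socket operations through one syscall:
 * 'num' selects the operation and 'vptr' points to an array of abi_ulong
 * arguments in guest memory.  Each case fetches its arguments at offsets
 * that are multiples of n = sizeof(abi_ulong) and forwards them to the
 * matching helper, e.g. a guest socket(domain, type, protocol) arrives
 * as SOCKOP_socket with { domain, type, protocol } at vptr.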
*/ 2068 static abi_long do_socketcall(int num, abi_ulong vptr) 2069 { 2070 abi_long ret; 2071 const int n = sizeof(abi_ulong); 2072 2073 switch(num) { 2074 case SOCKOP_socket: 2075 { 2076 abi_ulong domain, type, protocol; 2077 2078 if (get_user_ual(domain, vptr) 2079 || get_user_ual(type, vptr + n) 2080 || get_user_ual(protocol, vptr + 2 * n)) 2081 return -TARGET_EFAULT; 2082 2083 ret = do_socket(domain, type, protocol); 2084 } 2085 break; 2086 case SOCKOP_bind: 2087 { 2088 abi_ulong sockfd; 2089 abi_ulong target_addr; 2090 socklen_t addrlen; 2091 2092 if (get_user_ual(sockfd, vptr) 2093 || get_user_ual(target_addr, vptr + n) 2094 || get_user_ual(addrlen, vptr + 2 * n)) 2095 return -TARGET_EFAULT; 2096 2097 ret = do_bind(sockfd, target_addr, addrlen); 2098 } 2099 break; 2100 case SOCKOP_connect: 2101 { 2102 abi_ulong sockfd; 2103 abi_ulong target_addr; 2104 socklen_t addrlen; 2105 2106 if (get_user_ual(sockfd, vptr) 2107 || get_user_ual(target_addr, vptr + n) 2108 || get_user_ual(addrlen, vptr + 2 * n)) 2109 return -TARGET_EFAULT; 2110 2111 ret = do_connect(sockfd, target_addr, addrlen); 2112 } 2113 break; 2114 case SOCKOP_listen: 2115 { 2116 abi_ulong sockfd, backlog; 2117 2118 if (get_user_ual(sockfd, vptr) 2119 || get_user_ual(backlog, vptr + n)) 2120 return -TARGET_EFAULT; 2121 2122 ret = get_errno(listen(sockfd, backlog)); 2123 } 2124 break; 2125 case SOCKOP_accept: 2126 { 2127 abi_ulong sockfd; 2128 abi_ulong target_addr, target_addrlen; 2129 2130 if (get_user_ual(sockfd, vptr) 2131 || get_user_ual(target_addr, vptr + n) 2132 || get_user_ual(target_addrlen, vptr + 2 * n)) 2133 return -TARGET_EFAULT; 2134 2135 ret = do_accept(sockfd, target_addr, target_addrlen); 2136 } 2137 break; 2138 case SOCKOP_getsockname: 2139 { 2140 abi_ulong sockfd; 2141 abi_ulong target_addr, target_addrlen; 2142 2143 if (get_user_ual(sockfd, vptr) 2144 || get_user_ual(target_addr, vptr + n) 2145 || get_user_ual(target_addrlen, vptr + 2 * n)) 2146 return -TARGET_EFAULT; 2147 2148 ret = do_getsockname(sockfd, target_addr, target_addrlen); 2149 } 2150 break; 2151 case SOCKOP_getpeername: 2152 { 2153 abi_ulong sockfd; 2154 abi_ulong target_addr, target_addrlen; 2155 2156 if (get_user_ual(sockfd, vptr) 2157 || get_user_ual(target_addr, vptr + n) 2158 || get_user_ual(target_addrlen, vptr + 2 * n)) 2159 return -TARGET_EFAULT; 2160 2161 ret = do_getpeername(sockfd, target_addr, target_addrlen); 2162 } 2163 break; 2164 case SOCKOP_socketpair: 2165 { 2166 abi_ulong domain, type, protocol; 2167 abi_ulong tab; 2168 2169 if (get_user_ual(domain, vptr) 2170 || get_user_ual(type, vptr + n) 2171 || get_user_ual(protocol, vptr + 2 * n) 2172 || get_user_ual(tab, vptr + 3 * n)) 2173 return -TARGET_EFAULT; 2174 2175 ret = do_socketpair(domain, type, protocol, tab); 2176 } 2177 break; 2178 case SOCKOP_send: 2179 { 2180 abi_ulong sockfd; 2181 abi_ulong msg; 2182 size_t len; 2183 abi_ulong flags; 2184 2185 if (get_user_ual(sockfd, vptr) 2186 || get_user_ual(msg, vptr + n) 2187 || get_user_ual(len, vptr + 2 * n) 2188 || get_user_ual(flags, vptr + 3 * n)) 2189 return -TARGET_EFAULT; 2190 2191 ret = do_sendto(sockfd, msg, len, flags, 0, 0); 2192 } 2193 break; 2194 case SOCKOP_recv: 2195 { 2196 abi_ulong sockfd; 2197 abi_ulong msg; 2198 size_t len; 2199 abi_ulong flags; 2200 2201 if (get_user_ual(sockfd, vptr) 2202 || get_user_ual(msg, vptr + n) 2203 || get_user_ual(len, vptr + 2 * n) 2204 || get_user_ual(flags, vptr + 3 * n)) 2205 return -TARGET_EFAULT; 2206 2207 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0); 2208 } 2209 break; 
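    /* SOCKOP_send and SOCKOP_recv above reuse do_sendto()/do_recvfrom()
       with a null address; the sendto/recvfrom cases below additionally
       fetch the peer address and its length from the argument block. */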
2210 case SOCKOP_sendto: 2211 { 2212 abi_ulong sockfd; 2213 abi_ulong msg; 2214 size_t len; 2215 abi_ulong flags; 2216 abi_ulong addr; 2217 socklen_t addrlen; 2218 2219 if (get_user_ual(sockfd, vptr) 2220 || get_user_ual(msg, vptr + n) 2221 || get_user_ual(len, vptr + 2 * n) 2222 || get_user_ual(flags, vptr + 3 * n) 2223 || get_user_ual(addr, vptr + 4 * n) 2224 || get_user_ual(addrlen, vptr + 5 * n)) 2225 return -TARGET_EFAULT; 2226 2227 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen); 2228 } 2229 break; 2230 case SOCKOP_recvfrom: 2231 { 2232 abi_ulong sockfd; 2233 abi_ulong msg; 2234 size_t len; 2235 abi_ulong flags; 2236 abi_ulong addr; 2237 socklen_t addrlen; 2238 2239 if (get_user_ual(sockfd, vptr) 2240 || get_user_ual(msg, vptr + n) 2241 || get_user_ual(len, vptr + 2 * n) 2242 || get_user_ual(flags, vptr + 3 * n) 2243 || get_user_ual(addr, vptr + 4 * n) 2244 || get_user_ual(addrlen, vptr + 5 * n)) 2245 return -TARGET_EFAULT; 2246 2247 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen); 2248 } 2249 break; 2250 case SOCKOP_shutdown: 2251 { 2252 abi_ulong sockfd, how; 2253 2254 if (get_user_ual(sockfd, vptr) 2255 || get_user_ual(how, vptr + n)) 2256 return -TARGET_EFAULT; 2257 2258 ret = get_errno(shutdown(sockfd, how)); 2259 } 2260 break; 2261 case SOCKOP_sendmsg: 2262 case SOCKOP_recvmsg: 2263 { 2264 abi_ulong fd; 2265 abi_ulong target_msg; 2266 abi_ulong flags; 2267 2268 if (get_user_ual(fd, vptr) 2269 || get_user_ual(target_msg, vptr + n) 2270 || get_user_ual(flags, vptr + 2 * n)) 2271 return -TARGET_EFAULT; 2272 2273 ret = do_sendrecvmsg(fd, target_msg, flags, 2274 (num == SOCKOP_sendmsg)); 2275 } 2276 break; 2277 case SOCKOP_setsockopt: 2278 { 2279 abi_ulong sockfd; 2280 abi_ulong level; 2281 abi_ulong optname; 2282 abi_ulong optval; 2283 socklen_t optlen; 2284 2285 if (get_user_ual(sockfd, vptr) 2286 || get_user_ual(level, vptr + n) 2287 || get_user_ual(optname, vptr + 2 * n) 2288 || get_user_ual(optval, vptr + 3 * n) 2289 || get_user_ual(optlen, vptr + 4 * n)) 2290 return -TARGET_EFAULT; 2291 2292 ret = do_setsockopt(sockfd, level, optname, optval, optlen); 2293 } 2294 break; 2295 case SOCKOP_getsockopt: 2296 { 2297 abi_ulong sockfd; 2298 abi_ulong level; 2299 abi_ulong optname; 2300 abi_ulong optval; 2301 socklen_t optlen; 2302 2303 if (get_user_ual(sockfd, vptr) 2304 || get_user_ual(level, vptr + n) 2305 || get_user_ual(optname, vptr + 2 * n) 2306 || get_user_ual(optval, vptr + 3 * n) 2307 || get_user_ual(optlen, vptr + 4 * n)) 2308 return -TARGET_EFAULT; 2309 2310 ret = do_getsockopt(sockfd, level, optname, optval, optlen); 2311 } 2312 break; 2313 default: 2314 gemu_log("Unsupported socketcall: %d\n", num); 2315 ret = -TARGET_ENOSYS; 2316 break; 2317 } 2318 return ret; 2319 } 2320 #endif 2321 2322 #define N_SHM_REGIONS 32 2323 2324 static struct shm_region { 2325 abi_ulong start; 2326 abi_ulong size; 2327 } shm_regions[N_SHM_REGIONS]; 2328 2329 struct target_ipc_perm 2330 { 2331 abi_long __key; 2332 abi_ulong uid; 2333 abi_ulong gid; 2334 abi_ulong cuid; 2335 abi_ulong cgid; 2336 unsigned short int mode; 2337 unsigned short int __pad1; 2338 unsigned short int __seq; 2339 unsigned short int __pad2; 2340 abi_ulong __unused1; 2341 abi_ulong __unused2; 2342 }; 2343 2344 struct target_semid_ds 2345 { 2346 struct target_ipc_perm sem_perm; 2347 abi_ulong sem_otime; 2348 abi_ulong __unused1; 2349 abi_ulong sem_ctime; 2350 abi_ulong __unused2; 2351 abi_ulong sem_nsems; 2352 abi_ulong __unused3; 2353 abi_ulong __unused4; 2354 }; 2355 2356 static inline abi_long 
target_to_host_ipc_perm(struct ipc_perm *host_ip, 2357 abi_ulong target_addr) 2358 { 2359 struct target_ipc_perm *target_ip; 2360 struct target_semid_ds *target_sd; 2361 2362 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2363 return -TARGET_EFAULT; 2364 target_ip = &(target_sd->sem_perm); 2365 host_ip->__key = tswapal(target_ip->__key); 2366 host_ip->uid = tswapal(target_ip->uid); 2367 host_ip->gid = tswapal(target_ip->gid); 2368 host_ip->cuid = tswapal(target_ip->cuid); 2369 host_ip->cgid = tswapal(target_ip->cgid); 2370 host_ip->mode = tswap16(target_ip->mode); 2371 unlock_user_struct(target_sd, target_addr, 0); 2372 return 0; 2373 } 2374 2375 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2376 struct ipc_perm *host_ip) 2377 { 2378 struct target_ipc_perm *target_ip; 2379 struct target_semid_ds *target_sd; 2380 2381 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2382 return -TARGET_EFAULT; 2383 target_ip = &(target_sd->sem_perm); 2384 target_ip->__key = tswapal(host_ip->__key); 2385 target_ip->uid = tswapal(host_ip->uid); 2386 target_ip->gid = tswapal(host_ip->gid); 2387 target_ip->cuid = tswapal(host_ip->cuid); 2388 target_ip->cgid = tswapal(host_ip->cgid); 2389 target_ip->mode = tswap16(host_ip->mode); 2390 unlock_user_struct(target_sd, target_addr, 1); 2391 return 0; 2392 } 2393 2394 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2395 abi_ulong target_addr) 2396 { 2397 struct target_semid_ds *target_sd; 2398 2399 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2400 return -TARGET_EFAULT; 2401 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2402 return -TARGET_EFAULT; 2403 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2404 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2405 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2406 unlock_user_struct(target_sd, target_addr, 0); 2407 return 0; 2408 } 2409 2410 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2411 struct semid_ds *host_sd) 2412 { 2413 struct target_semid_ds *target_sd; 2414 2415 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2416 return -TARGET_EFAULT; 2417 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2418 return -TARGET_EFAULT; 2419 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2420 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2421 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2422 unlock_user_struct(target_sd, target_addr, 1); 2423 return 0; 2424 } 2425 2426 struct target_seminfo { 2427 int semmap; 2428 int semmni; 2429 int semmns; 2430 int semmnu; 2431 int semmsl; 2432 int semopm; 2433 int semume; 2434 int semusz; 2435 int semvmx; 2436 int semaem; 2437 }; 2438 2439 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2440 struct seminfo *host_seminfo) 2441 { 2442 struct target_seminfo *target_seminfo; 2443 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2444 return -TARGET_EFAULT; 2445 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2446 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2447 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2448 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2449 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2450 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2451 __put_user(host_seminfo->semume, &target_seminfo->semume); 2452 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2453 
__put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2454 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2455 unlock_user_struct(target_seminfo, target_addr, 1); 2456 return 0; 2457 } 2458 2459 union semun { 2460 int val; 2461 struct semid_ds *buf; 2462 unsigned short *array; 2463 struct seminfo *__buf; 2464 }; 2465 2466 union target_semun { 2467 int val; 2468 abi_ulong buf; 2469 abi_ulong array; 2470 abi_ulong __buf; 2471 }; 2472 2473 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2474 abi_ulong target_addr) 2475 { 2476 int nsems; 2477 unsigned short *array; 2478 union semun semun; 2479 struct semid_ds semid_ds; 2480 int i, ret; 2481 2482 semun.buf = &semid_ds; 2483 2484 ret = semctl(semid, 0, IPC_STAT, semun); 2485 if (ret == -1) 2486 return get_errno(ret); 2487 2488 nsems = semid_ds.sem_nsems; 2489 2490 *host_array = malloc(nsems*sizeof(unsigned short)); 2491 array = lock_user(VERIFY_READ, target_addr, 2492 nsems*sizeof(unsigned short), 1); 2493 if (!array) 2494 return -TARGET_EFAULT; 2495 2496 for(i=0; i<nsems; i++) { 2497 __get_user((*host_array)[i], &array[i]); 2498 } 2499 unlock_user(array, target_addr, 0); 2500 2501 return 0; 2502 } 2503 2504 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2505 unsigned short **host_array) 2506 { 2507 int nsems; 2508 unsigned short *array; 2509 union semun semun; 2510 struct semid_ds semid_ds; 2511 int i, ret; 2512 2513 semun.buf = &semid_ds; 2514 2515 ret = semctl(semid, 0, IPC_STAT, semun); 2516 if (ret == -1) 2517 return get_errno(ret); 2518 2519 nsems = semid_ds.sem_nsems; 2520 2521 array = lock_user(VERIFY_WRITE, target_addr, 2522 nsems*sizeof(unsigned short), 0); 2523 if (!array) 2524 return -TARGET_EFAULT; 2525 2526 for(i=0; i<nsems; i++) { 2527 __put_user((*host_array)[i], &array[i]); 2528 } 2529 free(*host_array); 2530 unlock_user(array, target_addr, 1); 2531 2532 return 0; 2533 } 2534 2535 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2536 union target_semun target_su) 2537 { 2538 union semun arg; 2539 struct semid_ds dsarg; 2540 unsigned short *array = NULL; 2541 struct seminfo seminfo; 2542 abi_long ret = -TARGET_EINVAL; 2543 abi_long err; 2544 cmd &= 0xff; 2545 2546 switch( cmd ) { 2547 case GETVAL: 2548 case SETVAL: 2549 arg.val = tswap32(target_su.val); 2550 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2551 target_su.val = tswap32(arg.val); 2552 break; 2553 case GETALL: 2554 case SETALL: 2555 err = target_to_host_semarray(semid, &array, target_su.array); 2556 if (err) 2557 return err; 2558 arg.array = array; 2559 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2560 err = host_to_target_semarray(semid, target_su.array, &array); 2561 if (err) 2562 return err; 2563 break; 2564 case IPC_STAT: 2565 case IPC_SET: 2566 case SEM_STAT: 2567 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2568 if (err) 2569 return err; 2570 arg.buf = &dsarg; 2571 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2572 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2573 if (err) 2574 return err; 2575 break; 2576 case IPC_INFO: 2577 case SEM_INFO: 2578 arg.__buf = &seminfo; 2579 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2580 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2581 if (err) 2582 return err; 2583 break; 2584 case IPC_RMID: 2585 case GETPID: 2586 case GETNCNT: 2587 case GETZCNT: 2588 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2589 break; 2590 } 2591 2592 return ret; 2593 } 2594 2595 struct 
target_sembuf { 2596 unsigned short sem_num; 2597 short sem_op; 2598 short sem_flg; 2599 }; 2600 2601 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2602 abi_ulong target_addr, 2603 unsigned nsops) 2604 { 2605 struct target_sembuf *target_sembuf; 2606 int i; 2607 2608 target_sembuf = lock_user(VERIFY_READ, target_addr, 2609 nsops*sizeof(struct target_sembuf), 1); 2610 if (!target_sembuf) 2611 return -TARGET_EFAULT; 2612 2613 for(i=0; i<nsops; i++) { 2614 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2615 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2616 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2617 } 2618 2619 unlock_user(target_sembuf, target_addr, 0); 2620 2621 return 0; 2622 } 2623 2624 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2625 { 2626 struct sembuf sops[nsops]; 2627 2628 if (target_to_host_sembuf(sops, ptr, nsops)) 2629 return -TARGET_EFAULT; 2630 2631 return semop(semid, sops, nsops); 2632 } 2633 2634 struct target_msqid_ds 2635 { 2636 struct target_ipc_perm msg_perm; 2637 abi_ulong msg_stime; 2638 #if TARGET_ABI_BITS == 32 2639 abi_ulong __unused1; 2640 #endif 2641 abi_ulong msg_rtime; 2642 #if TARGET_ABI_BITS == 32 2643 abi_ulong __unused2; 2644 #endif 2645 abi_ulong msg_ctime; 2646 #if TARGET_ABI_BITS == 32 2647 abi_ulong __unused3; 2648 #endif 2649 abi_ulong __msg_cbytes; 2650 abi_ulong msg_qnum; 2651 abi_ulong msg_qbytes; 2652 abi_ulong msg_lspid; 2653 abi_ulong msg_lrpid; 2654 abi_ulong __unused4; 2655 abi_ulong __unused5; 2656 }; 2657 2658 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2659 abi_ulong target_addr) 2660 { 2661 struct target_msqid_ds *target_md; 2662 2663 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2664 return -TARGET_EFAULT; 2665 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2666 return -TARGET_EFAULT; 2667 host_md->msg_stime = tswapal(target_md->msg_stime); 2668 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2669 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2670 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2671 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2672 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2673 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2674 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2675 unlock_user_struct(target_md, target_addr, 0); 2676 return 0; 2677 } 2678 2679 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2680 struct msqid_ds *host_md) 2681 { 2682 struct target_msqid_ds *target_md; 2683 2684 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2685 return -TARGET_EFAULT; 2686 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2687 return -TARGET_EFAULT; 2688 target_md->msg_stime = tswapal(host_md->msg_stime); 2689 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2690 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2691 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2692 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2693 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2694 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2695 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2696 unlock_user_struct(target_md, target_addr, 1); 2697 return 0; 2698 } 2699 2700 struct target_msginfo { 2701 int msgpool; 2702 int msgmap; 2703 int msgmax; 2704 int msgmnb; 2705 int msgmni; 2706 int msgssz; 2707 int msgtql; 2708 unsigned short int msgseg; 
2709 }; 2710 2711 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2712 struct msginfo *host_msginfo) 2713 { 2714 struct target_msginfo *target_msginfo; 2715 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2716 return -TARGET_EFAULT; 2717 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2718 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2719 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2720 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2721 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2722 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2723 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2724 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2725 unlock_user_struct(target_msginfo, target_addr, 1); 2726 return 0; 2727 } 2728 2729 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2730 { 2731 struct msqid_ds dsarg; 2732 struct msginfo msginfo; 2733 abi_long ret = -TARGET_EINVAL; 2734 2735 cmd &= 0xff; 2736 2737 switch (cmd) { 2738 case IPC_STAT: 2739 case IPC_SET: 2740 case MSG_STAT: 2741 if (target_to_host_msqid_ds(&dsarg,ptr)) 2742 return -TARGET_EFAULT; 2743 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2744 if (host_to_target_msqid_ds(ptr,&dsarg)) 2745 return -TARGET_EFAULT; 2746 break; 2747 case IPC_RMID: 2748 ret = get_errno(msgctl(msgid, cmd, NULL)); 2749 break; 2750 case IPC_INFO: 2751 case MSG_INFO: 2752 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2753 if (host_to_target_msginfo(ptr, &msginfo)) 2754 return -TARGET_EFAULT; 2755 break; 2756 } 2757 2758 return ret; 2759 } 2760 2761 struct target_msgbuf { 2762 abi_long mtype; 2763 char mtext[1]; 2764 }; 2765 2766 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2767 unsigned int msgsz, int msgflg) 2768 { 2769 struct target_msgbuf *target_mb; 2770 struct msgbuf *host_mb; 2771 abi_long ret = 0; 2772 2773 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2774 return -TARGET_EFAULT; 2775 host_mb = malloc(msgsz+sizeof(long)); 2776 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2777 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2778 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2779 free(host_mb); 2780 unlock_user_struct(target_mb, msgp, 0); 2781 2782 return ret; 2783 } 2784 2785 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2786 unsigned int msgsz, abi_long msgtyp, 2787 int msgflg) 2788 { 2789 struct target_msgbuf *target_mb; 2790 char *target_mtext; 2791 struct msgbuf *host_mb; 2792 abi_long ret = 0; 2793 2794 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2795 return -TARGET_EFAULT; 2796 2797 host_mb = malloc(msgsz+sizeof(long)); 2798 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg)); 2799 2800 if (ret > 0) { 2801 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2802 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2803 if (!target_mtext) { 2804 ret = -TARGET_EFAULT; 2805 goto end; 2806 } 2807 memcpy(target_mb->mtext, host_mb->mtext, ret); 2808 unlock_user(target_mtext, target_mtext_addr, ret); 2809 } 2810 2811 target_mb->mtype = tswapal(host_mb->mtype); 2812 free(host_mb); 2813 2814 end: 2815 if (target_mb) 2816 unlock_user_struct(target_mb, msgp, 1); 2817 return ret; 2818 } 2819 2820 struct target_shmid_ds 2821 { 2822 struct target_ipc_perm shm_perm; 2823 abi_ulong shm_segsz; 2824 abi_ulong shm_atime; 2825 #if TARGET_ABI_BITS == 32 2826 abi_ulong __unused1; 
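    /* The __unusedN members mirror the padding that the 32 bit kernel ABI
       appears to reserve after each time field, keeping this struct in
       sync with the guest's shmid_ds layout. */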
2827 #endif 2828 abi_ulong shm_dtime; 2829 #if TARGET_ABI_BITS == 32 2830 abi_ulong __unused2; 2831 #endif 2832 abi_ulong shm_ctime; 2833 #if TARGET_ABI_BITS == 32 2834 abi_ulong __unused3; 2835 #endif 2836 int shm_cpid; 2837 int shm_lpid; 2838 abi_ulong shm_nattch; 2839 unsigned long int __unused4; 2840 unsigned long int __unused5; 2841 }; 2842 2843 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2844 abi_ulong target_addr) 2845 { 2846 struct target_shmid_ds *target_sd; 2847 2848 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2849 return -TARGET_EFAULT; 2850 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2851 return -TARGET_EFAULT; 2852 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2853 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2854 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2855 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2856 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2857 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2858 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2859 unlock_user_struct(target_sd, target_addr, 0); 2860 return 0; 2861 } 2862 2863 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2864 struct shmid_ds *host_sd) 2865 { 2866 struct target_shmid_ds *target_sd; 2867 2868 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2869 return -TARGET_EFAULT; 2870 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2871 return -TARGET_EFAULT; 2872 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2873 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2874 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2875 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2876 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2877 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2878 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2879 unlock_user_struct(target_sd, target_addr, 1); 2880 return 0; 2881 } 2882 2883 struct target_shminfo { 2884 abi_ulong shmmax; 2885 abi_ulong shmmin; 2886 abi_ulong shmmni; 2887 abi_ulong shmseg; 2888 abi_ulong shmall; 2889 }; 2890 2891 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2892 struct shminfo *host_shminfo) 2893 { 2894 struct target_shminfo *target_shminfo; 2895 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 2896 return -TARGET_EFAULT; 2897 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 2898 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 2899 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 2900 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 2901 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 2902 unlock_user_struct(target_shminfo, target_addr, 1); 2903 return 0; 2904 } 2905 2906 struct target_shm_info { 2907 int used_ids; 2908 abi_ulong shm_tot; 2909 abi_ulong shm_rss; 2910 abi_ulong shm_swp; 2911 abi_ulong swap_attempts; 2912 abi_ulong swap_successes; 2913 }; 2914 2915 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 2916 struct shm_info *host_shm_info) 2917 { 2918 struct target_shm_info *target_shm_info; 2919 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 2920 return -TARGET_EFAULT; 2921 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 2922 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 2923 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 2924 
__put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 2925 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 2926 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 2927 unlock_user_struct(target_shm_info, target_addr, 1); 2928 return 0; 2929 } 2930 2931 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 2932 { 2933 struct shmid_ds dsarg; 2934 struct shminfo shminfo; 2935 struct shm_info shm_info; 2936 abi_long ret = -TARGET_EINVAL; 2937 2938 cmd &= 0xff; 2939 2940 switch(cmd) { 2941 case IPC_STAT: 2942 case IPC_SET: 2943 case SHM_STAT: 2944 if (target_to_host_shmid_ds(&dsarg, buf)) 2945 return -TARGET_EFAULT; 2946 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 2947 if (host_to_target_shmid_ds(buf, &dsarg)) 2948 return -TARGET_EFAULT; 2949 break; 2950 case IPC_INFO: 2951 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 2952 if (host_to_target_shminfo(buf, &shminfo)) 2953 return -TARGET_EFAULT; 2954 break; 2955 case SHM_INFO: 2956 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 2957 if (host_to_target_shm_info(buf, &shm_info)) 2958 return -TARGET_EFAULT; 2959 break; 2960 case IPC_RMID: 2961 case SHM_LOCK: 2962 case SHM_UNLOCK: 2963 ret = get_errno(shmctl(shmid, cmd, NULL)); 2964 break; 2965 } 2966 2967 return ret; 2968 } 2969 2970 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 2971 { 2972 abi_long raddr; 2973 void *host_raddr; 2974 struct shmid_ds shm_info; 2975 int i,ret; 2976 2977 /* find out the length of the shared memory segment */ 2978 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 2979 if (is_error(ret)) { 2980 /* can't get length, bail out */ 2981 return ret; 2982 } 2983 2984 mmap_lock(); 2985 2986 if (shmaddr) 2987 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 2988 else { 2989 abi_ulong mmap_start; 2990 2991 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 2992 2993 if (mmap_start == -1) { 2994 errno = ENOMEM; 2995 host_raddr = (void *)-1; 2996 } else 2997 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 2998 } 2999 3000 if (host_raddr == (void *)-1) { 3001 mmap_unlock(); 3002 return get_errno((long)host_raddr); 3003 } 3004 raddr=h2g((unsigned long)host_raddr); 3005 3006 page_set_flags(raddr, raddr + shm_info.shm_segsz, 3007 PAGE_VALID | PAGE_READ | 3008 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3009 3010 for (i = 0; i < N_SHM_REGIONS; i++) { 3011 if (shm_regions[i].start == 0) { 3012 shm_regions[i].start = raddr; 3013 shm_regions[i].size = shm_info.shm_segsz; 3014 break; 3015 } 3016 } 3017 3018 mmap_unlock(); 3019 return raddr; 3020 3021 } 3022 3023 static inline abi_long do_shmdt(abi_ulong shmaddr) 3024 { 3025 int i; 3026 3027 for (i = 0; i < N_SHM_REGIONS; ++i) { 3028 if (shm_regions[i].start == shmaddr) { 3029 shm_regions[i].start = 0; 3030 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3031 break; 3032 } 3033 } 3034 3035 return get_errno(shmdt(g2h(shmaddr))); 3036 } 3037 3038 #ifdef TARGET_NR_ipc 3039 /* ??? This only works with linear mappings. */ 3040 /* do_ipc() must return target values and target errnos. 
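 * ipc(2) is the legacy multiplexer used by targets that lack separate
 * semaphore, message queue and shared memory syscalls: the low 16 bits of
 * 'call' select the operation and the upper 16 bits carry the interface
 * version, which only matters for IPCOP_msgrcv and IPCOP_shmat.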
*/ 3041 static abi_long do_ipc(unsigned int call, int first, 3042 int second, int third, 3043 abi_long ptr, abi_long fifth) 3044 { 3045 int version; 3046 abi_long ret = 0; 3047 3048 version = call >> 16; 3049 call &= 0xffff; 3050 3051 switch (call) { 3052 case IPCOP_semop: 3053 ret = do_semop(first, ptr, second); 3054 break; 3055 3056 case IPCOP_semget: 3057 ret = get_errno(semget(first, second, third)); 3058 break; 3059 3060 case IPCOP_semctl: 3061 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3062 break; 3063 3064 case IPCOP_msgget: 3065 ret = get_errno(msgget(first, second)); 3066 break; 3067 3068 case IPCOP_msgsnd: 3069 ret = do_msgsnd(first, ptr, second, third); 3070 break; 3071 3072 case IPCOP_msgctl: 3073 ret = do_msgctl(first, second, ptr); 3074 break; 3075 3076 case IPCOP_msgrcv: 3077 switch (version) { 3078 case 0: 3079 { 3080 struct target_ipc_kludge { 3081 abi_long msgp; 3082 abi_long msgtyp; 3083 } *tmp; 3084 3085 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3086 ret = -TARGET_EFAULT; 3087 break; 3088 } 3089 3090 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third); 3091 3092 unlock_user_struct(tmp, ptr, 0); 3093 break; 3094 } 3095 default: 3096 ret = do_msgrcv(first, ptr, second, fifth, third); 3097 } 3098 break; 3099 3100 case IPCOP_shmat: 3101 switch (version) { 3102 default: 3103 { 3104 abi_ulong raddr; 3105 raddr = do_shmat(first, ptr, second); 3106 if (is_error(raddr)) 3107 return get_errno(raddr); 3108 if (put_user_ual(raddr, third)) 3109 return -TARGET_EFAULT; 3110 break; 3111 } 3112 case 1: 3113 ret = -TARGET_EINVAL; 3114 break; 3115 } 3116 break; 3117 case IPCOP_shmdt: 3118 ret = do_shmdt(ptr); 3119 break; 3120 3121 case IPCOP_shmget: 3122 /* IPC_* flag values are the same on all linux platforms */ 3123 ret = get_errno(shmget(first, second, third)); 3124 break; 3125 3126 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3127 case IPCOP_shmctl: 3128 ret = do_shmctl(first, second, third); 3129 break; 3130 default: 3131 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3132 ret = -TARGET_ENOSYS; 3133 break; 3134 } 3135 return ret; 3136 } 3137 #endif 3138 3139 /* kernel structure types definitions */ 3140 3141 #define STRUCT(name, ...) STRUCT_ ## name, 3142 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3143 enum { 3144 #include "syscall_types.h" 3145 }; 3146 #undef STRUCT 3147 #undef STRUCT_SPECIAL 3148 3149 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3150 #define STRUCT_SPECIAL(name) 3151 #include "syscall_types.h" 3152 #undef STRUCT 3153 #undef STRUCT_SPECIAL 3154 3155 typedef struct IOCTLEntry IOCTLEntry; 3156 3157 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3158 int fd, abi_long cmd, abi_long arg); 3159 3160 struct IOCTLEntry { 3161 unsigned int target_cmd; 3162 unsigned int host_cmd; 3163 const char *name; 3164 int access; 3165 do_ioctl_fn *do_ioctl; 3166 const argtype arg_type[5]; 3167 }; 3168 3169 #define IOC_R 0x0001 3170 #define IOC_W 0x0002 3171 #define IOC_RW (IOC_R | IOC_W) 3172 3173 #define MAX_STRUCT_SIZE 4096 3174 3175 #ifdef CONFIG_FIEMAP 3176 /* So fiemap access checks don't overflow on 32 bit systems. 3177 * This is very slightly smaller than the limit imposed by 3178 * the underlying kernel. 
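 * It guarantees that sizeof(struct fiemap) plus
 * fm_extent_count * sizeof(struct fiemap_extent) cannot wrap the 32 bit
 * outbufsz computed in do_ioctl_fs_ioc_fiemap() below.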
3179 */ 3180 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3181 / sizeof(struct fiemap_extent)) 3182 3183 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3184 int fd, abi_long cmd, abi_long arg) 3185 { 3186 /* The parameter for this ioctl is a struct fiemap followed 3187 * by an array of struct fiemap_extent whose size is set 3188 * in fiemap->fm_extent_count. The array is filled in by the 3189 * ioctl. 3190 */ 3191 int target_size_in, target_size_out; 3192 struct fiemap *fm; 3193 const argtype *arg_type = ie->arg_type; 3194 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3195 void *argptr, *p; 3196 abi_long ret; 3197 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3198 uint32_t outbufsz; 3199 int free_fm = 0; 3200 3201 assert(arg_type[0] == TYPE_PTR); 3202 assert(ie->access == IOC_RW); 3203 arg_type++; 3204 target_size_in = thunk_type_size(arg_type, 0); 3205 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3206 if (!argptr) { 3207 return -TARGET_EFAULT; 3208 } 3209 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3210 unlock_user(argptr, arg, 0); 3211 fm = (struct fiemap *)buf_temp; 3212 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3213 return -TARGET_EINVAL; 3214 } 3215 3216 outbufsz = sizeof (*fm) + 3217 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3218 3219 if (outbufsz > MAX_STRUCT_SIZE) { 3220 /* We can't fit all the extents into the fixed size buffer. 3221 * Allocate one that is large enough and use it instead. 3222 */ 3223 fm = malloc(outbufsz); 3224 if (!fm) { 3225 return -TARGET_ENOMEM; 3226 } 3227 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3228 free_fm = 1; 3229 } 3230 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3231 if (!is_error(ret)) { 3232 target_size_out = target_size_in; 3233 /* An extent_count of 0 means we were only counting the extents 3234 * so there are no structs to copy 3235 */ 3236 if (fm->fm_extent_count != 0) { 3237 target_size_out += fm->fm_mapped_extents * extent_size; 3238 } 3239 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3240 if (!argptr) { 3241 ret = -TARGET_EFAULT; 3242 } else { 3243 /* Convert the struct fiemap */ 3244 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3245 if (fm->fm_extent_count != 0) { 3246 p = argptr + target_size_in; 3247 /* ...and then all the struct fiemap_extents */ 3248 for (i = 0; i < fm->fm_mapped_extents; i++) { 3249 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3250 THUNK_TARGET); 3251 p += extent_size; 3252 } 3253 } 3254 unlock_user(argptr, arg, target_size_out); 3255 } 3256 } 3257 if (free_fm) { 3258 free(fm); 3259 } 3260 return ret; 3261 } 3262 #endif 3263 3264 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3265 int fd, abi_long cmd, abi_long arg) 3266 { 3267 const argtype *arg_type = ie->arg_type; 3268 int target_size; 3269 void *argptr; 3270 int ret; 3271 struct ifconf *host_ifconf; 3272 uint32_t outbufsz; 3273 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3274 int target_ifreq_size; 3275 int nb_ifreq; 3276 int free_buf = 0; 3277 int i; 3278 int target_ifc_len; 3279 abi_long target_ifc_buf; 3280 int host_ifc_len; 3281 char *host_ifc_buf; 3282 3283 assert(arg_type[0] == TYPE_PTR); 3284 assert(ie->access == IOC_RW); 3285 3286 arg_type++; 3287 target_size = thunk_type_size(arg_type, 0); 3288 3289 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3290 if (!argptr) 3291 return -TARGET_EFAULT; 3292 thunk_convert(buf_temp, argptr, 
arg_type, THUNK_HOST); 3293 unlock_user(argptr, arg, 0); 3294 3295 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3296 target_ifc_len = host_ifconf->ifc_len; 3297 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3298 3299 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3300 nb_ifreq = target_ifc_len / target_ifreq_size; 3301 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3302 3303 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3304 if (outbufsz > MAX_STRUCT_SIZE) { 3305 /* We can't fit all the extents into the fixed size buffer. 3306 * Allocate one that is large enough and use it instead. 3307 */ 3308 host_ifconf = malloc(outbufsz); 3309 if (!host_ifconf) { 3310 return -TARGET_ENOMEM; 3311 } 3312 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3313 free_buf = 1; 3314 } 3315 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3316 3317 host_ifconf->ifc_len = host_ifc_len; 3318 host_ifconf->ifc_buf = host_ifc_buf; 3319 3320 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3321 if (!is_error(ret)) { 3322 /* convert host ifc_len to target ifc_len */ 3323 3324 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3325 target_ifc_len = nb_ifreq * target_ifreq_size; 3326 host_ifconf->ifc_len = target_ifc_len; 3327 3328 /* restore target ifc_buf */ 3329 3330 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3331 3332 /* copy struct ifconf to target user */ 3333 3334 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3335 if (!argptr) 3336 return -TARGET_EFAULT; 3337 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3338 unlock_user(argptr, arg, target_size); 3339 3340 /* copy ifreq[] to target user */ 3341 3342 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3343 for (i = 0; i < nb_ifreq ; i++) { 3344 thunk_convert(argptr + i * target_ifreq_size, 3345 host_ifc_buf + i * sizeof(struct ifreq), 3346 ifreq_arg_type, THUNK_TARGET); 3347 } 3348 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3349 } 3350 3351 if (free_buf) { 3352 free(host_ifconf); 3353 } 3354 3355 return ret; 3356 } 3357 3358 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3359 abi_long cmd, abi_long arg) 3360 { 3361 void *argptr; 3362 struct dm_ioctl *host_dm; 3363 abi_long guest_data; 3364 uint32_t guest_data_size; 3365 int target_size; 3366 const argtype *arg_type = ie->arg_type; 3367 abi_long ret; 3368 void *big_buf = NULL; 3369 char *host_data; 3370 3371 arg_type++; 3372 target_size = thunk_type_size(arg_type, 0); 3373 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3374 if (!argptr) { 3375 ret = -TARGET_EFAULT; 3376 goto out; 3377 } 3378 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3379 unlock_user(argptr, arg, 0); 3380 3381 /* buf_temp is too small, so fetch things into a bigger buffer */ 3382 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3383 memcpy(big_buf, buf_temp, target_size); 3384 buf_temp = big_buf; 3385 host_dm = big_buf; 3386 3387 guest_data = arg + host_dm->data_start; 3388 if ((guest_data - arg) < 0) { 3389 ret = -EINVAL; 3390 goto out; 3391 } 3392 guest_data_size = host_dm->data_size - host_dm->data_start; 3393 host_data = (char*)host_dm + host_dm->data_start; 3394 3395 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3396 switch (ie->host_cmd) { 3397 case DM_REMOVE_ALL: 3398 case DM_LIST_DEVICES: 3399 case DM_DEV_CREATE: 3400 case DM_DEV_REMOVE: 3401 case DM_DEV_SUSPEND: 3402 case DM_DEV_STATUS: 3403 case DM_DEV_WAIT: 3404 case 
DM_TABLE_STATUS: 3405 case DM_TABLE_CLEAR: 3406 case DM_TABLE_DEPS: 3407 case DM_LIST_VERSIONS: 3408 /* no input data */ 3409 break; 3410 case DM_DEV_RENAME: 3411 case DM_DEV_SET_GEOMETRY: 3412 /* data contains only strings */ 3413 memcpy(host_data, argptr, guest_data_size); 3414 break; 3415 case DM_TARGET_MSG: 3416 memcpy(host_data, argptr, guest_data_size); 3417 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3418 break; 3419 case DM_TABLE_LOAD: 3420 { 3421 void *gspec = argptr; 3422 void *cur_data = host_data; 3423 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3424 int spec_size = thunk_type_size(arg_type, 0); 3425 int i; 3426 3427 for (i = 0; i < host_dm->target_count; i++) { 3428 struct dm_target_spec *spec = cur_data; 3429 uint32_t next; 3430 int slen; 3431 3432 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3433 slen = strlen((char*)gspec + spec_size) + 1; 3434 next = spec->next; 3435 spec->next = sizeof(*spec) + slen; 3436 strcpy((char*)&spec[1], gspec + spec_size); 3437 gspec += next; 3438 cur_data += spec->next; 3439 } 3440 break; 3441 } 3442 default: 3443 ret = -TARGET_EINVAL; 3444 goto out; 3445 } 3446 unlock_user(argptr, guest_data, 0); 3447 3448 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3449 if (!is_error(ret)) { 3450 guest_data = arg + host_dm->data_start; 3451 guest_data_size = host_dm->data_size - host_dm->data_start; 3452 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3453 switch (ie->host_cmd) { 3454 case DM_REMOVE_ALL: 3455 case DM_DEV_CREATE: 3456 case DM_DEV_REMOVE: 3457 case DM_DEV_RENAME: 3458 case DM_DEV_SUSPEND: 3459 case DM_DEV_STATUS: 3460 case DM_TABLE_LOAD: 3461 case DM_TABLE_CLEAR: 3462 case DM_TARGET_MSG: 3463 case DM_DEV_SET_GEOMETRY: 3464 /* no return data */ 3465 break; 3466 case DM_LIST_DEVICES: 3467 { 3468 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3469 uint32_t remaining_data = guest_data_size; 3470 void *cur_data = argptr; 3471 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3472 int nl_size = 12; /* can't use thunk_size due to alignment */ 3473 3474 while (1) { 3475 uint32_t next = nl->next; 3476 if (next) { 3477 nl->next = nl_size + (strlen(nl->name) + 1); 3478 } 3479 if (remaining_data < nl->next) { 3480 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3481 break; 3482 } 3483 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3484 strcpy(cur_data + nl_size, nl->name); 3485 cur_data += nl->next; 3486 remaining_data -= nl->next; 3487 if (!next) { 3488 break; 3489 } 3490 nl = (void*)nl + next; 3491 } 3492 break; 3493 } 3494 case DM_DEV_WAIT: 3495 case DM_TABLE_STATUS: 3496 { 3497 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3498 void *cur_data = argptr; 3499 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3500 int spec_size = thunk_type_size(arg_type, 0); 3501 int i; 3502 3503 for (i = 0; i < host_dm->target_count; i++) { 3504 uint32_t next = spec->next; 3505 int slen = strlen((char*)&spec[1]) + 1; 3506 spec->next = (cur_data - argptr) + spec_size + slen; 3507 if (guest_data_size < spec->next) { 3508 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3509 break; 3510 } 3511 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3512 strcpy(cur_data + spec_size, (char*)&spec[1]); 3513 cur_data = argptr + spec->next; 3514 spec = (void*)host_dm + host_dm->data_start + next; 3515 } 3516 break; 3517 } 3518 case DM_TABLE_DEPS: 3519 { 3520 void *hdata = (void*)host_dm + host_dm->data_start; 3521 int count = *(uint32_t*)hdata; 3522 uint64_t *hdev = 
hdata + 8; 3523 uint64_t *gdev = argptr + 8; 3524 int i; 3525 3526 *(uint32_t*)argptr = tswap32(count); 3527 for (i = 0; i < count; i++) { 3528 *gdev = tswap64(*hdev); 3529 gdev++; 3530 hdev++; 3531 } 3532 break; 3533 } 3534 case DM_LIST_VERSIONS: 3535 { 3536 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3537 uint32_t remaining_data = guest_data_size; 3538 void *cur_data = argptr; 3539 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3540 int vers_size = thunk_type_size(arg_type, 0); 3541 3542 while (1) { 3543 uint32_t next = vers->next; 3544 if (next) { 3545 vers->next = vers_size + (strlen(vers->name) + 1); 3546 } 3547 if (remaining_data < vers->next) { 3548 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3549 break; 3550 } 3551 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3552 strcpy(cur_data + vers_size, vers->name); 3553 cur_data += vers->next; 3554 remaining_data -= vers->next; 3555 if (!next) { 3556 break; 3557 } 3558 vers = (void*)vers + next; 3559 } 3560 break; 3561 } 3562 default: 3563 ret = -TARGET_EINVAL; 3564 goto out; 3565 } 3566 unlock_user(argptr, guest_data, guest_data_size); 3567 3568 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3569 if (!argptr) { 3570 ret = -TARGET_EFAULT; 3571 goto out; 3572 } 3573 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3574 unlock_user(argptr, arg, target_size); 3575 } 3576 out: 3577 if (big_buf) { 3578 free(big_buf); 3579 } 3580 return ret; 3581 } 3582 3583 static IOCTLEntry ioctl_entries[] = { 3584 #define IOCTL(cmd, access, ...) \ 3585 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3586 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3587 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3588 #include "ioctls.h" 3589 { 0, 0, }, 3590 }; 3591 3592 /* ??? Implement proper locking for ioctls. */ 3593 /* do_ioctl() Must return target values and target errnos. 
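 * The ioctl_entries[] table generated from ioctls.h is scanned linearly
 * for the target command number.  Entries with a do_ioctl callback do
 * their own argument marshalling; for the rest, the argtype description
 * plus the IOC_R/IOC_W access flags drive a generic thunk conversion of
 * the argument into and out of buf_temp.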
*/ 3594 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3595 { 3596 const IOCTLEntry *ie; 3597 const argtype *arg_type; 3598 abi_long ret; 3599 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3600 int target_size; 3601 void *argptr; 3602 3603 ie = ioctl_entries; 3604 for(;;) { 3605 if (ie->target_cmd == 0) { 3606 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3607 return -TARGET_ENOSYS; 3608 } 3609 if (ie->target_cmd == cmd) 3610 break; 3611 ie++; 3612 } 3613 arg_type = ie->arg_type; 3614 #if defined(DEBUG) 3615 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3616 #endif 3617 if (ie->do_ioctl) { 3618 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3619 } 3620 3621 switch(arg_type[0]) { 3622 case TYPE_NULL: 3623 /* no argument */ 3624 ret = get_errno(ioctl(fd, ie->host_cmd)); 3625 break; 3626 case TYPE_PTRVOID: 3627 case TYPE_INT: 3628 /* int argment */ 3629 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3630 break; 3631 case TYPE_PTR: 3632 arg_type++; 3633 target_size = thunk_type_size(arg_type, 0); 3634 switch(ie->access) { 3635 case IOC_R: 3636 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3637 if (!is_error(ret)) { 3638 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3639 if (!argptr) 3640 return -TARGET_EFAULT; 3641 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3642 unlock_user(argptr, arg, target_size); 3643 } 3644 break; 3645 case IOC_W: 3646 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3647 if (!argptr) 3648 return -TARGET_EFAULT; 3649 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3650 unlock_user(argptr, arg, 0); 3651 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3652 break; 3653 default: 3654 case IOC_RW: 3655 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3656 if (!argptr) 3657 return -TARGET_EFAULT; 3658 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3659 unlock_user(argptr, arg, 0); 3660 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3661 if (!is_error(ret)) { 3662 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3663 if (!argptr) 3664 return -TARGET_EFAULT; 3665 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3666 unlock_user(argptr, arg, target_size); 3667 } 3668 break; 3669 } 3670 break; 3671 default: 3672 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3673 (long)cmd, arg_type[0]); 3674 ret = -TARGET_ENOSYS; 3675 break; 3676 } 3677 return ret; 3678 } 3679 3680 static const bitmask_transtbl iflag_tbl[] = { 3681 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3682 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3683 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3684 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3685 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3686 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3687 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3688 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3689 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3690 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3691 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3692 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3693 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3694 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3695 { 0, 0, 0, 0 } 3696 }; 3697 3698 static const bitmask_transtbl oflag_tbl[] = { 3699 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3700 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3701 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3702 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3703 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR 
}, 3704 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3705 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3706 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3707 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3708 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3709 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3710 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3711 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3712 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3713 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3714 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3715 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3716 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3717 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3718 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3719 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3720 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3721 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3722 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3723 { 0, 0, 0, 0 } 3724 }; 3725 3726 static const bitmask_transtbl cflag_tbl[] = { 3727 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3728 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3729 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3730 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3731 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3732 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3733 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3734 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3735 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3736 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3737 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3738 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3739 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3740 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3741 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3742 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3743 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3744 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3745 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3746 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3747 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3748 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3749 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3750 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3751 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3752 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3753 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3754 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3755 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3756 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3757 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3758 { 0, 0, 0, 0 } 3759 }; 3760 3761 static const bitmask_transtbl lflag_tbl[] = { 3762 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3763 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3764 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3765 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3766 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3767 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3768 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3769 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3770 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3771 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3772 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3773 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3774 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3775 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3776 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3777 { 0, 0, 0, 0 } 3778 }; 3779 3780 static void target_to_host_termios 
(void *dst, const void *src) 3781 { 3782 struct host_termios *host = dst; 3783 const struct target_termios *target = src; 3784 3785 host->c_iflag = 3786 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3787 host->c_oflag = 3788 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3789 host->c_cflag = 3790 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3791 host->c_lflag = 3792 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3793 host->c_line = target->c_line; 3794 3795 memset(host->c_cc, 0, sizeof(host->c_cc)); 3796 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3797 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3798 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3799 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3800 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3801 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3802 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3803 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3804 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3805 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3806 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3807 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3808 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3809 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3810 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3811 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3812 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3813 } 3814 3815 static void host_to_target_termios (void *dst, const void *src) 3816 { 3817 struct target_termios *target = dst; 3818 const struct host_termios *host = src; 3819 3820 target->c_iflag = 3821 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3822 target->c_oflag = 3823 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3824 target->c_cflag = 3825 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3826 target->c_lflag = 3827 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3828 target->c_line = host->c_line; 3829 3830 memset(target->c_cc, 0, sizeof(target->c_cc)); 3831 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3832 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3833 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3834 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3835 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3836 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3837 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3838 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3839 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3840 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3841 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3842 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3843 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3844 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3845 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3846 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3847 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3848 } 3849 3850 static const StructEntry struct_termios_def = { 3851 .convert = { host_to_target_termios, target_to_host_termios }, 3852 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3853 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3854 }; 3855 3856 static bitmask_transtbl mmap_flags_tbl[] = { 3857 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3858 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3859 { TARGET_MAP_FIXED, 
TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 3860 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 3861 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 3862 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 3863 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 3864 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 3865 { 0, 0, 0, 0 } 3866 }; 3867 3868 #if defined(TARGET_I386) 3869 3870 /* NOTE: there is really one LDT for all the threads */ 3871 static uint8_t *ldt_table; 3872 3873 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 3874 { 3875 int size; 3876 void *p; 3877 3878 if (!ldt_table) 3879 return 0; 3880 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 3881 if (size > bytecount) 3882 size = bytecount; 3883 p = lock_user(VERIFY_WRITE, ptr, size, 0); 3884 if (!p) 3885 return -TARGET_EFAULT; 3886 /* ??? Should this by byteswapped? */ 3887 memcpy(p, ldt_table, size); 3888 unlock_user(p, ptr, size); 3889 return size; 3890 } 3891 3892 /* XXX: add locking support */ 3893 static abi_long write_ldt(CPUX86State *env, 3894 abi_ulong ptr, unsigned long bytecount, int oldmode) 3895 { 3896 struct target_modify_ldt_ldt_s ldt_info; 3897 struct target_modify_ldt_ldt_s *target_ldt_info; 3898 int seg_32bit, contents, read_exec_only, limit_in_pages; 3899 int seg_not_present, useable, lm; 3900 uint32_t *lp, entry_1, entry_2; 3901 3902 if (bytecount != sizeof(ldt_info)) 3903 return -TARGET_EINVAL; 3904 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 3905 return -TARGET_EFAULT; 3906 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3907 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 3908 ldt_info.limit = tswap32(target_ldt_info->limit); 3909 ldt_info.flags = tswap32(target_ldt_info->flags); 3910 unlock_user_struct(target_ldt_info, ptr, 0); 3911 3912 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 3913 return -TARGET_EINVAL; 3914 seg_32bit = ldt_info.flags & 1; 3915 contents = (ldt_info.flags >> 1) & 3; 3916 read_exec_only = (ldt_info.flags >> 3) & 1; 3917 limit_in_pages = (ldt_info.flags >> 4) & 1; 3918 seg_not_present = (ldt_info.flags >> 5) & 1; 3919 useable = (ldt_info.flags >> 6) & 1; 3920 #ifdef TARGET_ABI32 3921 lm = 0; 3922 #else 3923 lm = (ldt_info.flags >> 7) & 1; 3924 #endif 3925 if (contents == 3) { 3926 if (oldmode) 3927 return -TARGET_EINVAL; 3928 if (seg_not_present == 0) 3929 return -TARGET_EINVAL; 3930 } 3931 /* allocate the LDT */ 3932 if (!ldt_table) { 3933 env->ldt.base = target_mmap(0, 3934 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 3935 PROT_READ|PROT_WRITE, 3936 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 3937 if (env->ldt.base == -1) 3938 return -TARGET_ENOMEM; 3939 memset(g2h(env->ldt.base), 0, 3940 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 3941 env->ldt.limit = 0xffff; 3942 ldt_table = g2h(env->ldt.base); 3943 } 3944 3945 /* NOTE: same code as Linux kernel */ 3946 /* Allow LDTs to be cleared by the user. 
*/ 3947 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3948 if (oldmode || 3949 (contents == 0 && 3950 read_exec_only == 1 && 3951 seg_32bit == 0 && 3952 limit_in_pages == 0 && 3953 seg_not_present == 1 && 3954 useable == 0 )) { 3955 entry_1 = 0; 3956 entry_2 = 0; 3957 goto install; 3958 } 3959 } 3960 3961 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 3962 (ldt_info.limit & 0x0ffff); 3963 entry_2 = (ldt_info.base_addr & 0xff000000) | 3964 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 3965 (ldt_info.limit & 0xf0000) | 3966 ((read_exec_only ^ 1) << 9) | 3967 (contents << 10) | 3968 ((seg_not_present ^ 1) << 15) | 3969 (seg_32bit << 22) | 3970 (limit_in_pages << 23) | 3971 (lm << 21) | 3972 0x7000; 3973 if (!oldmode) 3974 entry_2 |= (useable << 20); 3975 3976 /* Install the new entry ... */ 3977 install: 3978 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 3979 lp[0] = tswap32(entry_1); 3980 lp[1] = tswap32(entry_2); 3981 return 0; 3982 } 3983 3984 /* specific and weird i386 syscalls */ 3985 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 3986 unsigned long bytecount) 3987 { 3988 abi_long ret; 3989 3990 switch (func) { 3991 case 0: 3992 ret = read_ldt(ptr, bytecount); 3993 break; 3994 case 1: 3995 ret = write_ldt(env, ptr, bytecount, 1); 3996 break; 3997 case 0x11: 3998 ret = write_ldt(env, ptr, bytecount, 0); 3999 break; 4000 default: 4001 ret = -TARGET_ENOSYS; 4002 break; 4003 } 4004 return ret; 4005 } 4006 4007 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4008 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4009 { 4010 uint64_t *gdt_table = g2h(env->gdt.base); 4011 struct target_modify_ldt_ldt_s ldt_info; 4012 struct target_modify_ldt_ldt_s *target_ldt_info; 4013 int seg_32bit, contents, read_exec_only, limit_in_pages; 4014 int seg_not_present, useable, lm; 4015 uint32_t *lp, entry_1, entry_2; 4016 int i; 4017 4018 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4019 if (!target_ldt_info) 4020 return -TARGET_EFAULT; 4021 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4022 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4023 ldt_info.limit = tswap32(target_ldt_info->limit); 4024 ldt_info.flags = tswap32(target_ldt_info->flags); 4025 if (ldt_info.entry_number == -1) { 4026 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4027 if (gdt_table[i] == 0) { 4028 ldt_info.entry_number = i; 4029 target_ldt_info->entry_number = tswap32(i); 4030 break; 4031 } 4032 } 4033 } 4034 unlock_user_struct(target_ldt_info, ptr, 1); 4035 4036 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4037 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4038 return -TARGET_EINVAL; 4039 seg_32bit = ldt_info.flags & 1; 4040 contents = (ldt_info.flags >> 1) & 3; 4041 read_exec_only = (ldt_info.flags >> 3) & 1; 4042 limit_in_pages = (ldt_info.flags >> 4) & 1; 4043 seg_not_present = (ldt_info.flags >> 5) & 1; 4044 useable = (ldt_info.flags >> 6) & 1; 4045 #ifdef TARGET_ABI32 4046 lm = 0; 4047 #else 4048 lm = (ldt_info.flags >> 7) & 1; 4049 #endif 4050 4051 if (contents == 3) { 4052 if (seg_not_present == 0) 4053 return -TARGET_EINVAL; 4054 } 4055 4056 /* NOTE: same code as Linux kernel */ 4057 /* Allow LDTs to be cleared by the user. 
*/ 4058 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4059 if ((contents == 0 && 4060 read_exec_only == 1 && 4061 seg_32bit == 0 && 4062 limit_in_pages == 0 && 4063 seg_not_present == 1 && 4064 useable == 0 )) { 4065 entry_1 = 0; 4066 entry_2 = 0; 4067 goto install; 4068 } 4069 } 4070 4071 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4072 (ldt_info.limit & 0x0ffff); 4073 entry_2 = (ldt_info.base_addr & 0xff000000) | 4074 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4075 (ldt_info.limit & 0xf0000) | 4076 ((read_exec_only ^ 1) << 9) | 4077 (contents << 10) | 4078 ((seg_not_present ^ 1) << 15) | 4079 (seg_32bit << 22) | 4080 (limit_in_pages << 23) | 4081 (useable << 20) | 4082 (lm << 21) | 4083 0x7000; 4084 4085 /* Install the new entry ... */ 4086 install: 4087 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4088 lp[0] = tswap32(entry_1); 4089 lp[1] = tswap32(entry_2); 4090 return 0; 4091 } 4092 4093 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4094 { 4095 struct target_modify_ldt_ldt_s *target_ldt_info; 4096 uint64_t *gdt_table = g2h(env->gdt.base); 4097 uint32_t base_addr, limit, flags; 4098 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4099 int seg_not_present, useable, lm; 4100 uint32_t *lp, entry_1, entry_2; 4101 4102 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4103 if (!target_ldt_info) 4104 return -TARGET_EFAULT; 4105 idx = tswap32(target_ldt_info->entry_number); 4106 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4107 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4108 unlock_user_struct(target_ldt_info, ptr, 1); 4109 return -TARGET_EINVAL; 4110 } 4111 lp = (uint32_t *)(gdt_table + idx); 4112 entry_1 = tswap32(lp[0]); 4113 entry_2 = tswap32(lp[1]); 4114 4115 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4116 contents = (entry_2 >> 10) & 3; 4117 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4118 seg_32bit = (entry_2 >> 22) & 1; 4119 limit_in_pages = (entry_2 >> 23) & 1; 4120 useable = (entry_2 >> 20) & 1; 4121 #ifdef TARGET_ABI32 4122 lm = 0; 4123 #else 4124 lm = (entry_2 >> 21) & 1; 4125 #endif 4126 flags = (seg_32bit << 0) | (contents << 1) | 4127 (read_exec_only << 3) | (limit_in_pages << 4) | 4128 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4129 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4130 base_addr = (entry_1 >> 16) | 4131 (entry_2 & 0xff000000) | 4132 ((entry_2 & 0xff) << 16); 4133 target_ldt_info->base_addr = tswapal(base_addr); 4134 target_ldt_info->limit = tswap32(limit); 4135 target_ldt_info->flags = tswap32(flags); 4136 unlock_user_struct(target_ldt_info, ptr, 1); 4137 return 0; 4138 } 4139 #endif /* TARGET_I386 && TARGET_ABI32 */ 4140 4141 #ifndef TARGET_ABI32 4142 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4143 { 4144 abi_long ret = 0; 4145 abi_ulong val; 4146 int idx; 4147 4148 switch(code) { 4149 case TARGET_ARCH_SET_GS: 4150 case TARGET_ARCH_SET_FS: 4151 if (code == TARGET_ARCH_SET_GS) 4152 idx = R_GS; 4153 else 4154 idx = R_FS; 4155 cpu_x86_load_seg(env, idx, 0); 4156 env->segs[idx].base = addr; 4157 break; 4158 case TARGET_ARCH_GET_GS: 4159 case TARGET_ARCH_GET_FS: 4160 if (code == TARGET_ARCH_GET_GS) 4161 idx = R_GS; 4162 else 4163 idx = R_FS; 4164 val = env->segs[idx].base; 4165 if (put_user(val, addr, abi_ulong)) 4166 ret = -TARGET_EFAULT; 4167 break; 4168 default: 4169 ret = -TARGET_EINVAL; 4170 break; 4171 } 4172 return ret; 4173 } 4174 #endif 4175 4176 #endif /* defined(TARGET_I386) */ 4177 4178 #define NEW_STACK_SIZE 0x40000 4179 4180 #if defined(CONFIG_USE_NPTL) 
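/*
 * Thread-creation path for NPTL guests: do_fork() below backs each
 * CLONE_VM clone with a host pthread running clone_func().  Parent and
 * child hand-shake through the new_thread_info mutex/condvar so that
 * the child's TID is published (and its TLS already set up) before the
 * child enters cpu_loop(), while clone_lock keeps the whole setup
 * atomic with respect to concurrent clones.
 */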
4181 4182 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 4183 typedef struct { 4184 CPUArchState *env; 4185 pthread_mutex_t mutex; 4186 pthread_cond_t cond; 4187 pthread_t thread; 4188 uint32_t tid; 4189 abi_ulong child_tidptr; 4190 abi_ulong parent_tidptr; 4191 sigset_t sigmask; 4192 } new_thread_info; 4193 4194 static void *clone_func(void *arg) 4195 { 4196 new_thread_info *info = arg; 4197 CPUArchState *env; 4198 TaskState *ts; 4199 4200 env = info->env; 4201 thread_env = env; 4202 ts = (TaskState *)thread_env->opaque; 4203 info->tid = gettid(); 4204 env->host_tid = info->tid; 4205 task_settid(ts); 4206 if (info->child_tidptr) 4207 put_user_u32(info->tid, info->child_tidptr); 4208 if (info->parent_tidptr) 4209 put_user_u32(info->tid, info->parent_tidptr); 4210 /* Enable signals. */ 4211 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 4212 /* Signal to the parent that we're ready. */ 4213 pthread_mutex_lock(&info->mutex); 4214 pthread_cond_broadcast(&info->cond); 4215 pthread_mutex_unlock(&info->mutex); 4216 /* Wait until the parent has finished initializing the TLS state. */ 4217 pthread_mutex_lock(&clone_lock); 4218 pthread_mutex_unlock(&clone_lock); 4219 cpu_loop(env); 4220 /* never exits */ 4221 return NULL; 4222 } 4223 #else 4224 4225 static int clone_func(void *arg) 4226 { 4227 CPUArchState *env = arg; 4228 cpu_loop(env); 4229 /* never exits */ 4230 return 0; 4231 } 4232 #endif 4233 4234 /* do_fork() must return host values and target errnos (unlike most 4235 do_*() functions). */ 4236 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4237 abi_ulong parent_tidptr, target_ulong newtls, 4238 abi_ulong child_tidptr) 4239 { 4240 int ret; 4241 TaskState *ts; 4242 CPUArchState *new_env; 4243 #if defined(CONFIG_USE_NPTL) 4244 unsigned int nptl_flags; 4245 sigset_t sigmask; 4246 #else 4247 uint8_t *new_stack; 4248 #endif 4249 4250 /* Emulate vfork() with fork() */ 4251 if (flags & CLONE_VFORK) 4252 flags &= ~(CLONE_VFORK | CLONE_VM); 4253 4254 if (flags & CLONE_VM) { 4255 TaskState *parent_ts = (TaskState *)env->opaque; 4256 #if defined(CONFIG_USE_NPTL) 4257 new_thread_info info; 4258 pthread_attr_t attr; 4259 #endif 4260 ts = g_malloc0(sizeof(TaskState)); 4261 init_task_state(ts); 4262 /* we create a new CPU instance. */ 4263 new_env = cpu_copy(env); 4264 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC) 4265 cpu_state_reset(new_env); 4266 #endif 4267 /* Init regs that differ from the parent. */ 4268 cpu_clone_regs(new_env, newsp); 4269 new_env->opaque = ts; 4270 ts->bprm = parent_ts->bprm; 4271 ts->info = parent_ts->info; 4272 #if defined(CONFIG_USE_NPTL) 4273 nptl_flags = flags; 4274 flags &= ~CLONE_NPTL_FLAGS2; 4275 4276 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4277 ts->child_tidptr = child_tidptr; 4278 } 4279 4280 if (nptl_flags & CLONE_SETTLS) 4281 cpu_set_tls (new_env, newtls); 4282 4283 /* Grab a mutex so that thread setup appears atomic.
*/ 4284 pthread_mutex_lock(&clone_lock); 4285 4286 memset(&info, 0, sizeof(info)); 4287 pthread_mutex_init(&info.mutex, NULL); 4288 pthread_mutex_lock(&info.mutex); 4289 pthread_cond_init(&info.cond, NULL); 4290 info.env = new_env; 4291 if (nptl_flags & CLONE_CHILD_SETTID) 4292 info.child_tidptr = child_tidptr; 4293 if (nptl_flags & CLONE_PARENT_SETTID) 4294 info.parent_tidptr = parent_tidptr; 4295 4296 ret = pthread_attr_init(&attr); 4297 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4298 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4299 /* It is not safe to deliver signals until the child has finished 4300 initializing, so temporarily block all signals. */ 4301 sigfillset(&sigmask); 4302 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4303 4304 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4305 /* TODO: Free new CPU state if thread creation failed. */ 4306 4307 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4308 pthread_attr_destroy(&attr); 4309 if (ret == 0) { 4310 /* Wait for the child to initialize. */ 4311 pthread_cond_wait(&info.cond, &info.mutex); 4312 ret = info.tid; 4313 if (flags & CLONE_PARENT_SETTID) 4314 put_user_u32(ret, parent_tidptr); 4315 } else { 4316 ret = -1; 4317 } 4318 pthread_mutex_unlock(&info.mutex); 4319 pthread_cond_destroy(&info.cond); 4320 pthread_mutex_destroy(&info.mutex); 4321 pthread_mutex_unlock(&clone_lock); 4322 #else 4323 if (flags & CLONE_NPTL_FLAGS2) 4324 return -EINVAL; 4325 /* This is probably going to die very quickly, but do it anyway. */ 4326 new_stack = g_malloc0 (NEW_STACK_SIZE); 4327 #ifdef __ia64__ 4328 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env); 4329 #else 4330 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env); 4331 #endif 4332 #endif 4333 } else { 4334 /* if no CLONE_VM, we consider it is a fork */ 4335 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4336 return -EINVAL; 4337 fork_start(); 4338 ret = fork(); 4339 if (ret == 0) { 4340 /* Child Process. */ 4341 cpu_clone_regs(env, newsp); 4342 fork_end(1); 4343 #if defined(CONFIG_USE_NPTL) 4344 /* There is a race condition here. The parent process could 4345 theoretically read the TID in the child process before the child 4346 tid is set. This would require using either ptrace 4347 (not implemented) or having *_tidptr to point at a shared memory 4348 mapping. We can't repeat the spinlock hack used above because 4349 the child process gets its own copy of the lock. */ 4350 if (flags & CLONE_CHILD_SETTID) 4351 put_user_u32(gettid(), child_tidptr); 4352 if (flags & CLONE_PARENT_SETTID) 4353 put_user_u32(gettid(), parent_tidptr); 4354 ts = (TaskState *)env->opaque; 4355 if (flags & CLONE_SETTLS) 4356 cpu_set_tls (env, newtls); 4357 if (flags & CLONE_CHILD_CLEARTID) 4358 ts->child_tidptr = child_tidptr; 4359 #endif 4360 } else { 4361 fork_end(0); 4362 } 4363 } 4364 return ret; 4365 } 4366 4367 /* warning : doesn't handle linux specific flags... 
*/ 4368 static int target_to_host_fcntl_cmd(int cmd) 4369 { 4370 switch(cmd) { 4371 case TARGET_F_DUPFD: 4372 case TARGET_F_GETFD: 4373 case TARGET_F_SETFD: 4374 case TARGET_F_GETFL: 4375 case TARGET_F_SETFL: 4376 return cmd; 4377 case TARGET_F_GETLK: 4378 return F_GETLK; 4379 case TARGET_F_SETLK: 4380 return F_SETLK; 4381 case TARGET_F_SETLKW: 4382 return F_SETLKW; 4383 case TARGET_F_GETOWN: 4384 return F_GETOWN; 4385 case TARGET_F_SETOWN: 4386 return F_SETOWN; 4387 case TARGET_F_GETSIG: 4388 return F_GETSIG; 4389 case TARGET_F_SETSIG: 4390 return F_SETSIG; 4391 #if TARGET_ABI_BITS == 32 4392 case TARGET_F_GETLK64: 4393 return F_GETLK64; 4394 case TARGET_F_SETLK64: 4395 return F_SETLK64; 4396 case TARGET_F_SETLKW64: 4397 return F_SETLKW64; 4398 #endif 4399 case TARGET_F_SETLEASE: 4400 return F_SETLEASE; 4401 case TARGET_F_GETLEASE: 4402 return F_GETLEASE; 4403 #ifdef F_DUPFD_CLOEXEC 4404 case TARGET_F_DUPFD_CLOEXEC: 4405 return F_DUPFD_CLOEXEC; 4406 #endif 4407 case TARGET_F_NOTIFY: 4408 return F_NOTIFY; 4409 default: 4410 return -TARGET_EINVAL; 4411 } 4412 return -TARGET_EINVAL; 4413 } 4414 4415 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4416 { 4417 struct flock fl; 4418 struct target_flock *target_fl; 4419 struct flock64 fl64; 4420 struct target_flock64 *target_fl64; 4421 abi_long ret; 4422 int host_cmd = target_to_host_fcntl_cmd(cmd); 4423 4424 if (host_cmd == -TARGET_EINVAL) 4425 return host_cmd; 4426 4427 switch(cmd) { 4428 case TARGET_F_GETLK: 4429 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4430 return -TARGET_EFAULT; 4431 fl.l_type = tswap16(target_fl->l_type); 4432 fl.l_whence = tswap16(target_fl->l_whence); 4433 fl.l_start = tswapal(target_fl->l_start); 4434 fl.l_len = tswapal(target_fl->l_len); 4435 fl.l_pid = tswap32(target_fl->l_pid); 4436 unlock_user_struct(target_fl, arg, 0); 4437 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4438 if (ret == 0) { 4439 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4440 return -TARGET_EFAULT; 4441 target_fl->l_type = tswap16(fl.l_type); 4442 target_fl->l_whence = tswap16(fl.l_whence); 4443 target_fl->l_start = tswapal(fl.l_start); 4444 target_fl->l_len = tswapal(fl.l_len); 4445 target_fl->l_pid = tswap32(fl.l_pid); 4446 unlock_user_struct(target_fl, arg, 1); 4447 } 4448 break; 4449 4450 case TARGET_F_SETLK: 4451 case TARGET_F_SETLKW: 4452 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4453 return -TARGET_EFAULT; 4454 fl.l_type = tswap16(target_fl->l_type); 4455 fl.l_whence = tswap16(target_fl->l_whence); 4456 fl.l_start = tswapal(target_fl->l_start); 4457 fl.l_len = tswapal(target_fl->l_len); 4458 fl.l_pid = tswap32(target_fl->l_pid); 4459 unlock_user_struct(target_fl, arg, 0); 4460 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4461 break; 4462 4463 case TARGET_F_GETLK64: 4464 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4465 return -TARGET_EFAULT; 4466 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4467 fl64.l_whence = tswap16(target_fl64->l_whence); 4468 fl64.l_start = tswap64(target_fl64->l_start); 4469 fl64.l_len = tswap64(target_fl64->l_len); 4470 fl64.l_pid = tswap32(target_fl64->l_pid); 4471 unlock_user_struct(target_fl64, arg, 0); 4472 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4473 if (ret == 0) { 4474 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4475 return -TARGET_EFAULT; 4476 target_fl64->l_type = tswap16(fl64.l_type) >> 1; 4477 target_fl64->l_whence = tswap16(fl64.l_whence); 4478 target_fl64->l_start = tswap64(fl64.l_start); 4479 target_fl64->l_len = 
tswap64(fl64.l_len); 4480 target_fl64->l_pid = tswap32(fl64.l_pid); 4481 unlock_user_struct(target_fl64, arg, 1); 4482 } 4483 break; 4484 case TARGET_F_SETLK64: 4485 case TARGET_F_SETLKW64: 4486 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4487 return -TARGET_EFAULT; 4488 fl64.l_type = tswap16(target_fl64->l_type) >> 1; 4489 fl64.l_whence = tswap16(target_fl64->l_whence); 4490 fl64.l_start = tswap64(target_fl64->l_start); 4491 fl64.l_len = tswap64(target_fl64->l_len); 4492 fl64.l_pid = tswap32(target_fl64->l_pid); 4493 unlock_user_struct(target_fl64, arg, 0); 4494 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4495 break; 4496 4497 case TARGET_F_GETFL: 4498 ret = get_errno(fcntl(fd, host_cmd, arg)); 4499 if (ret >= 0) { 4500 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4501 } 4502 break; 4503 4504 case TARGET_F_SETFL: 4505 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4506 break; 4507 4508 case TARGET_F_SETOWN: 4509 case TARGET_F_GETOWN: 4510 case TARGET_F_SETSIG: 4511 case TARGET_F_GETSIG: 4512 case TARGET_F_SETLEASE: 4513 case TARGET_F_GETLEASE: 4514 ret = get_errno(fcntl(fd, host_cmd, arg)); 4515 break; 4516 4517 default: 4518 ret = get_errno(fcntl(fd, cmd, arg)); 4519 break; 4520 } 4521 return ret; 4522 } 4523 4524 #ifdef USE_UID16 4525 4526 static inline int high2lowuid(int uid) 4527 { 4528 if (uid > 65535) 4529 return 65534; 4530 else 4531 return uid; 4532 } 4533 4534 static inline int high2lowgid(int gid) 4535 { 4536 if (gid > 65535) 4537 return 65534; 4538 else 4539 return gid; 4540 } 4541 4542 static inline int low2highuid(int uid) 4543 { 4544 if ((int16_t)uid == -1) 4545 return -1; 4546 else 4547 return uid; 4548 } 4549 4550 static inline int low2highgid(int gid) 4551 { 4552 if ((int16_t)gid == -1) 4553 return -1; 4554 else 4555 return gid; 4556 } 4557 static inline int tswapid(int id) 4558 { 4559 return tswap16(id); 4560 } 4561 #else /* !USE_UID16 */ 4562 static inline int high2lowuid(int uid) 4563 { 4564 return uid; 4565 } 4566 static inline int high2lowgid(int gid) 4567 { 4568 return gid; 4569 } 4570 static inline int low2highuid(int uid) 4571 { 4572 return uid; 4573 } 4574 static inline int low2highgid(int gid) 4575 { 4576 return gid; 4577 } 4578 static inline int tswapid(int id) 4579 { 4580 return tswap32(id); 4581 } 4582 #endif /* USE_UID16 */ 4583 4584 void syscall_init(void) 4585 { 4586 IOCTLEntry *ie; 4587 const argtype *arg_type; 4588 int size; 4589 int i; 4590 4591 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4592 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4593 #include "syscall_types.h" 4594 #undef STRUCT 4595 #undef STRUCT_SPECIAL 4596 4597 /* we patch the ioctl size if necessary. We rely on the fact that 4598 no ioctl has all the bits at '1' in the size field */ 4599 ie = ioctl_entries; 4600 while (ie->target_cmd != 0) { 4601 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4602 TARGET_IOC_SIZEMASK) { 4603 arg_type = ie->arg_type; 4604 if (arg_type[0] != TYPE_PTR) { 4605 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4606 ie->target_cmd); 4607 exit(1); 4608 } 4609 arg_type++; 4610 size = thunk_type_size(arg_type, 0); 4611 ie->target_cmd = (ie->target_cmd & 4612 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4613 (size << TARGET_IOC_SIZESHIFT); 4614 } 4615 4616 /* Build target_to_host_errno_table[] table from 4617 * host_to_target_errno_table[]. 
*/ 4618 for (i=0; i < ERRNO_TABLE_SIZE; i++) 4619 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4620 4621 /* automatic consistency check if same arch */ 4622 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4623 (defined(__x86_64__) && defined(TARGET_X86_64)) 4624 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4625 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4626 ie->name, ie->target_cmd, ie->host_cmd); 4627 } 4628 #endif 4629 ie++; 4630 } 4631 } 4632 4633 #if TARGET_ABI_BITS == 32 4634 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4635 { 4636 #ifdef TARGET_WORDS_BIGENDIAN 4637 return ((uint64_t)word0 << 32) | word1; 4638 #else 4639 return ((uint64_t)word1 << 32) | word0; 4640 #endif 4641 } 4642 #else /* TARGET_ABI_BITS == 32 */ 4643 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4644 { 4645 return word0; 4646 } 4647 #endif /* TARGET_ABI_BITS != 32 */ 4648 4649 #ifdef TARGET_NR_truncate64 4650 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4651 abi_long arg2, 4652 abi_long arg3, 4653 abi_long arg4) 4654 { 4655 if (regpairs_aligned(cpu_env)) { 4656 arg2 = arg3; 4657 arg3 = arg4; 4658 } 4659 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4660 } 4661 #endif 4662 4663 #ifdef TARGET_NR_ftruncate64 4664 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4665 abi_long arg2, 4666 abi_long arg3, 4667 abi_long arg4) 4668 { 4669 if (regpairs_aligned(cpu_env)) { 4670 arg2 = arg3; 4671 arg3 = arg4; 4672 } 4673 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4674 } 4675 #endif 4676 4677 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4678 abi_ulong target_addr) 4679 { 4680 struct target_timespec *target_ts; 4681 4682 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4683 return -TARGET_EFAULT; 4684 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4685 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4686 unlock_user_struct(target_ts, target_addr, 0); 4687 return 0; 4688 } 4689 4690 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4691 struct timespec *host_ts) 4692 { 4693 struct target_timespec *target_ts; 4694 4695 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4696 return -TARGET_EFAULT; 4697 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4698 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4699 unlock_user_struct(target_ts, target_addr, 1); 4700 return 0; 4701 } 4702 4703 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4704 static inline abi_long host_to_target_stat64(void *cpu_env, 4705 abi_ulong target_addr, 4706 struct stat *host_st) 4707 { 4708 #ifdef TARGET_ARM 4709 if (((CPUARMState *)cpu_env)->eabi) { 4710 struct target_eabi_stat64 *target_st; 4711 4712 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4713 return -TARGET_EFAULT; 4714 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4715 __put_user(host_st->st_dev, &target_st->st_dev); 4716 __put_user(host_st->st_ino, &target_st->st_ino); 4717 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4718 __put_user(host_st->st_ino, &target_st->__st_ino); 4719 #endif 4720 __put_user(host_st->st_mode, &target_st->st_mode); 4721 __put_user(host_st->st_nlink, &target_st->st_nlink); 4722 __put_user(host_st->st_uid, &target_st->st_uid); 4723 __put_user(host_st->st_gid, &target_st->st_gid); 4724 __put_user(host_st->st_rdev, &target_st->st_rdev); 4725 
__put_user(host_st->st_size, &target_st->st_size); 4726 __put_user(host_st->st_blksize, &target_st->st_blksize); 4727 __put_user(host_st->st_blocks, &target_st->st_blocks); 4728 __put_user(host_st->st_atime, &target_st->target_st_atime); 4729 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4730 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4731 unlock_user_struct(target_st, target_addr, 1); 4732 } else 4733 #endif 4734 { 4735 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA) 4736 struct target_stat *target_st; 4737 #else 4738 struct target_stat64 *target_st; 4739 #endif 4740 4741 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4742 return -TARGET_EFAULT; 4743 memset(target_st, 0, sizeof(*target_st)); 4744 __put_user(host_st->st_dev, &target_st->st_dev); 4745 __put_user(host_st->st_ino, &target_st->st_ino); 4746 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4747 __put_user(host_st->st_ino, &target_st->__st_ino); 4748 #endif 4749 __put_user(host_st->st_mode, &target_st->st_mode); 4750 __put_user(host_st->st_nlink, &target_st->st_nlink); 4751 __put_user(host_st->st_uid, &target_st->st_uid); 4752 __put_user(host_st->st_gid, &target_st->st_gid); 4753 __put_user(host_st->st_rdev, &target_st->st_rdev); 4754 /* XXX: better use of kernel struct */ 4755 __put_user(host_st->st_size, &target_st->st_size); 4756 __put_user(host_st->st_blksize, &target_st->st_blksize); 4757 __put_user(host_st->st_blocks, &target_st->st_blocks); 4758 __put_user(host_st->st_atime, &target_st->target_st_atime); 4759 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4760 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4761 unlock_user_struct(target_st, target_addr, 1); 4762 } 4763 4764 return 0; 4765 } 4766 #endif 4767 4768 #if defined(CONFIG_USE_NPTL) 4769 /* ??? Using host futex calls even when target atomic operations 4770 are not really atomic probably breaks things. However implementing 4771 futexes locally would make futexes shared between multiple processes 4772 tricky. However they're probably useless because guest atomic 4773 operations won't work either. */ 4774 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4775 target_ulong uaddr2, int val3) 4776 { 4777 struct timespec ts, *pts; 4778 int base_op; 4779 4780 /* ??? We assume FUTEX_* constants are the same on both host 4781 and target. */ 4782 #ifdef FUTEX_CMD_MASK 4783 base_op = op & FUTEX_CMD_MASK; 4784 #else 4785 base_op = op; 4786 #endif 4787 switch (base_op) { 4788 case FUTEX_WAIT: 4789 if (timeout) { 4790 pts = &ts; 4791 target_to_host_timespec(pts, timeout); 4792 } else { 4793 pts = NULL; 4794 } 4795 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4796 pts, NULL, 0)); 4797 case FUTEX_WAKE: 4798 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4799 case FUTEX_FD: 4800 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4801 case FUTEX_REQUEUE: 4802 case FUTEX_CMP_REQUEUE: 4803 case FUTEX_WAKE_OP: 4804 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4805 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4806 But the prototype takes a `struct timespec *'; insert casts 4807 to satisfy the compiler. We do not need to tswap TIMEOUT 4808 since it's not compared to guest memory. */ 4809 pts = (struct timespec *)(uintptr_t) timeout; 4810 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4811 g2h(uaddr2), 4812 (base_op == FUTEX_CMP_REQUEUE 4813 ? 
tswap32(val3) 4814 : val3))); 4815 default: 4816 return -TARGET_ENOSYS; 4817 } 4818 } 4819 #endif 4820 4821 /* Map host to target signal numbers for the wait family of syscalls. 4822 Assume all other status bits are the same. */ 4823 static int host_to_target_waitstatus(int status) 4824 { 4825 if (WIFSIGNALED(status)) { 4826 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4827 } 4828 if (WIFSTOPPED(status)) { 4829 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4830 | (status & 0xff); 4831 } 4832 return status; 4833 } 4834 4835 int get_osversion(void) 4836 { 4837 static int osversion; 4838 struct new_utsname buf; 4839 const char *s; 4840 int i, n, tmp; 4841 if (osversion) 4842 return osversion; 4843 if (qemu_uname_release && *qemu_uname_release) { 4844 s = qemu_uname_release; 4845 } else { 4846 if (sys_uname(&buf)) 4847 return 0; 4848 s = buf.release; 4849 } 4850 tmp = 0; 4851 for (i = 0; i < 3; i++) { 4852 n = 0; 4853 while (*s >= '0' && *s <= '9') { 4854 n *= 10; 4855 n += *s - '0'; 4856 s++; 4857 } 4858 tmp = (tmp << 8) + n; 4859 if (*s == '.') 4860 s++; 4861 } 4862 osversion = tmp; 4863 return osversion; 4864 } 4865 4866 4867 static int open_self_maps(void *cpu_env, int fd) 4868 { 4869 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4870 4871 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 4872 (unsigned long long)ts->info->stack_limit, 4873 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1)) 4874 & TARGET_PAGE_MASK, 4875 (unsigned long long)ts->stack_base); 4876 4877 return 0; 4878 } 4879 4880 static int open_self_stat(void *cpu_env, int fd) 4881 { 4882 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4883 abi_ulong start_stack = ts->info->start_stack; 4884 int i; 4885 4886 for (i = 0; i < 44; i++) { 4887 char buf[128]; 4888 int len; 4889 uint64_t val = 0; 4890 4891 if (i == 0) { 4892 /* pid */ 4893 val = getpid(); 4894 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4895 } else if (i == 1) { 4896 /* app name */ 4897 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 4898 } else if (i == 27) { 4899 /* stack bottom */ 4900 val = start_stack; 4901 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 4902 } else { 4903 /* for the rest, there is MasterCard */ 4904 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 4905 } 4906 4907 len = strlen(buf); 4908 if (write(fd, buf, len) != len) { 4909 return -1; 4910 } 4911 } 4912 4913 return 0; 4914 } 4915 4916 static int open_self_auxv(void *cpu_env, int fd) 4917 { 4918 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 4919 abi_ulong auxv = ts->info->saved_auxv; 4920 abi_ulong len = ts->info->auxv_len; 4921 char *ptr; 4922 4923 /* 4924 * Auxiliary vector is stored in target process stack. 
* Read in the whole auxv vector and copy it to the file. 4926 */ 4927 ptr = lock_user(VERIFY_READ, auxv, len, 0); 4928 if (ptr != NULL) { 4929 while (len > 0) { 4930 ssize_t r; 4931 r = write(fd, ptr, len); 4932 if (r <= 0) { 4933 break; 4934 } 4935 len -= r; 4936 ptr += r; 4937 } 4938 lseek(fd, 0, SEEK_SET); 4939 unlock_user(ptr, auxv, len); 4940 } 4941 4942 return 0; 4943 } 4944 4945 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 4946 { 4947 struct fake_open { 4948 const char *filename; 4949 int (*fill)(void *cpu_env, int fd); 4950 }; 4951 const struct fake_open *fake_open; 4952 static const struct fake_open fakes[] = { 4953 { "/proc/self/maps", open_self_maps }, 4954 { "/proc/self/stat", open_self_stat }, 4955 { "/proc/self/auxv", open_self_auxv }, 4956 { NULL, NULL } 4957 }; 4958 4959 for (fake_open = fakes; fake_open->filename; fake_open++) { 4960 if (!strncmp(pathname, fake_open->filename, 4961 strlen(fake_open->filename))) { 4962 break; 4963 } 4964 } 4965 4966 if (fake_open->filename) { 4967 const char *tmpdir; 4968 char filename[PATH_MAX]; 4969 int fd, r; 4970 4971 /* create temporary file to map stat to */ 4972 tmpdir = getenv("TMPDIR"); 4973 if (!tmpdir) 4974 tmpdir = "/tmp"; 4975 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 4976 fd = mkstemp(filename); 4977 if (fd < 0) { 4978 return fd; 4979 } 4980 unlink(filename); 4981 4982 if ((r = fake_open->fill(cpu_env, fd))) { 4983 close(fd); 4984 return r; 4985 } 4986 lseek(fd, 0, SEEK_SET); 4987 4988 return fd; 4989 } 4990 4991 return get_errno(open(path(pathname), flags, mode)); 4992 } 4993 4994 /* do_syscall() should always have a single exit point at the end so 4995 that actions, such as logging of syscall results, can be performed. 4996 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 4997 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 4998 abi_long arg2, abi_long arg3, abi_long arg4, 4999 abi_long arg5, abi_long arg6, abi_long arg7, 5000 abi_long arg8) 5001 { 5002 abi_long ret; 5003 struct stat st; 5004 struct statfs stfs; 5005 void *p; 5006 5007 #ifdef DEBUG 5008 gemu_log("syscall %d", num); 5009 #endif 5010 if(do_strace) 5011 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5012 5013 switch(num) { 5014 case TARGET_NR_exit: 5015 #ifdef CONFIG_USE_NPTL 5016 /* In old applications this may be used to implement _exit(2). 5017 However in threaded applications it is used for thread termination, 5018 and _exit_group is used for application termination. 5019 Do thread termination if we have more than one thread. */ 5020 /* FIXME: This probably breaks if a signal arrives. We should probably 5021 be disabling signals. */ 5022 if (first_cpu->next_cpu) { 5023 TaskState *ts; 5024 CPUArchState **lastp; 5025 CPUArchState *p; 5026 5027 cpu_list_lock(); 5028 lastp = &first_cpu; 5029 p = first_cpu; 5030 while (p && p != (CPUArchState *)cpu_env) { 5031 lastp = &p->next_cpu; 5032 p = p->next_cpu; 5033 } 5034 /* If we didn't find the CPU for this thread then something is 5035 horribly wrong. */ 5036 if (!p) 5037 abort(); 5038 /* Remove the CPU from the list.
*/ 5039 *lastp = p->next_cpu; 5040 cpu_list_unlock(); 5041 ts = ((CPUArchState *)cpu_env)->opaque; 5042 if (ts->child_tidptr) { 5043 put_user_u32(0, ts->child_tidptr); 5044 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5045 NULL, NULL, 0); 5046 } 5047 thread_env = NULL; 5048 #ifdef ENV_GET_CPU 5049 object_delete(OBJECT(ENV_GET_CPU(cpu_env))); 5050 #else 5051 g_free(cpu_env); 5052 #endif 5053 g_free(ts); 5054 pthread_exit(NULL); 5055 } 5056 #endif 5057 #ifdef TARGET_GPROF 5058 _mcleanup(); 5059 #endif 5060 gdb_exit(cpu_env, arg1); 5061 _exit(arg1); 5062 ret = 0; /* avoid warning */ 5063 break; 5064 case TARGET_NR_read: 5065 if (arg3 == 0) 5066 ret = 0; 5067 else { 5068 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5069 goto efault; 5070 ret = get_errno(read(arg1, p, arg3)); 5071 unlock_user(p, arg2, ret); 5072 } 5073 break; 5074 case TARGET_NR_write: 5075 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5076 goto efault; 5077 ret = get_errno(write(arg1, p, arg3)); 5078 unlock_user(p, arg2, 0); 5079 break; 5080 case TARGET_NR_open: 5081 if (!(p = lock_user_string(arg1))) 5082 goto efault; 5083 ret = get_errno(do_open(cpu_env, p, 5084 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5085 arg3)); 5086 unlock_user(p, arg1, 0); 5087 break; 5088 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5089 case TARGET_NR_openat: 5090 if (!(p = lock_user_string(arg2))) 5091 goto efault; 5092 ret = get_errno(sys_openat(arg1, 5093 path(p), 5094 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5095 arg4)); 5096 unlock_user(p, arg2, 0); 5097 break; 5098 #endif 5099 case TARGET_NR_close: 5100 ret = get_errno(close(arg1)); 5101 break; 5102 case TARGET_NR_brk: 5103 ret = do_brk(arg1); 5104 break; 5105 case TARGET_NR_fork: 5106 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5107 break; 5108 #ifdef TARGET_NR_waitpid 5109 case TARGET_NR_waitpid: 5110 { 5111 int status; 5112 ret = get_errno(waitpid(arg1, &status, arg3)); 5113 if (!is_error(ret) && arg2 && ret 5114 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5115 goto efault; 5116 } 5117 break; 5118 #endif 5119 #ifdef TARGET_NR_waitid 5120 case TARGET_NR_waitid: 5121 { 5122 siginfo_t info; 5123 info.si_pid = 0; 5124 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5125 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5126 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5127 goto efault; 5128 host_to_target_siginfo(p, &info); 5129 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5130 } 5131 } 5132 break; 5133 #endif 5134 #ifdef TARGET_NR_creat /* not on alpha */ 5135 case TARGET_NR_creat: 5136 if (!(p = lock_user_string(arg1))) 5137 goto efault; 5138 ret = get_errno(creat(p, arg2)); 5139 unlock_user(p, arg1, 0); 5140 break; 5141 #endif 5142 case TARGET_NR_link: 5143 { 5144 void * p2; 5145 p = lock_user_string(arg1); 5146 p2 = lock_user_string(arg2); 5147 if (!p || !p2) 5148 ret = -TARGET_EFAULT; 5149 else 5150 ret = get_errno(link(p, p2)); 5151 unlock_user(p2, arg2, 0); 5152 unlock_user(p, arg1, 0); 5153 } 5154 break; 5155 #if defined(TARGET_NR_linkat) && defined(__NR_linkat) 5156 case TARGET_NR_linkat: 5157 { 5158 void * p2 = NULL; 5159 if (!arg2 || !arg4) 5160 goto efault; 5161 p = lock_user_string(arg2); 5162 p2 = lock_user_string(arg4); 5163 if (!p || !p2) 5164 ret = -TARGET_EFAULT; 5165 else 5166 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); 5167 unlock_user(p, arg2, 0); 5168 unlock_user(p2, arg4, 0); 5169 } 5170 break; 5171 #endif 5172 case TARGET_NR_unlink: 5173 if (!(p = 
lock_user_string(arg1))) 5174 goto efault; 5175 ret = get_errno(unlink(p)); 5176 unlock_user(p, arg1, 0); 5177 break; 5178 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) 5179 case TARGET_NR_unlinkat: 5180 if (!(p = lock_user_string(arg2))) 5181 goto efault; 5182 ret = get_errno(sys_unlinkat(arg1, p, arg3)); 5183 unlock_user(p, arg2, 0); 5184 break; 5185 #endif 5186 case TARGET_NR_execve: 5187 { 5188 char **argp, **envp; 5189 int argc, envc; 5190 abi_ulong gp; 5191 abi_ulong guest_argp; 5192 abi_ulong guest_envp; 5193 abi_ulong addr; 5194 char **q; 5195 int total_size = 0; 5196 5197 argc = 0; 5198 guest_argp = arg2; 5199 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5200 if (get_user_ual(addr, gp)) 5201 goto efault; 5202 if (!addr) 5203 break; 5204 argc++; 5205 } 5206 envc = 0; 5207 guest_envp = arg3; 5208 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5209 if (get_user_ual(addr, gp)) 5210 goto efault; 5211 if (!addr) 5212 break; 5213 envc++; 5214 } 5215 5216 argp = alloca((argc + 1) * sizeof(void *)); 5217 envp = alloca((envc + 1) * sizeof(void *)); 5218 5219 for (gp = guest_argp, q = argp; gp; 5220 gp += sizeof(abi_ulong), q++) { 5221 if (get_user_ual(addr, gp)) 5222 goto execve_efault; 5223 if (!addr) 5224 break; 5225 if (!(*q = lock_user_string(addr))) 5226 goto execve_efault; 5227 total_size += strlen(*q) + 1; 5228 } 5229 *q = NULL; 5230 5231 for (gp = guest_envp, q = envp; gp; 5232 gp += sizeof(abi_ulong), q++) { 5233 if (get_user_ual(addr, gp)) 5234 goto execve_efault; 5235 if (!addr) 5236 break; 5237 if (!(*q = lock_user_string(addr))) 5238 goto execve_efault; 5239 total_size += strlen(*q) + 1; 5240 } 5241 *q = NULL; 5242 5243 /* This case will not be caught by the host's execve() if its 5244 page size is bigger than the target's. 
*/ 5245 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5246 ret = -TARGET_E2BIG; 5247 goto execve_end; 5248 } 5249 if (!(p = lock_user_string(arg1))) 5250 goto execve_efault; 5251 ret = get_errno(execve(p, argp, envp)); 5252 unlock_user(p, arg1, 0); 5253 5254 goto execve_end; 5255 5256 execve_efault: 5257 ret = -TARGET_EFAULT; 5258 5259 execve_end: 5260 for (gp = guest_argp, q = argp; *q; 5261 gp += sizeof(abi_ulong), q++) { 5262 if (get_user_ual(addr, gp) 5263 || !addr) 5264 break; 5265 unlock_user(*q, addr, 0); 5266 } 5267 for (gp = guest_envp, q = envp; *q; 5268 gp += sizeof(abi_ulong), q++) { 5269 if (get_user_ual(addr, gp) 5270 || !addr) 5271 break; 5272 unlock_user(*q, addr, 0); 5273 } 5274 } 5275 break; 5276 case TARGET_NR_chdir: 5277 if (!(p = lock_user_string(arg1))) 5278 goto efault; 5279 ret = get_errno(chdir(p)); 5280 unlock_user(p, arg1, 0); 5281 break; 5282 #ifdef TARGET_NR_time 5283 case TARGET_NR_time: 5284 { 5285 time_t host_time; 5286 ret = get_errno(time(&host_time)); 5287 if (!is_error(ret) 5288 && arg1 5289 && put_user_sal(host_time, arg1)) 5290 goto efault; 5291 } 5292 break; 5293 #endif 5294 case TARGET_NR_mknod: 5295 if (!(p = lock_user_string(arg1))) 5296 goto efault; 5297 ret = get_errno(mknod(p, arg2, arg3)); 5298 unlock_user(p, arg1, 0); 5299 break; 5300 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) 5301 case TARGET_NR_mknodat: 5302 if (!(p = lock_user_string(arg2))) 5303 goto efault; 5304 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); 5305 unlock_user(p, arg2, 0); 5306 break; 5307 #endif 5308 case TARGET_NR_chmod: 5309 if (!(p = lock_user_string(arg1))) 5310 goto efault; 5311 ret = get_errno(chmod(p, arg2)); 5312 unlock_user(p, arg1, 0); 5313 break; 5314 #ifdef TARGET_NR_break 5315 case TARGET_NR_break: 5316 goto unimplemented; 5317 #endif 5318 #ifdef TARGET_NR_oldstat 5319 case TARGET_NR_oldstat: 5320 goto unimplemented; 5321 #endif 5322 case TARGET_NR_lseek: 5323 ret = get_errno(lseek(arg1, arg2, arg3)); 5324 break; 5325 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5326 /* Alpha specific */ 5327 case TARGET_NR_getxpid: 5328 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5329 ret = get_errno(getpid()); 5330 break; 5331 #endif 5332 #ifdef TARGET_NR_getpid 5333 case TARGET_NR_getpid: 5334 ret = get_errno(getpid()); 5335 break; 5336 #endif 5337 case TARGET_NR_mount: 5338 { 5339 /* need to look at the data field */ 5340 void *p2, *p3; 5341 p = lock_user_string(arg1); 5342 p2 = lock_user_string(arg2); 5343 p3 = lock_user_string(arg3); 5344 if (!p || !p2 || !p3) 5345 ret = -TARGET_EFAULT; 5346 else { 5347 /* FIXME - arg5 should be locked, but it isn't clear how to 5348 * do that since it's not guaranteed to be a NULL-terminated 5349 * string. 5350 */ 5351 if ( ! 
arg5 ) 5352 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5353 else 5354 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5355 } 5356 unlock_user(p, arg1, 0); 5357 unlock_user(p2, arg2, 0); 5358 unlock_user(p3, arg3, 0); 5359 break; 5360 } 5361 #ifdef TARGET_NR_umount 5362 case TARGET_NR_umount: 5363 if (!(p = lock_user_string(arg1))) 5364 goto efault; 5365 ret = get_errno(umount(p)); 5366 unlock_user(p, arg1, 0); 5367 break; 5368 #endif 5369 #ifdef TARGET_NR_stime /* not on alpha */ 5370 case TARGET_NR_stime: 5371 { 5372 time_t host_time; 5373 if (get_user_sal(host_time, arg1)) 5374 goto efault; 5375 ret = get_errno(stime(&host_time)); 5376 } 5377 break; 5378 #endif 5379 case TARGET_NR_ptrace: 5380 goto unimplemented; 5381 #ifdef TARGET_NR_alarm /* not on alpha */ 5382 case TARGET_NR_alarm: 5383 ret = alarm(arg1); 5384 break; 5385 #endif 5386 #ifdef TARGET_NR_oldfstat 5387 case TARGET_NR_oldfstat: 5388 goto unimplemented; 5389 #endif 5390 #ifdef TARGET_NR_pause /* not on alpha */ 5391 case TARGET_NR_pause: 5392 ret = get_errno(pause()); 5393 break; 5394 #endif 5395 #ifdef TARGET_NR_utime 5396 case TARGET_NR_utime: 5397 { 5398 struct utimbuf tbuf, *host_tbuf; 5399 struct target_utimbuf *target_tbuf; 5400 if (arg2) { 5401 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5402 goto efault; 5403 tbuf.actime = tswapal(target_tbuf->actime); 5404 tbuf.modtime = tswapal(target_tbuf->modtime); 5405 unlock_user_struct(target_tbuf, arg2, 0); 5406 host_tbuf = &tbuf; 5407 } else { 5408 host_tbuf = NULL; 5409 } 5410 if (!(p = lock_user_string(arg1))) 5411 goto efault; 5412 ret = get_errno(utime(p, host_tbuf)); 5413 unlock_user(p, arg1, 0); 5414 } 5415 break; 5416 #endif 5417 case TARGET_NR_utimes: 5418 { 5419 struct timeval *tvp, tv[2]; 5420 if (arg2) { 5421 if (copy_from_user_timeval(&tv[0], arg2) 5422 || copy_from_user_timeval(&tv[1], 5423 arg2 + sizeof(struct target_timeval))) 5424 goto efault; 5425 tvp = tv; 5426 } else { 5427 tvp = NULL; 5428 } 5429 if (!(p = lock_user_string(arg1))) 5430 goto efault; 5431 ret = get_errno(utimes(p, tvp)); 5432 unlock_user(p, arg1, 0); 5433 } 5434 break; 5435 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) 5436 case TARGET_NR_futimesat: 5437 { 5438 struct timeval *tvp, tv[2]; 5439 if (arg3) { 5440 if (copy_from_user_timeval(&tv[0], arg3) 5441 || copy_from_user_timeval(&tv[1], 5442 arg3 + sizeof(struct target_timeval))) 5443 goto efault; 5444 tvp = tv; 5445 } else { 5446 tvp = NULL; 5447 } 5448 if (!(p = lock_user_string(arg2))) 5449 goto efault; 5450 ret = get_errno(sys_futimesat(arg1, path(p), tvp)); 5451 unlock_user(p, arg2, 0); 5452 } 5453 break; 5454 #endif 5455 #ifdef TARGET_NR_stty 5456 case TARGET_NR_stty: 5457 goto unimplemented; 5458 #endif 5459 #ifdef TARGET_NR_gtty 5460 case TARGET_NR_gtty: 5461 goto unimplemented; 5462 #endif 5463 case TARGET_NR_access: 5464 if (!(p = lock_user_string(arg1))) 5465 goto efault; 5466 ret = get_errno(access(path(p), arg2)); 5467 unlock_user(p, arg1, 0); 5468 break; 5469 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5470 case TARGET_NR_faccessat: 5471 if (!(p = lock_user_string(arg2))) 5472 goto efault; 5473 ret = get_errno(sys_faccessat(arg1, p, arg3)); 5474 unlock_user(p, arg2, 0); 5475 break; 5476 #endif 5477 #ifdef TARGET_NR_nice /* not on alpha */ 5478 case TARGET_NR_nice: 5479 ret = get_errno(nice(arg1)); 5480 break; 5481 #endif 5482 #ifdef TARGET_NR_ftime 5483 case TARGET_NR_ftime: 5484 goto unimplemented; 5485 #endif 5486 case TARGET_NR_sync: 5487 
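/* sync() returns void on the host, so report unconditional success. */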
sync(); 5488 ret = 0; 5489 break; 5490 case TARGET_NR_kill: 5491 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5492 break; 5493 case TARGET_NR_rename: 5494 { 5495 void *p2; 5496 p = lock_user_string(arg1); 5497 p2 = lock_user_string(arg2); 5498 if (!p || !p2) 5499 ret = -TARGET_EFAULT; 5500 else 5501 ret = get_errno(rename(p, p2)); 5502 unlock_user(p2, arg2, 0); 5503 unlock_user(p, arg1, 0); 5504 } 5505 break; 5506 #if defined(TARGET_NR_renameat) && defined(__NR_renameat) 5507 case TARGET_NR_renameat: 5508 { 5509 void *p2; 5510 p = lock_user_string(arg2); 5511 p2 = lock_user_string(arg4); 5512 if (!p || !p2) 5513 ret = -TARGET_EFAULT; 5514 else 5515 ret = get_errno(sys_renameat(arg1, p, arg3, p2)); 5516 unlock_user(p2, arg4, 0); 5517 unlock_user(p, arg2, 0); 5518 } 5519 break; 5520 #endif 5521 case TARGET_NR_mkdir: 5522 if (!(p = lock_user_string(arg1))) 5523 goto efault; 5524 ret = get_errno(mkdir(p, arg2)); 5525 unlock_user(p, arg1, 0); 5526 break; 5527 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) 5528 case TARGET_NR_mkdirat: 5529 if (!(p = lock_user_string(arg2))) 5530 goto efault; 5531 ret = get_errno(sys_mkdirat(arg1, p, arg3)); 5532 unlock_user(p, arg2, 0); 5533 break; 5534 #endif 5535 case TARGET_NR_rmdir: 5536 if (!(p = lock_user_string(arg1))) 5537 goto efault; 5538 ret = get_errno(rmdir(p)); 5539 unlock_user(p, arg1, 0); 5540 break; 5541 case TARGET_NR_dup: 5542 ret = get_errno(dup(arg1)); 5543 break; 5544 case TARGET_NR_pipe: 5545 ret = do_pipe(cpu_env, arg1, 0, 0); 5546 break; 5547 #ifdef TARGET_NR_pipe2 5548 case TARGET_NR_pipe2: 5549 ret = do_pipe(cpu_env, arg1, arg2, 1); 5550 break; 5551 #endif 5552 case TARGET_NR_times: 5553 { 5554 struct target_tms *tmsp; 5555 struct tms tms; 5556 ret = get_errno(times(&tms)); 5557 if (arg1) { 5558 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5559 if (!tmsp) 5560 goto efault; 5561 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5562 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5563 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5564 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5565 } 5566 if (!is_error(ret)) 5567 ret = host_to_target_clock_t(ret); 5568 } 5569 break; 5570 #ifdef TARGET_NR_prof 5571 case TARGET_NR_prof: 5572 goto unimplemented; 5573 #endif 5574 #ifdef TARGET_NR_signal 5575 case TARGET_NR_signal: 5576 goto unimplemented; 5577 #endif 5578 case TARGET_NR_acct: 5579 if (arg1 == 0) { 5580 ret = get_errno(acct(NULL)); 5581 } else { 5582 if (!(p = lock_user_string(arg1))) 5583 goto efault; 5584 ret = get_errno(acct(path(p))); 5585 unlock_user(p, arg1, 0); 5586 } 5587 break; 5588 #ifdef TARGET_NR_umount2 /* not on alpha */ 5589 case TARGET_NR_umount2: 5590 if (!(p = lock_user_string(arg1))) 5591 goto efault; 5592 ret = get_errno(umount2(p, arg2)); 5593 unlock_user(p, arg1, 0); 5594 break; 5595 #endif 5596 #ifdef TARGET_NR_lock 5597 case TARGET_NR_lock: 5598 goto unimplemented; 5599 #endif 5600 case TARGET_NR_ioctl: 5601 ret = do_ioctl(arg1, arg2, arg3); 5602 break; 5603 case TARGET_NR_fcntl: 5604 ret = do_fcntl(arg1, arg2, arg3); 5605 break; 5606 #ifdef TARGET_NR_mpx 5607 case TARGET_NR_mpx: 5608 goto unimplemented; 5609 #endif 5610 case TARGET_NR_setpgid: 5611 ret = get_errno(setpgid(arg1, arg2)); 5612 break; 5613 #ifdef TARGET_NR_ulimit 5614 case TARGET_NR_ulimit: 5615 goto unimplemented; 5616 #endif 5617 #ifdef TARGET_NR_oldolduname 5618 case TARGET_NR_oldolduname: 5619 goto unimplemented; 5620 #endif 
5621 case TARGET_NR_umask: 5622 ret = get_errno(umask(arg1)); 5623 break; 5624 case TARGET_NR_chroot: 5625 if (!(p = lock_user_string(arg1))) 5626 goto efault; 5627 ret = get_errno(chroot(p)); 5628 unlock_user(p, arg1, 0); 5629 break; 5630 case TARGET_NR_ustat: 5631 goto unimplemented; 5632 case TARGET_NR_dup2: 5633 ret = get_errno(dup2(arg1, arg2)); 5634 break; 5635 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5636 case TARGET_NR_dup3: 5637 ret = get_errno(dup3(arg1, arg2, arg3)); 5638 break; 5639 #endif 5640 #ifdef TARGET_NR_getppid /* not on alpha */ 5641 case TARGET_NR_getppid: 5642 ret = get_errno(getppid()); 5643 break; 5644 #endif 5645 case TARGET_NR_getpgrp: 5646 ret = get_errno(getpgrp()); 5647 break; 5648 case TARGET_NR_setsid: 5649 ret = get_errno(setsid()); 5650 break; 5651 #ifdef TARGET_NR_sigaction 5652 case TARGET_NR_sigaction: 5653 { 5654 #if defined(TARGET_ALPHA) 5655 struct target_sigaction act, oact, *pact = 0; 5656 struct target_old_sigaction *old_act; 5657 if (arg2) { 5658 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5659 goto efault; 5660 act._sa_handler = old_act->_sa_handler; 5661 target_siginitset(&act.sa_mask, old_act->sa_mask); 5662 act.sa_flags = old_act->sa_flags; 5663 act.sa_restorer = 0; 5664 unlock_user_struct(old_act, arg2, 0); 5665 pact = &act; 5666 } 5667 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5668 if (!is_error(ret) && arg3) { 5669 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5670 goto efault; 5671 old_act->_sa_handler = oact._sa_handler; 5672 old_act->sa_mask = oact.sa_mask.sig[0]; 5673 old_act->sa_flags = oact.sa_flags; 5674 unlock_user_struct(old_act, arg3, 1); 5675 } 5676 #elif defined(TARGET_MIPS) 5677 struct target_sigaction act, oact, *pact, *old_act; 5678 5679 if (arg2) { 5680 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5681 goto efault; 5682 act._sa_handler = old_act->_sa_handler; 5683 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5684 act.sa_flags = old_act->sa_flags; 5685 unlock_user_struct(old_act, arg2, 0); 5686 pact = &act; 5687 } else { 5688 pact = NULL; 5689 } 5690 5691 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5692 5693 if (!is_error(ret) && arg3) { 5694 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5695 goto efault; 5696 old_act->_sa_handler = oact._sa_handler; 5697 old_act->sa_flags = oact.sa_flags; 5698 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5699 old_act->sa_mask.sig[1] = 0; 5700 old_act->sa_mask.sig[2] = 0; 5701 old_act->sa_mask.sig[3] = 0; 5702 unlock_user_struct(old_act, arg3, 1); 5703 } 5704 #else 5705 struct target_old_sigaction *old_act; 5706 struct target_sigaction act, oact, *pact; 5707 if (arg2) { 5708 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5709 goto efault; 5710 act._sa_handler = old_act->_sa_handler; 5711 target_siginitset(&act.sa_mask, old_act->sa_mask); 5712 act.sa_flags = old_act->sa_flags; 5713 act.sa_restorer = old_act->sa_restorer; 5714 unlock_user_struct(old_act, arg2, 0); 5715 pact = &act; 5716 } else { 5717 pact = NULL; 5718 } 5719 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5720 if (!is_error(ret) && arg3) { 5721 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5722 goto efault; 5723 old_act->_sa_handler = oact._sa_handler; 5724 old_act->sa_mask = oact.sa_mask.sig[0]; 5725 old_act->sa_flags = oact.sa_flags; 5726 old_act->sa_restorer = oact.sa_restorer; 5727 unlock_user_struct(old_act, arg3, 1); 5728 } 5729 #endif 5730 } 5731 break; 5732 #endif 5733 case TARGET_NR_rt_sigaction: 5734 { 5735 #if 
defined(TARGET_ALPHA) 5736 struct target_sigaction act, oact, *pact = 0; 5737 struct target_rt_sigaction *rt_act; 5738 /* ??? arg4 == sizeof(sigset_t). */ 5739 if (arg2) { 5740 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5741 goto efault; 5742 act._sa_handler = rt_act->_sa_handler; 5743 act.sa_mask = rt_act->sa_mask; 5744 act.sa_flags = rt_act->sa_flags; 5745 act.sa_restorer = arg5; 5746 unlock_user_struct(rt_act, arg2, 0); 5747 pact = &act; 5748 } 5749 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5750 if (!is_error(ret) && arg3) { 5751 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5752 goto efault; 5753 rt_act->_sa_handler = oact._sa_handler; 5754 rt_act->sa_mask = oact.sa_mask; 5755 rt_act->sa_flags = oact.sa_flags; 5756 unlock_user_struct(rt_act, arg3, 1); 5757 } 5758 #else 5759 struct target_sigaction *act; 5760 struct target_sigaction *oact; 5761 5762 if (arg2) { 5763 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5764 goto efault; 5765 } else 5766 act = NULL; 5767 if (arg3) { 5768 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5769 ret = -TARGET_EFAULT; 5770 goto rt_sigaction_fail; 5771 } 5772 } else 5773 oact = NULL; 5774 ret = get_errno(do_sigaction(arg1, act, oact)); 5775 rt_sigaction_fail: 5776 if (act) 5777 unlock_user_struct(act, arg2, 0); 5778 if (oact) 5779 unlock_user_struct(oact, arg3, 1); 5780 #endif 5781 } 5782 break; 5783 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5784 case TARGET_NR_sgetmask: 5785 { 5786 sigset_t cur_set; 5787 abi_ulong target_set; 5788 sigprocmask(0, NULL, &cur_set); 5789 host_to_target_old_sigset(&target_set, &cur_set); 5790 ret = target_set; 5791 } 5792 break; 5793 #endif 5794 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5795 case TARGET_NR_ssetmask: 5796 { 5797 sigset_t set, oset, cur_set; 5798 abi_ulong target_set = arg1; 5799 sigprocmask(0, NULL, &cur_set); 5800 target_to_host_old_sigset(&set, &target_set); 5801 sigorset(&set, &set, &cur_set); 5802 sigprocmask(SIG_SETMASK, &set, &oset); 5803 host_to_target_old_sigset(&target_set, &oset); 5804 ret = target_set; 5805 } 5806 break; 5807 #endif 5808 #ifdef TARGET_NR_sigprocmask 5809 case TARGET_NR_sigprocmask: 5810 { 5811 #if defined(TARGET_ALPHA) 5812 sigset_t set, oldset; 5813 abi_ulong mask; 5814 int how; 5815 5816 switch (arg1) { 5817 case TARGET_SIG_BLOCK: 5818 how = SIG_BLOCK; 5819 break; 5820 case TARGET_SIG_UNBLOCK: 5821 how = SIG_UNBLOCK; 5822 break; 5823 case TARGET_SIG_SETMASK: 5824 how = SIG_SETMASK; 5825 break; 5826 default: 5827 ret = -TARGET_EINVAL; 5828 goto fail; 5829 } 5830 mask = arg2; 5831 target_to_host_old_sigset(&set, &mask); 5832 5833 ret = get_errno(sigprocmask(how, &set, &oldset)); 5834 5835 if (!is_error(ret)) { 5836 host_to_target_old_sigset(&mask, &oldset); 5837 ret = mask; 5838 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 5839 } 5840 #else 5841 sigset_t set, oldset, *set_ptr; 5842 int how; 5843 5844 if (arg2) { 5845 switch (arg1) { 5846 case TARGET_SIG_BLOCK: 5847 how = SIG_BLOCK; 5848 break; 5849 case TARGET_SIG_UNBLOCK: 5850 how = SIG_UNBLOCK; 5851 break; 5852 case TARGET_SIG_SETMASK: 5853 how = SIG_SETMASK; 5854 break; 5855 default: 5856 ret = -TARGET_EINVAL; 5857 goto fail; 5858 } 5859 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5860 goto efault; 5861 target_to_host_old_sigset(&set, p); 5862 unlock_user(p, arg2, 0); 5863 set_ptr = &set; 5864 } else { 5865 how = 0; 5866 set_ptr = NULL; 5867 } 5868 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 5869 if (!is_error(ret) && arg3) { 5870 if
(!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 5871 goto efault; 5872 host_to_target_old_sigset(p, &oldset); 5873 unlock_user(p, arg3, sizeof(target_sigset_t)); 5874 } 5875 #endif 5876 } 5877 break; 5878 #endif 5879 case TARGET_NR_rt_sigprocmask: 5880 { 5881 int how = arg1; 5882 sigset_t set, oldset, *set_ptr; 5883 5884 if (arg2) { 5885 switch(how) { 5886 case TARGET_SIG_BLOCK: 5887 how = SIG_BLOCK; 5888 break; 5889 case TARGET_SIG_UNBLOCK: 5890 how = SIG_UNBLOCK; 5891 break; 5892 case TARGET_SIG_SETMASK: 5893 how = SIG_SETMASK; 5894 break; 5895 default: 5896 ret = -TARGET_EINVAL; 5897 goto fail; 5898 } 5899 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 5900 goto efault; 5901 target_to_host_sigset(&set, p); 5902 unlock_user(p, arg2, 0); 5903 set_ptr = &set; 5904 } else { 5905 how = 0; 5906 set_ptr = NULL; 5907 } 5908 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 5909 if (!is_error(ret) && arg3) { 5910 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 5911 goto efault; 5912 host_to_target_sigset(p, &oldset); 5913 unlock_user(p, arg3, sizeof(target_sigset_t)); 5914 } 5915 } 5916 break; 5917 #ifdef TARGET_NR_sigpending 5918 case TARGET_NR_sigpending: 5919 { 5920 sigset_t set; 5921 ret = get_errno(sigpending(&set)); 5922 if (!is_error(ret)) { 5923 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 5924 goto efault; 5925 host_to_target_old_sigset(p, &set); 5926 unlock_user(p, arg1, sizeof(target_sigset_t)); 5927 } 5928 } 5929 break; 5930 #endif 5931 case TARGET_NR_rt_sigpending: 5932 { 5933 sigset_t set; 5934 ret = get_errno(sigpending(&set)); 5935 if (!is_error(ret)) { 5936 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 5937 goto efault; 5938 host_to_target_sigset(p, &set); 5939 unlock_user(p, arg1, sizeof(target_sigset_t)); 5940 } 5941 } 5942 break; 5943 #ifdef TARGET_NR_sigsuspend 5944 case TARGET_NR_sigsuspend: 5945 { 5946 sigset_t set; 5947 #if defined(TARGET_ALPHA) 5948 abi_ulong mask = arg1; 5949 target_to_host_old_sigset(&set, &mask); 5950 #else 5951 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5952 goto efault; 5953 target_to_host_old_sigset(&set, p); 5954 unlock_user(p, arg1, 0); 5955 #endif 5956 ret = get_errno(sigsuspend(&set)); 5957 } 5958 break; 5959 #endif 5960 case TARGET_NR_rt_sigsuspend: 5961 { 5962 sigset_t set; 5963 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5964 goto efault; 5965 target_to_host_sigset(&set, p); 5966 unlock_user(p, arg1, 0); 5967 ret = get_errno(sigsuspend(&set)); 5968 } 5969 break; 5970 case TARGET_NR_rt_sigtimedwait: 5971 { 5972 sigset_t set; 5973 struct timespec uts, *puts; 5974 siginfo_t uinfo; 5975 5976 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 5977 goto efault; 5978 target_to_host_sigset(&set, p); 5979 unlock_user(p, arg1, 0); 5980 if (arg3) { 5981 puts = &uts; 5982 target_to_host_timespec(puts, arg3); 5983 } else { 5984 puts = NULL; 5985 } 5986 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 5987 if (!is_error(ret) && arg2) { 5988 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 5989 goto efault; 5990 host_to_target_siginfo(p, &uinfo); 5991 unlock_user(p, arg2, sizeof(target_siginfo_t)); 5992 } 5993 } 5994 break; 5995 case TARGET_NR_rt_sigqueueinfo: 5996 { 5997 siginfo_t uinfo; 5998 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 5999 goto efault; 6000 target_to_host_siginfo(&uinfo, p); 6001 unlock_user(p, arg1, 0); 6002 
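            /* sys_rt_sigqueueinfo() below is the raw-syscall wrapper declared
               earlier in this file rather than a libc call (glibc exposes no
               rt_sigqueueinfo() wrapper; sigqueue() is its closest public
               interface), so the converted siginfo is handed straight to the
               kernel. */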
ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6003 } 6004 break; 6005 #ifdef TARGET_NR_sigreturn 6006 case TARGET_NR_sigreturn: 6007 /* NOTE: ret is eax, so not transcoding must be done */ 6008 ret = do_sigreturn(cpu_env); 6009 break; 6010 #endif 6011 case TARGET_NR_rt_sigreturn: 6012 /* NOTE: ret is eax, so not transcoding must be done */ 6013 ret = do_rt_sigreturn(cpu_env); 6014 break; 6015 case TARGET_NR_sethostname: 6016 if (!(p = lock_user_string(arg1))) 6017 goto efault; 6018 ret = get_errno(sethostname(p, arg2)); 6019 unlock_user(p, arg1, 0); 6020 break; 6021 case TARGET_NR_setrlimit: 6022 { 6023 int resource = target_to_host_resource(arg1); 6024 struct target_rlimit *target_rlim; 6025 struct rlimit rlim; 6026 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6027 goto efault; 6028 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6029 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6030 unlock_user_struct(target_rlim, arg2, 0); 6031 ret = get_errno(setrlimit(resource, &rlim)); 6032 } 6033 break; 6034 case TARGET_NR_getrlimit: 6035 { 6036 int resource = target_to_host_resource(arg1); 6037 struct target_rlimit *target_rlim; 6038 struct rlimit rlim; 6039 6040 ret = get_errno(getrlimit(resource, &rlim)); 6041 if (!is_error(ret)) { 6042 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6043 goto efault; 6044 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6045 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6046 unlock_user_struct(target_rlim, arg2, 1); 6047 } 6048 } 6049 break; 6050 case TARGET_NR_getrusage: 6051 { 6052 struct rusage rusage; 6053 ret = get_errno(getrusage(arg1, &rusage)); 6054 if (!is_error(ret)) { 6055 host_to_target_rusage(arg2, &rusage); 6056 } 6057 } 6058 break; 6059 case TARGET_NR_gettimeofday: 6060 { 6061 struct timeval tv; 6062 ret = get_errno(gettimeofday(&tv, NULL)); 6063 if (!is_error(ret)) { 6064 if (copy_to_user_timeval(arg1, &tv)) 6065 goto efault; 6066 } 6067 } 6068 break; 6069 case TARGET_NR_settimeofday: 6070 { 6071 struct timeval tv; 6072 if (copy_from_user_timeval(&tv, arg1)) 6073 goto efault; 6074 ret = get_errno(settimeofday(&tv, NULL)); 6075 } 6076 break; 6077 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390) 6078 case TARGET_NR_select: 6079 { 6080 struct target_sel_arg_struct *sel; 6081 abi_ulong inp, outp, exp, tvp; 6082 long nsel; 6083 6084 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6085 goto efault; 6086 nsel = tswapal(sel->n); 6087 inp = tswapal(sel->inp); 6088 outp = tswapal(sel->outp); 6089 exp = tswapal(sel->exp); 6090 tvp = tswapal(sel->tvp); 6091 unlock_user_struct(sel, arg1, 0); 6092 ret = do_select(nsel, inp, outp, exp, tvp); 6093 } 6094 break; 6095 #endif 6096 #ifdef TARGET_NR_pselect6 6097 case TARGET_NR_pselect6: 6098 { 6099 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6100 fd_set rfds, wfds, efds; 6101 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6102 struct timespec ts, *ts_ptr; 6103 6104 /* 6105 * The 6th arg is actually two args smashed together, 6106 * so we cannot use the C library. 
6107 */ 6108 sigset_t set; 6109 struct { 6110 sigset_t *set; 6111 size_t size; 6112 } sig, *sig_ptr; 6113 6114 abi_ulong arg_sigset, arg_sigsize, *arg7; 6115 target_sigset_t *target_sigset; 6116 6117 n = arg1; 6118 rfd_addr = arg2; 6119 wfd_addr = arg3; 6120 efd_addr = arg4; 6121 ts_addr = arg5; 6122 6123 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6124 if (ret) { 6125 goto fail; 6126 } 6127 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6128 if (ret) { 6129 goto fail; 6130 } 6131 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6132 if (ret) { 6133 goto fail; 6134 } 6135 6136 /* 6137 * This takes a timespec, and not a timeval, so we cannot 6138 * use the do_select() helper ... 6139 */ 6140 if (ts_addr) { 6141 if (target_to_host_timespec(&ts, ts_addr)) { 6142 goto efault; 6143 } 6144 ts_ptr = &ts; 6145 } else { 6146 ts_ptr = NULL; 6147 } 6148 6149 /* Extract the two packed args for the sigset */ 6150 if (arg6) { 6151 sig_ptr = &sig; 6152 sig.size = _NSIG / 8; 6153 6154 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6155 if (!arg7) { 6156 goto efault; 6157 } 6158 arg_sigset = tswapal(arg7[0]); 6159 arg_sigsize = tswapal(arg7[1]); 6160 unlock_user(arg7, arg6, 0); 6161 6162 if (arg_sigset) { 6163 sig.set = &set; 6164 if (arg_sigsize != sizeof(*target_sigset)) { 6165 /* Like the kernel, we enforce correct size sigsets */ 6166 ret = -TARGET_EINVAL; 6167 goto fail; 6168 } 6169 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6170 sizeof(*target_sigset), 1); 6171 if (!target_sigset) { 6172 goto efault; 6173 } 6174 target_to_host_sigset(&set, target_sigset); 6175 unlock_user(target_sigset, arg_sigset, 0); 6176 } else { 6177 sig.set = NULL; 6178 } 6179 } else { 6180 sig_ptr = NULL; 6181 } 6182 6183 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6184 ts_ptr, sig_ptr)); 6185 6186 if (!is_error(ret)) { 6187 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6188 goto efault; 6189 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6190 goto efault; 6191 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6192 goto efault; 6193 6194 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6195 goto efault; 6196 } 6197 } 6198 break; 6199 #endif 6200 case TARGET_NR_symlink: 6201 { 6202 void *p2; 6203 p = lock_user_string(arg1); 6204 p2 = lock_user_string(arg2); 6205 if (!p || !p2) 6206 ret = -TARGET_EFAULT; 6207 else 6208 ret = get_errno(symlink(p, p2)); 6209 unlock_user(p2, arg2, 0); 6210 unlock_user(p, arg1, 0); 6211 } 6212 break; 6213 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) 6214 case TARGET_NR_symlinkat: 6215 { 6216 void *p2; 6217 p = lock_user_string(arg1); 6218 p2 = lock_user_string(arg3); 6219 if (!p || !p2) 6220 ret = -TARGET_EFAULT; 6221 else 6222 ret = get_errno(sys_symlinkat(p, arg2, p2)); 6223 unlock_user(p2, arg3, 0); 6224 unlock_user(p, arg1, 0); 6225 } 6226 break; 6227 #endif 6228 #ifdef TARGET_NR_oldlstat 6229 case TARGET_NR_oldlstat: 6230 goto unimplemented; 6231 #endif 6232 case TARGET_NR_readlink: 6233 { 6234 void *p2, *temp; 6235 p = lock_user_string(arg1); 6236 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6237 if (!p || !p2) 6238 ret = -TARGET_EFAULT; 6239 else { 6240 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) { 6241 char real[PATH_MAX]; 6242 temp = realpath(exec_path,real); 6243 ret = (temp==NULL) ? 
get_errno(-1) : strlen(real) ; 6244 snprintf((char *)p2, arg3, "%s", real); 6245 } 6246 else 6247 ret = get_errno(readlink(path(p), p2, arg3)); 6248 } 6249 unlock_user(p2, arg2, ret); 6250 unlock_user(p, arg1, 0); 6251 } 6252 break; 6253 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) 6254 case TARGET_NR_readlinkat: 6255 { 6256 void *p2; 6257 p = lock_user_string(arg2); 6258 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6259 if (!p || !p2) 6260 ret = -TARGET_EFAULT; 6261 else 6262 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); 6263 unlock_user(p2, arg3, ret); 6264 unlock_user(p, arg2, 0); 6265 } 6266 break; 6267 #endif 6268 #ifdef TARGET_NR_uselib 6269 case TARGET_NR_uselib: 6270 goto unimplemented; 6271 #endif 6272 #ifdef TARGET_NR_swapon 6273 case TARGET_NR_swapon: 6274 if (!(p = lock_user_string(arg1))) 6275 goto efault; 6276 ret = get_errno(swapon(p, arg2)); 6277 unlock_user(p, arg1, 0); 6278 break; 6279 #endif 6280 case TARGET_NR_reboot: 6281 if (!(p = lock_user_string(arg4))) 6282 goto efault; 6283 ret = reboot(arg1, arg2, arg3, p); 6284 unlock_user(p, arg4, 0); 6285 break; 6286 #ifdef TARGET_NR_readdir 6287 case TARGET_NR_readdir: 6288 goto unimplemented; 6289 #endif 6290 #ifdef TARGET_NR_mmap 6291 case TARGET_NR_mmap: 6292 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \ 6293 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6294 || defined(TARGET_S390X) 6295 { 6296 abi_ulong *v; 6297 abi_ulong v1, v2, v3, v4, v5, v6; 6298 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6299 goto efault; 6300 v1 = tswapal(v[0]); 6301 v2 = tswapal(v[1]); 6302 v3 = tswapal(v[2]); 6303 v4 = tswapal(v[3]); 6304 v5 = tswapal(v[4]); 6305 v6 = tswapal(v[5]); 6306 unlock_user(v, arg1, 0); 6307 ret = get_errno(target_mmap(v1, v2, v3, 6308 target_to_host_bitmask(v4, mmap_flags_tbl), 6309 v5, v6)); 6310 } 6311 #else 6312 ret = get_errno(target_mmap(arg1, arg2, arg3, 6313 target_to_host_bitmask(arg4, mmap_flags_tbl), 6314 arg5, 6315 arg6)); 6316 #endif 6317 break; 6318 #endif 6319 #ifdef TARGET_NR_mmap2 6320 case TARGET_NR_mmap2: 6321 #ifndef MMAP_SHIFT 6322 #define MMAP_SHIFT 12 6323 #endif 6324 ret = get_errno(target_mmap(arg1, arg2, arg3, 6325 target_to_host_bitmask(arg4, mmap_flags_tbl), 6326 arg5, 6327 arg6 << MMAP_SHIFT)); 6328 break; 6329 #endif 6330 case TARGET_NR_munmap: 6331 ret = get_errno(target_munmap(arg1, arg2)); 6332 break; 6333 case TARGET_NR_mprotect: 6334 { 6335 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 6336 /* Special hack to detect libc making the stack executable. */ 6337 if ((arg3 & PROT_GROWSDOWN) 6338 && arg1 >= ts->info->stack_limit 6339 && arg1 <= ts->info->start_stack) { 6340 arg3 &= ~PROT_GROWSDOWN; 6341 arg2 = arg2 + arg1 - ts->info->stack_limit; 6342 arg1 = ts->info->stack_limit; 6343 } 6344 } 6345 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6346 break; 6347 #ifdef TARGET_NR_mremap 6348 case TARGET_NR_mremap: 6349 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6350 break; 6351 #endif 6352 /* ??? msync/mlock/munlock are broken for softmmu. 
*/ 6353 #ifdef TARGET_NR_msync 6354 case TARGET_NR_msync: 6355 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6356 break; 6357 #endif 6358 #ifdef TARGET_NR_mlock 6359 case TARGET_NR_mlock: 6360 ret = get_errno(mlock(g2h(arg1), arg2)); 6361 break; 6362 #endif 6363 #ifdef TARGET_NR_munlock 6364 case TARGET_NR_munlock: 6365 ret = get_errno(munlock(g2h(arg1), arg2)); 6366 break; 6367 #endif 6368 #ifdef TARGET_NR_mlockall 6369 case TARGET_NR_mlockall: 6370 ret = get_errno(mlockall(arg1)); 6371 break; 6372 #endif 6373 #ifdef TARGET_NR_munlockall 6374 case TARGET_NR_munlockall: 6375 ret = get_errno(munlockall()); 6376 break; 6377 #endif 6378 case TARGET_NR_truncate: 6379 if (!(p = lock_user_string(arg1))) 6380 goto efault; 6381 ret = get_errno(truncate(p, arg2)); 6382 unlock_user(p, arg1, 0); 6383 break; 6384 case TARGET_NR_ftruncate: 6385 ret = get_errno(ftruncate(arg1, arg2)); 6386 break; 6387 case TARGET_NR_fchmod: 6388 ret = get_errno(fchmod(arg1, arg2)); 6389 break; 6390 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) 6391 case TARGET_NR_fchmodat: 6392 if (!(p = lock_user_string(arg2))) 6393 goto efault; 6394 ret = get_errno(sys_fchmodat(arg1, p, arg3)); 6395 unlock_user(p, arg2, 0); 6396 break; 6397 #endif 6398 case TARGET_NR_getpriority: 6399 /* libc does special remapping of the return value of 6400 * sys_getpriority() so it's just easiest to call 6401 * sys_getpriority() directly rather than through libc. */ 6402 ret = get_errno(sys_getpriority(arg1, arg2)); 6403 break; 6404 case TARGET_NR_setpriority: 6405 ret = get_errno(setpriority(arg1, arg2, arg3)); 6406 break; 6407 #ifdef TARGET_NR_profil 6408 case TARGET_NR_profil: 6409 goto unimplemented; 6410 #endif 6411 case TARGET_NR_statfs: 6412 if (!(p = lock_user_string(arg1))) 6413 goto efault; 6414 ret = get_errno(statfs(path(p), &stfs)); 6415 unlock_user(p, arg1, 0); 6416 convert_statfs: 6417 if (!is_error(ret)) { 6418 struct target_statfs *target_stfs; 6419 6420 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6421 goto efault; 6422 __put_user(stfs.f_type, &target_stfs->f_type); 6423 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6424 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6425 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6426 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6427 __put_user(stfs.f_files, &target_stfs->f_files); 6428 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6429 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6430 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6431 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6432 unlock_user_struct(target_stfs, arg2, 1); 6433 } 6434 break; 6435 case TARGET_NR_fstatfs: 6436 ret = get_errno(fstatfs(arg1, &stfs)); 6437 goto convert_statfs; 6438 #ifdef TARGET_NR_statfs64 6439 case TARGET_NR_statfs64: 6440 if (!(p = lock_user_string(arg1))) 6441 goto efault; 6442 ret = get_errno(statfs(path(p), &stfs)); 6443 unlock_user(p, arg1, 0); 6444 convert_statfs64: 6445 if (!is_error(ret)) { 6446 struct target_statfs64 *target_stfs; 6447 6448 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6449 goto efault; 6450 __put_user(stfs.f_type, &target_stfs->f_type); 6451 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6452 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6453 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6454 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6455 __put_user(stfs.f_files, &target_stfs->f_files); 6456 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6457 
__put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6458 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6459 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6460 unlock_user_struct(target_stfs, arg3, 1); 6461 } 6462 break; 6463 case TARGET_NR_fstatfs64: 6464 ret = get_errno(fstatfs(arg1, &stfs)); 6465 goto convert_statfs64; 6466 #endif 6467 #ifdef TARGET_NR_ioperm 6468 case TARGET_NR_ioperm: 6469 goto unimplemented; 6470 #endif 6471 #ifdef TARGET_NR_socketcall 6472 case TARGET_NR_socketcall: 6473 ret = do_socketcall(arg1, arg2); 6474 break; 6475 #endif 6476 #ifdef TARGET_NR_accept 6477 case TARGET_NR_accept: 6478 ret = do_accept(arg1, arg2, arg3); 6479 break; 6480 #endif 6481 #ifdef TARGET_NR_bind 6482 case TARGET_NR_bind: 6483 ret = do_bind(arg1, arg2, arg3); 6484 break; 6485 #endif 6486 #ifdef TARGET_NR_connect 6487 case TARGET_NR_connect: 6488 ret = do_connect(arg1, arg2, arg3); 6489 break; 6490 #endif 6491 #ifdef TARGET_NR_getpeername 6492 case TARGET_NR_getpeername: 6493 ret = do_getpeername(arg1, arg2, arg3); 6494 break; 6495 #endif 6496 #ifdef TARGET_NR_getsockname 6497 case TARGET_NR_getsockname: 6498 ret = do_getsockname(arg1, arg2, arg3); 6499 break; 6500 #endif 6501 #ifdef TARGET_NR_getsockopt 6502 case TARGET_NR_getsockopt: 6503 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6504 break; 6505 #endif 6506 #ifdef TARGET_NR_listen 6507 case TARGET_NR_listen: 6508 ret = get_errno(listen(arg1, arg2)); 6509 break; 6510 #endif 6511 #ifdef TARGET_NR_recv 6512 case TARGET_NR_recv: 6513 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6514 break; 6515 #endif 6516 #ifdef TARGET_NR_recvfrom 6517 case TARGET_NR_recvfrom: 6518 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6519 break; 6520 #endif 6521 #ifdef TARGET_NR_recvmsg 6522 case TARGET_NR_recvmsg: 6523 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6524 break; 6525 #endif 6526 #ifdef TARGET_NR_send 6527 case TARGET_NR_send: 6528 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6529 break; 6530 #endif 6531 #ifdef TARGET_NR_sendmsg 6532 case TARGET_NR_sendmsg: 6533 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6534 break; 6535 #endif 6536 #ifdef TARGET_NR_sendto 6537 case TARGET_NR_sendto: 6538 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6539 break; 6540 #endif 6541 #ifdef TARGET_NR_shutdown 6542 case TARGET_NR_shutdown: 6543 ret = get_errno(shutdown(arg1, arg2)); 6544 break; 6545 #endif 6546 #ifdef TARGET_NR_socket 6547 case TARGET_NR_socket: 6548 ret = do_socket(arg1, arg2, arg3); 6549 break; 6550 #endif 6551 #ifdef TARGET_NR_socketpair 6552 case TARGET_NR_socketpair: 6553 ret = do_socketpair(arg1, arg2, arg3, arg4); 6554 break; 6555 #endif 6556 #ifdef TARGET_NR_setsockopt 6557 case TARGET_NR_setsockopt: 6558 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6559 break; 6560 #endif 6561 6562 case TARGET_NR_syslog: 6563 if (!(p = lock_user_string(arg2))) 6564 goto efault; 6565 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6566 unlock_user(p, arg2, 0); 6567 break; 6568 6569 case TARGET_NR_setitimer: 6570 { 6571 struct itimerval value, ovalue, *pvalue; 6572 6573 if (arg2) { 6574 pvalue = &value; 6575 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6576 || copy_from_user_timeval(&pvalue->it_value, 6577 arg2 + sizeof(struct target_timeval))) 6578 goto efault; 6579 } else { 6580 pvalue = NULL; 6581 } 6582 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6583 if (!is_error(ret) && arg3) { 6584 if (copy_to_user_timeval(arg3, 6585 &ovalue.it_interval) 6586 || 
copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6587 &ovalue.it_value)) 6588 goto efault; 6589 } 6590 } 6591 break; 6592 case TARGET_NR_getitimer: 6593 { 6594 struct itimerval value; 6595 6596 ret = get_errno(getitimer(arg1, &value)); 6597 if (!is_error(ret) && arg2) { 6598 if (copy_to_user_timeval(arg2, 6599 &value.it_interval) 6600 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6601 &value.it_value)) 6602 goto efault; 6603 } 6604 } 6605 break; 6606 case TARGET_NR_stat: 6607 if (!(p = lock_user_string(arg1))) 6608 goto efault; 6609 ret = get_errno(stat(path(p), &st)); 6610 unlock_user(p, arg1, 0); 6611 goto do_stat; 6612 case TARGET_NR_lstat: 6613 if (!(p = lock_user_string(arg1))) 6614 goto efault; 6615 ret = get_errno(lstat(path(p), &st)); 6616 unlock_user(p, arg1, 0); 6617 goto do_stat; 6618 case TARGET_NR_fstat: 6619 { 6620 ret = get_errno(fstat(arg1, &st)); 6621 do_stat: 6622 if (!is_error(ret)) { 6623 struct target_stat *target_st; 6624 6625 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6626 goto efault; 6627 memset(target_st, 0, sizeof(*target_st)); 6628 __put_user(st.st_dev, &target_st->st_dev); 6629 __put_user(st.st_ino, &target_st->st_ino); 6630 __put_user(st.st_mode, &target_st->st_mode); 6631 __put_user(st.st_uid, &target_st->st_uid); 6632 __put_user(st.st_gid, &target_st->st_gid); 6633 __put_user(st.st_nlink, &target_st->st_nlink); 6634 __put_user(st.st_rdev, &target_st->st_rdev); 6635 __put_user(st.st_size, &target_st->st_size); 6636 __put_user(st.st_blksize, &target_st->st_blksize); 6637 __put_user(st.st_blocks, &target_st->st_blocks); 6638 __put_user(st.st_atime, &target_st->target_st_atime); 6639 __put_user(st.st_mtime, &target_st->target_st_mtime); 6640 __put_user(st.st_ctime, &target_st->target_st_ctime); 6641 unlock_user_struct(target_st, arg2, 1); 6642 } 6643 } 6644 break; 6645 #ifdef TARGET_NR_olduname 6646 case TARGET_NR_olduname: 6647 goto unimplemented; 6648 #endif 6649 #ifdef TARGET_NR_iopl 6650 case TARGET_NR_iopl: 6651 goto unimplemented; 6652 #endif 6653 case TARGET_NR_vhangup: 6654 ret = get_errno(vhangup()); 6655 break; 6656 #ifdef TARGET_NR_idle 6657 case TARGET_NR_idle: 6658 goto unimplemented; 6659 #endif 6660 #ifdef TARGET_NR_syscall 6661 case TARGET_NR_syscall: 6662 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6663 arg6, arg7, arg8, 0); 6664 break; 6665 #endif 6666 case TARGET_NR_wait4: 6667 { 6668 int status; 6669 abi_long status_ptr = arg2; 6670 struct rusage rusage, *rusage_ptr; 6671 abi_ulong target_rusage = arg4; 6672 if (target_rusage) 6673 rusage_ptr = &rusage; 6674 else 6675 rusage_ptr = NULL; 6676 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6677 if (!is_error(ret)) { 6678 if (status_ptr && ret) { 6679 status = host_to_target_waitstatus(status); 6680 if (put_user_s32(status, status_ptr)) 6681 goto efault; 6682 } 6683 if (target_rusage) 6684 host_to_target_rusage(target_rusage, &rusage); 6685 } 6686 } 6687 break; 6688 #ifdef TARGET_NR_swapoff 6689 case TARGET_NR_swapoff: 6690 if (!(p = lock_user_string(arg1))) 6691 goto efault; 6692 ret = get_errno(swapoff(p)); 6693 unlock_user(p, arg1, 0); 6694 break; 6695 #endif 6696 case TARGET_NR_sysinfo: 6697 { 6698 struct target_sysinfo *target_value; 6699 struct sysinfo value; 6700 ret = get_errno(sysinfo(&value)); 6701 if (!is_error(ret) && arg1) 6702 { 6703 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6704 goto efault; 6705 __put_user(value.uptime, &target_value->uptime); 6706 __put_user(value.loads[0], 
&target_value->loads[0]); 6707 __put_user(value.loads[1], &target_value->loads[1]); 6708 __put_user(value.loads[2], &target_value->loads[2]); 6709 __put_user(value.totalram, &target_value->totalram); 6710 __put_user(value.freeram, &target_value->freeram); 6711 __put_user(value.sharedram, &target_value->sharedram); 6712 __put_user(value.bufferram, &target_value->bufferram); 6713 __put_user(value.totalswap, &target_value->totalswap); 6714 __put_user(value.freeswap, &target_value->freeswap); 6715 __put_user(value.procs, &target_value->procs); 6716 __put_user(value.totalhigh, &target_value->totalhigh); 6717 __put_user(value.freehigh, &target_value->freehigh); 6718 __put_user(value.mem_unit, &target_value->mem_unit); 6719 unlock_user_struct(target_value, arg1, 1); 6720 } 6721 } 6722 break; 6723 #ifdef TARGET_NR_ipc 6724 case TARGET_NR_ipc: 6725 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6726 break; 6727 #endif 6728 #ifdef TARGET_NR_semget 6729 case TARGET_NR_semget: 6730 ret = get_errno(semget(arg1, arg2, arg3)); 6731 break; 6732 #endif 6733 #ifdef TARGET_NR_semop 6734 case TARGET_NR_semop: 6735 ret = get_errno(do_semop(arg1, arg2, arg3)); 6736 break; 6737 #endif 6738 #ifdef TARGET_NR_semctl 6739 case TARGET_NR_semctl: 6740 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 6741 break; 6742 #endif 6743 #ifdef TARGET_NR_msgctl 6744 case TARGET_NR_msgctl: 6745 ret = do_msgctl(arg1, arg2, arg3); 6746 break; 6747 #endif 6748 #ifdef TARGET_NR_msgget 6749 case TARGET_NR_msgget: 6750 ret = get_errno(msgget(arg1, arg2)); 6751 break; 6752 #endif 6753 #ifdef TARGET_NR_msgrcv 6754 case TARGET_NR_msgrcv: 6755 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 6756 break; 6757 #endif 6758 #ifdef TARGET_NR_msgsnd 6759 case TARGET_NR_msgsnd: 6760 ret = do_msgsnd(arg1, arg2, arg3, arg4); 6761 break; 6762 #endif 6763 #ifdef TARGET_NR_shmget 6764 case TARGET_NR_shmget: 6765 ret = get_errno(shmget(arg1, arg2, arg3)); 6766 break; 6767 #endif 6768 #ifdef TARGET_NR_shmctl 6769 case TARGET_NR_shmctl: 6770 ret = do_shmctl(arg1, arg2, arg3); 6771 break; 6772 #endif 6773 #ifdef TARGET_NR_shmat 6774 case TARGET_NR_shmat: 6775 ret = do_shmat(arg1, arg2, arg3); 6776 break; 6777 #endif 6778 #ifdef TARGET_NR_shmdt 6779 case TARGET_NR_shmdt: 6780 ret = do_shmdt(arg1); 6781 break; 6782 #endif 6783 case TARGET_NR_fsync: 6784 ret = get_errno(fsync(arg1)); 6785 break; 6786 case TARGET_NR_clone: 6787 #if defined(TARGET_SH4) || defined(TARGET_ALPHA) 6788 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 6789 #elif defined(TARGET_CRIS) 6790 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); 6791 #elif defined(TARGET_S390X) 6792 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 6793 #else 6794 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 6795 #endif 6796 break; 6797 #ifdef __NR_exit_group 6798 /* new thread calls */ 6799 case TARGET_NR_exit_group: 6800 #ifdef TARGET_GPROF 6801 _mcleanup(); 6802 #endif 6803 gdb_exit(cpu_env, arg1); 6804 ret = get_errno(exit_group(arg1)); 6805 break; 6806 #endif 6807 case TARGET_NR_setdomainname: 6808 if (!(p = lock_user_string(arg1))) 6809 goto efault; 6810 ret = get_errno(setdomainname(p, arg2)); 6811 unlock_user(p, arg1, 0); 6812 break; 6813 case TARGET_NR_uname: 6814 /* no need to transcode because we use the linux syscall */ 6815 { 6816 struct new_utsname * buf; 6817 6818 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 6819 goto efault; 6820 ret = get_errno(sys_uname(buf)); 6821 if (!is_error(ret)) { 6822 /* 
Overrite the native machine name with whatever is being 6823 emulated. */ 6824 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 6825 /* Allow the user to override the reported release. */ 6826 if (qemu_uname_release && *qemu_uname_release) 6827 strcpy (buf->release, qemu_uname_release); 6828 } 6829 unlock_user_struct(buf, arg1, 1); 6830 } 6831 break; 6832 #ifdef TARGET_I386 6833 case TARGET_NR_modify_ldt: 6834 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 6835 break; 6836 #if !defined(TARGET_X86_64) 6837 case TARGET_NR_vm86old: 6838 goto unimplemented; 6839 case TARGET_NR_vm86: 6840 ret = do_vm86(cpu_env, arg1, arg2); 6841 break; 6842 #endif 6843 #endif 6844 case TARGET_NR_adjtimex: 6845 goto unimplemented; 6846 #ifdef TARGET_NR_create_module 6847 case TARGET_NR_create_module: 6848 #endif 6849 case TARGET_NR_init_module: 6850 case TARGET_NR_delete_module: 6851 #ifdef TARGET_NR_get_kernel_syms 6852 case TARGET_NR_get_kernel_syms: 6853 #endif 6854 goto unimplemented; 6855 case TARGET_NR_quotactl: 6856 goto unimplemented; 6857 case TARGET_NR_getpgid: 6858 ret = get_errno(getpgid(arg1)); 6859 break; 6860 case TARGET_NR_fchdir: 6861 ret = get_errno(fchdir(arg1)); 6862 break; 6863 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 6864 case TARGET_NR_bdflush: 6865 goto unimplemented; 6866 #endif 6867 #ifdef TARGET_NR_sysfs 6868 case TARGET_NR_sysfs: 6869 goto unimplemented; 6870 #endif 6871 case TARGET_NR_personality: 6872 ret = get_errno(personality(arg1)); 6873 break; 6874 #ifdef TARGET_NR_afs_syscall 6875 case TARGET_NR_afs_syscall: 6876 goto unimplemented; 6877 #endif 6878 #ifdef TARGET_NR__llseek /* Not on alpha */ 6879 case TARGET_NR__llseek: 6880 { 6881 int64_t res; 6882 #if !defined(__NR_llseek) 6883 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 6884 if (res == -1) { 6885 ret = get_errno(res); 6886 } else { 6887 ret = 0; 6888 } 6889 #else 6890 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 6891 #endif 6892 if ((ret == 0) && put_user_s64(res, arg4)) { 6893 goto efault; 6894 } 6895 } 6896 break; 6897 #endif 6898 case TARGET_NR_getdents: 6899 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 6900 { 6901 struct target_dirent *target_dirp; 6902 struct linux_dirent *dirp; 6903 abi_long count = arg3; 6904 6905 dirp = malloc(count); 6906 if (!dirp) { 6907 ret = -TARGET_ENOMEM; 6908 goto fail; 6909 } 6910 6911 ret = get_errno(sys_getdents(arg1, dirp, count)); 6912 if (!is_error(ret)) { 6913 struct linux_dirent *de; 6914 struct target_dirent *tde; 6915 int len = ret; 6916 int reclen, treclen; 6917 int count1, tnamelen; 6918 6919 count1 = 0; 6920 de = dirp; 6921 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6922 goto efault; 6923 tde = target_dirp; 6924 while (len > 0) { 6925 reclen = de->d_reclen; 6926 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long))); 6927 tde->d_reclen = tswap16(treclen); 6928 tde->d_ino = tswapal(de->d_ino); 6929 tde->d_off = tswapal(de->d_off); 6930 tnamelen = treclen - (2 * sizeof(abi_long) + 2); 6931 if (tnamelen > 256) 6932 tnamelen = 256; 6933 /* XXX: may not be correct */ 6934 pstrcpy(tde->d_name, tnamelen, de->d_name); 6935 de = (struct linux_dirent *)((char *)de + reclen); 6936 len -= reclen; 6937 tde = (struct target_dirent *)((char *)tde + treclen); 6938 count1 += treclen; 6939 } 6940 ret = count1; 6941 unlock_user(target_dirp, arg2, ret); 6942 } 6943 free(dirp); 6944 } 6945 #else 6946 { 6947 struct linux_dirent *dirp; 6948 abi_long count = arg3; 6949 6950 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6951 goto 
efault; 6952 ret = get_errno(sys_getdents(arg1, dirp, count)); 6953 if (!is_error(ret)) { 6954 struct linux_dirent *de; 6955 int len = ret; 6956 int reclen; 6957 de = dirp; 6958 while (len > 0) { 6959 reclen = de->d_reclen; 6960 if (reclen > len) 6961 break; 6962 de->d_reclen = tswap16(reclen); 6963 tswapls(&de->d_ino); 6964 tswapls(&de->d_off); 6965 de = (struct linux_dirent *)((char *)de + reclen); 6966 len -= reclen; 6967 } 6968 } 6969 unlock_user(dirp, arg2, ret); 6970 } 6971 #endif 6972 break; 6973 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 6974 case TARGET_NR_getdents64: 6975 { 6976 struct linux_dirent64 *dirp; 6977 abi_long count = arg3; 6978 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 6979 goto efault; 6980 ret = get_errno(sys_getdents64(arg1, dirp, count)); 6981 if (!is_error(ret)) { 6982 struct linux_dirent64 *de; 6983 int len = ret; 6984 int reclen; 6985 de = dirp; 6986 while (len > 0) { 6987 reclen = de->d_reclen; 6988 if (reclen > len) 6989 break; 6990 de->d_reclen = tswap16(reclen); 6991 tswap64s((uint64_t *)&de->d_ino); 6992 tswap64s((uint64_t *)&de->d_off); 6993 de = (struct linux_dirent64 *)((char *)de + reclen); 6994 len -= reclen; 6995 } 6996 } 6997 unlock_user(dirp, arg2, ret); 6998 } 6999 break; 7000 #endif /* TARGET_NR_getdents64 */ 7001 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X) 7002 #ifdef TARGET_S390X 7003 case TARGET_NR_select: 7004 #else 7005 case TARGET_NR__newselect: 7006 #endif 7007 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7008 break; 7009 #endif 7010 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7011 # ifdef TARGET_NR_poll 7012 case TARGET_NR_poll: 7013 # endif 7014 # ifdef TARGET_NR_ppoll 7015 case TARGET_NR_ppoll: 7016 # endif 7017 { 7018 struct target_pollfd *target_pfd; 7019 unsigned int nfds = arg2; 7020 int timeout = arg3; 7021 struct pollfd *pfd; 7022 unsigned int i; 7023 7024 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7025 if (!target_pfd) 7026 goto efault; 7027 7028 pfd = alloca(sizeof(struct pollfd) * nfds); 7029 for(i = 0; i < nfds; i++) { 7030 pfd[i].fd = tswap32(target_pfd[i].fd); 7031 pfd[i].events = tswap16(target_pfd[i].events); 7032 } 7033 7034 # ifdef TARGET_NR_ppoll 7035 if (num == TARGET_NR_ppoll) { 7036 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7037 target_sigset_t *target_set; 7038 sigset_t _set, *set = &_set; 7039 7040 if (arg3) { 7041 if (target_to_host_timespec(timeout_ts, arg3)) { 7042 unlock_user(target_pfd, arg1, 0); 7043 goto efault; 7044 } 7045 } else { 7046 timeout_ts = NULL; 7047 } 7048 7049 if (arg4) { 7050 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7051 if (!target_set) { 7052 unlock_user(target_pfd, arg1, 0); 7053 goto efault; 7054 } 7055 target_to_host_sigset(set, target_set); 7056 } else { 7057 set = NULL; 7058 } 7059 7060 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7061 7062 if (!is_error(ret) && arg3) { 7063 host_to_target_timespec(arg3, timeout_ts); 7064 } 7065 if (arg4) { 7066 unlock_user(target_set, arg4, 0); 7067 } 7068 } else 7069 # endif 7070 ret = get_errno(poll(pfd, nfds, timeout)); 7071 7072 if (!is_error(ret)) { 7073 for(i = 0; i < nfds; i++) { 7074 target_pfd[i].revents = tswap16(pfd[i].revents); 7075 } 7076 } 7077 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7078 } 7079 break; 7080 #endif 7081 case TARGET_NR_flock: 7082 /* NOTE: the flock constant seems to be the same for every 7083 Linux platform */ 7084 ret = 
get_errno(flock(arg1, arg2)); 7085 break; 7086 case TARGET_NR_readv: 7087 { 7088 int count = arg3; 7089 struct iovec *vec; 7090 7091 vec = alloca(count * sizeof(struct iovec)); 7092 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0) 7093 goto efault; 7094 ret = get_errno(readv(arg1, vec, count)); 7095 unlock_iovec(vec, arg2, count, 1); 7096 } 7097 break; 7098 case TARGET_NR_writev: 7099 { 7100 int count = arg3; 7101 struct iovec *vec; 7102 7103 vec = alloca(count * sizeof(struct iovec)); 7104 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) 7105 goto efault; 7106 ret = get_errno(writev(arg1, vec, count)); 7107 unlock_iovec(vec, arg2, count, 0); 7108 } 7109 break; 7110 case TARGET_NR_getsid: 7111 ret = get_errno(getsid(arg1)); 7112 break; 7113 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7114 case TARGET_NR_fdatasync: 7115 ret = get_errno(fdatasync(arg1)); 7116 break; 7117 #endif 7118 case TARGET_NR__sysctl: 7119 /* We don't implement this, but ENOTDIR is always a safe 7120 return value. */ 7121 ret = -TARGET_ENOTDIR; 7122 break; 7123 case TARGET_NR_sched_getaffinity: 7124 { 7125 unsigned int mask_size; 7126 unsigned long *mask; 7127 7128 /* 7129 * sched_getaffinity needs multiples of ulong, so need to take 7130 * care of mismatches between target ulong and host ulong sizes. 7131 */ 7132 if (arg2 & (sizeof(abi_ulong) - 1)) { 7133 ret = -TARGET_EINVAL; 7134 break; 7135 } 7136 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7137 7138 mask = alloca(mask_size); 7139 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7140 7141 if (!is_error(ret)) { 7142 if (copy_to_user(arg3, mask, ret)) { 7143 goto efault; 7144 } 7145 } 7146 } 7147 break; 7148 case TARGET_NR_sched_setaffinity: 7149 { 7150 unsigned int mask_size; 7151 unsigned long *mask; 7152 7153 /* 7154 * sched_setaffinity needs multiples of ulong, so need to take 7155 * care of mismatches between target ulong and host ulong sizes. 
7156 */ 7157 if (arg2 & (sizeof(abi_ulong) - 1)) { 7158 ret = -TARGET_EINVAL; 7159 break; 7160 } 7161 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7162 7163 mask = alloca(mask_size); 7164 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7165 goto efault; 7166 } 7167 memcpy(mask, p, arg2); 7168 unlock_user_struct(p, arg2, 0); 7169 7170 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7171 } 7172 break; 7173 case TARGET_NR_sched_setparam: 7174 { 7175 struct sched_param *target_schp; 7176 struct sched_param schp; 7177 7178 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7179 goto efault; 7180 schp.sched_priority = tswap32(target_schp->sched_priority); 7181 unlock_user_struct(target_schp, arg2, 0); 7182 ret = get_errno(sched_setparam(arg1, &schp)); 7183 } 7184 break; 7185 case TARGET_NR_sched_getparam: 7186 { 7187 struct sched_param *target_schp; 7188 struct sched_param schp; 7189 ret = get_errno(sched_getparam(arg1, &schp)); 7190 if (!is_error(ret)) { 7191 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7192 goto efault; 7193 target_schp->sched_priority = tswap32(schp.sched_priority); 7194 unlock_user_struct(target_schp, arg2, 1); 7195 } 7196 } 7197 break; 7198 case TARGET_NR_sched_setscheduler: 7199 { 7200 struct sched_param *target_schp; 7201 struct sched_param schp; 7202 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7203 goto efault; 7204 schp.sched_priority = tswap32(target_schp->sched_priority); 7205 unlock_user_struct(target_schp, arg3, 0); 7206 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7207 } 7208 break; 7209 case TARGET_NR_sched_getscheduler: 7210 ret = get_errno(sched_getscheduler(arg1)); 7211 break; 7212 case TARGET_NR_sched_yield: 7213 ret = get_errno(sched_yield()); 7214 break; 7215 case TARGET_NR_sched_get_priority_max: 7216 ret = get_errno(sched_get_priority_max(arg1)); 7217 break; 7218 case TARGET_NR_sched_get_priority_min: 7219 ret = get_errno(sched_get_priority_min(arg1)); 7220 break; 7221 case TARGET_NR_sched_rr_get_interval: 7222 { 7223 struct timespec ts; 7224 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7225 if (!is_error(ret)) { 7226 host_to_target_timespec(arg2, &ts); 7227 } 7228 } 7229 break; 7230 case TARGET_NR_nanosleep: 7231 { 7232 struct timespec req, rem; 7233 target_to_host_timespec(&req, arg1); 7234 ret = get_errno(nanosleep(&req, &rem)); 7235 if (is_error(ret) && arg2) { 7236 host_to_target_timespec(arg2, &rem); 7237 } 7238 } 7239 break; 7240 #ifdef TARGET_NR_query_module 7241 case TARGET_NR_query_module: 7242 goto unimplemented; 7243 #endif 7244 #ifdef TARGET_NR_nfsservctl 7245 case TARGET_NR_nfsservctl: 7246 goto unimplemented; 7247 #endif 7248 case TARGET_NR_prctl: 7249 switch (arg1) { 7250 case PR_GET_PDEATHSIG: 7251 { 7252 int deathsig; 7253 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7254 if (!is_error(ret) && arg2 7255 && put_user_ual(deathsig, arg2)) { 7256 goto efault; 7257 } 7258 break; 7259 } 7260 #ifdef PR_GET_NAME 7261 case PR_GET_NAME: 7262 { 7263 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7264 if (!name) { 7265 goto efault; 7266 } 7267 ret = get_errno(prctl(arg1, (unsigned long)name, 7268 arg3, arg4, arg5)); 7269 unlock_user(name, arg2, 16); 7270 break; 7271 } 7272 case PR_SET_NAME: 7273 { 7274 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7275 if (!name) { 7276 goto efault; 7277 } 7278 ret = get_errno(prctl(arg1, (unsigned long)name, 7279 arg3, arg4, arg5)); 7280 unlock_user(name, arg2, 0); 7281 break; 7282 } 7283 #endif 7284 
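        /* Note: PR_GET_NAME/PR_SET_NAME above transfer a fixed 16-byte
           buffer, matching the kernel's TASK_COMM_LEN (15 name characters
           plus the terminating NUL). */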
default: 7285 /* Most prctl options have no pointer arguments */ 7286 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7287 break; 7288 } 7289 break; 7290 #ifdef TARGET_NR_arch_prctl 7291 case TARGET_NR_arch_prctl: 7292 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7293 ret = do_arch_prctl(cpu_env, arg1, arg2); 7294 break; 7295 #else 7296 goto unimplemented; 7297 #endif 7298 #endif 7299 #ifdef TARGET_NR_pread 7300 case TARGET_NR_pread: 7301 if (regpairs_aligned(cpu_env)) 7302 arg4 = arg5; 7303 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7304 goto efault; 7305 ret = get_errno(pread(arg1, p, arg3, arg4)); 7306 unlock_user(p, arg2, ret); 7307 break; 7308 case TARGET_NR_pwrite: 7309 if (regpairs_aligned(cpu_env)) 7310 arg4 = arg5; 7311 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7312 goto efault; 7313 ret = get_errno(pwrite(arg1, p, arg3, arg4)); 7314 unlock_user(p, arg2, 0); 7315 break; 7316 #endif 7317 #ifdef TARGET_NR_pread64 7318 case TARGET_NR_pread64: 7319 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7320 goto efault; 7321 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7322 unlock_user(p, arg2, ret); 7323 break; 7324 case TARGET_NR_pwrite64: 7325 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7326 goto efault; 7327 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7328 unlock_user(p, arg2, 0); 7329 break; 7330 #endif 7331 case TARGET_NR_getcwd: 7332 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7333 goto efault; 7334 ret = get_errno(sys_getcwd1(p, arg2)); 7335 unlock_user(p, arg1, ret); 7336 break; 7337 case TARGET_NR_capget: 7338 goto unimplemented; 7339 case TARGET_NR_capset: 7340 goto unimplemented; 7341 case TARGET_NR_sigaltstack: 7342 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7343 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7344 defined(TARGET_M68K) || defined(TARGET_S390X) 7345 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7346 break; 7347 #else 7348 goto unimplemented; 7349 #endif 7350 case TARGET_NR_sendfile: 7351 goto unimplemented; 7352 #ifdef TARGET_NR_getpmsg 7353 case TARGET_NR_getpmsg: 7354 goto unimplemented; 7355 #endif 7356 #ifdef TARGET_NR_putpmsg 7357 case TARGET_NR_putpmsg: 7358 goto unimplemented; 7359 #endif 7360 #ifdef TARGET_NR_vfork 7361 case TARGET_NR_vfork: 7362 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7363 0, 0, 0, 0)); 7364 break; 7365 #endif 7366 #ifdef TARGET_NR_ugetrlimit 7367 case TARGET_NR_ugetrlimit: 7368 { 7369 struct rlimit rlim; 7370 int resource = target_to_host_resource(arg1); 7371 ret = get_errno(getrlimit(resource, &rlim)); 7372 if (!is_error(ret)) { 7373 struct target_rlimit *target_rlim; 7374 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7375 goto efault; 7376 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7377 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7378 unlock_user_struct(target_rlim, arg2, 1); 7379 } 7380 break; 7381 } 7382 #endif 7383 #ifdef TARGET_NR_truncate64 7384 case TARGET_NR_truncate64: 7385 if (!(p = lock_user_string(arg1))) 7386 goto efault; 7387 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7388 unlock_user(p, arg1, 0); 7389 break; 7390 #endif 7391 #ifdef TARGET_NR_ftruncate64 7392 case TARGET_NR_ftruncate64: 7393 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7394 break; 7395 #endif 7396 #ifdef TARGET_NR_stat64 7397 case TARGET_NR_stat64: 7398 if (!(p = 
lock_user_string(arg1))) 7399 goto efault; 7400 ret = get_errno(stat(path(p), &st)); 7401 unlock_user(p, arg1, 0); 7402 if (!is_error(ret)) 7403 ret = host_to_target_stat64(cpu_env, arg2, &st); 7404 break; 7405 #endif 7406 #ifdef TARGET_NR_lstat64 7407 case TARGET_NR_lstat64: 7408 if (!(p = lock_user_string(arg1))) 7409 goto efault; 7410 ret = get_errno(lstat(path(p), &st)); 7411 unlock_user(p, arg1, 0); 7412 if (!is_error(ret)) 7413 ret = host_to_target_stat64(cpu_env, arg2, &st); 7414 break; 7415 #endif 7416 #ifdef TARGET_NR_fstat64 7417 case TARGET_NR_fstat64: 7418 ret = get_errno(fstat(arg1, &st)); 7419 if (!is_error(ret)) 7420 ret = host_to_target_stat64(cpu_env, arg2, &st); 7421 break; 7422 #endif 7423 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ 7424 (defined(__NR_fstatat64) || defined(__NR_newfstatat)) 7425 #ifdef TARGET_NR_fstatat64 7426 case TARGET_NR_fstatat64: 7427 #endif 7428 #ifdef TARGET_NR_newfstatat 7429 case TARGET_NR_newfstatat: 7430 #endif 7431 if (!(p = lock_user_string(arg2))) 7432 goto efault; 7433 #ifdef __NR_fstatat64 7434 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); 7435 #else 7436 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); 7437 #endif 7438 if (!is_error(ret)) 7439 ret = host_to_target_stat64(cpu_env, arg3, &st); 7440 break; 7441 #endif 7442 case TARGET_NR_lchown: 7443 if (!(p = lock_user_string(arg1))) 7444 goto efault; 7445 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7446 unlock_user(p, arg1, 0); 7447 break; 7448 #ifdef TARGET_NR_getuid 7449 case TARGET_NR_getuid: 7450 ret = get_errno(high2lowuid(getuid())); 7451 break; 7452 #endif 7453 #ifdef TARGET_NR_getgid 7454 case TARGET_NR_getgid: 7455 ret = get_errno(high2lowgid(getgid())); 7456 break; 7457 #endif 7458 #ifdef TARGET_NR_geteuid 7459 case TARGET_NR_geteuid: 7460 ret = get_errno(high2lowuid(geteuid())); 7461 break; 7462 #endif 7463 #ifdef TARGET_NR_getegid 7464 case TARGET_NR_getegid: 7465 ret = get_errno(high2lowgid(getegid())); 7466 break; 7467 #endif 7468 case TARGET_NR_setreuid: 7469 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7470 break; 7471 case TARGET_NR_setregid: 7472 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7473 break; 7474 case TARGET_NR_getgroups: 7475 { 7476 int gidsetsize = arg1; 7477 target_id *target_grouplist; 7478 gid_t *grouplist; 7479 int i; 7480 7481 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7482 ret = get_errno(getgroups(gidsetsize, grouplist)); 7483 if (gidsetsize == 0) 7484 break; 7485 if (!is_error(ret)) { 7486 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0); 7487 if (!target_grouplist) 7488 goto efault; 7489 for(i = 0;i < ret; i++) 7490 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7491 unlock_user(target_grouplist, arg2, gidsetsize * 2); 7492 } 7493 } 7494 break; 7495 case TARGET_NR_setgroups: 7496 { 7497 int gidsetsize = arg1; 7498 target_id *target_grouplist; 7499 gid_t *grouplist; 7500 int i; 7501 7502 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7503 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1); 7504 if (!target_grouplist) { 7505 ret = -TARGET_EFAULT; 7506 goto fail; 7507 } 7508 for(i = 0;i < gidsetsize; i++) 7509 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7510 unlock_user(target_grouplist, arg2, 0); 7511 ret = get_errno(setgroups(gidsetsize, grouplist)); 7512 } 7513 break; 7514 case TARGET_NR_fchown: 7515 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7516 
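        /* As with the other legacy 16-bit ID syscalls in this group, the
           low2highuid()/low2highgid() helpers widen the target's 16-bit IDs
           for the host call, preserving the 0xffff/-1 "no change" sentinel. */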
break; 7517 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) 7518 case TARGET_NR_fchownat: 7519 if (!(p = lock_user_string(arg2))) 7520 goto efault; 7521 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); 7522 unlock_user(p, arg2, 0); 7523 break; 7524 #endif 7525 #ifdef TARGET_NR_setresuid 7526 case TARGET_NR_setresuid: 7527 ret = get_errno(setresuid(low2highuid(arg1), 7528 low2highuid(arg2), 7529 low2highuid(arg3))); 7530 break; 7531 #endif 7532 #ifdef TARGET_NR_getresuid 7533 case TARGET_NR_getresuid: 7534 { 7535 uid_t ruid, euid, suid; 7536 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7537 if (!is_error(ret)) { 7538 if (put_user_u16(high2lowuid(ruid), arg1) 7539 || put_user_u16(high2lowuid(euid), arg2) 7540 || put_user_u16(high2lowuid(suid), arg3)) 7541 goto efault; 7542 } 7543 } 7544 break; 7545 #endif 7546 #ifdef TARGET_NR_getresgid 7547 case TARGET_NR_setresgid: 7548 ret = get_errno(setresgid(low2highgid(arg1), 7549 low2highgid(arg2), 7550 low2highgid(arg3))); 7551 break; 7552 #endif 7553 #ifdef TARGET_NR_getresgid 7554 case TARGET_NR_getresgid: 7555 { 7556 gid_t rgid, egid, sgid; 7557 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7558 if (!is_error(ret)) { 7559 if (put_user_u16(high2lowgid(rgid), arg1) 7560 || put_user_u16(high2lowgid(egid), arg2) 7561 || put_user_u16(high2lowgid(sgid), arg3)) 7562 goto efault; 7563 } 7564 } 7565 break; 7566 #endif 7567 case TARGET_NR_chown: 7568 if (!(p = lock_user_string(arg1))) 7569 goto efault; 7570 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7571 unlock_user(p, arg1, 0); 7572 break; 7573 case TARGET_NR_setuid: 7574 ret = get_errno(setuid(low2highuid(arg1))); 7575 break; 7576 case TARGET_NR_setgid: 7577 ret = get_errno(setgid(low2highgid(arg1))); 7578 break; 7579 case TARGET_NR_setfsuid: 7580 ret = get_errno(setfsuid(arg1)); 7581 break; 7582 case TARGET_NR_setfsgid: 7583 ret = get_errno(setfsgid(arg1)); 7584 break; 7585 7586 #ifdef TARGET_NR_lchown32 7587 case TARGET_NR_lchown32: 7588 if (!(p = lock_user_string(arg1))) 7589 goto efault; 7590 ret = get_errno(lchown(p, arg2, arg3)); 7591 unlock_user(p, arg1, 0); 7592 break; 7593 #endif 7594 #ifdef TARGET_NR_getuid32 7595 case TARGET_NR_getuid32: 7596 ret = get_errno(getuid()); 7597 break; 7598 #endif 7599 7600 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 7601 /* Alpha specific */ 7602 case TARGET_NR_getxuid: 7603 { 7604 uid_t euid; 7605 euid=geteuid(); 7606 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 7607 } 7608 ret = get_errno(getuid()); 7609 break; 7610 #endif 7611 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 7612 /* Alpha specific */ 7613 case TARGET_NR_getxgid: 7614 { 7615 uid_t egid; 7616 egid=getegid(); 7617 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 7618 } 7619 ret = get_errno(getgid()); 7620 break; 7621 #endif 7622 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 7623 /* Alpha specific */ 7624 case TARGET_NR_osf_getsysinfo: 7625 ret = -TARGET_EOPNOTSUPP; 7626 switch (arg1) { 7627 case TARGET_GSI_IEEE_FP_CONTROL: 7628 { 7629 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 7630 7631 /* Copied from linux ieee_fpcr_to_swcr. 
*/ 7632 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 7633 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 7634 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 7635 | SWCR_TRAP_ENABLE_DZE 7636 | SWCR_TRAP_ENABLE_OVF); 7637 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 7638 | SWCR_TRAP_ENABLE_INE); 7639 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 7640 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 7641 7642 if (put_user_u64 (swcr, arg2)) 7643 goto efault; 7644 ret = 0; 7645 } 7646 break; 7647 7648 /* case GSI_IEEE_STATE_AT_SIGNAL: 7649 -- Not implemented in linux kernel. 7650 case GSI_UACPROC: 7651 -- Retrieves current unaligned access state; not much used. 7652 case GSI_PROC_TYPE: 7653 -- Retrieves implver information; surely not used. 7654 case GSI_GET_HWRPB: 7655 -- Grabs a copy of the HWRPB; surely not used. 7656 */ 7657 } 7658 break; 7659 #endif 7660 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 7661 /* Alpha specific */ 7662 case TARGET_NR_osf_setsysinfo: 7663 ret = -TARGET_EOPNOTSUPP; 7664 switch (arg1) { 7665 case TARGET_SSI_IEEE_FP_CONTROL: 7666 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 7667 { 7668 uint64_t swcr, fpcr, orig_fpcr; 7669 7670 if (get_user_u64 (swcr, arg2)) 7671 goto efault; 7672 orig_fpcr = cpu_alpha_load_fpcr (cpu_env); 7673 fpcr = orig_fpcr & FPCR_DYN_MASK; 7674 7675 /* Copied from linux ieee_swcr_to_fpcr. */ 7676 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 7677 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 7678 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 7679 | SWCR_TRAP_ENABLE_DZE 7680 | SWCR_TRAP_ENABLE_OVF)) << 48; 7681 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 7682 | SWCR_TRAP_ENABLE_INE)) << 57; 7683 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 7684 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 7685 7686 cpu_alpha_store_fpcr (cpu_env, fpcr); 7687 ret = 0; 7688 7689 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) { 7690 /* Old exceptions are not signaled. */ 7691 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 7692 7693 /* If any exceptions set by this call, and are unmasked, 7694 send a signal. */ 7695 /* ??? FIXME */ 7696 } 7697 } 7698 break; 7699 7700 /* case SSI_NVPAIRS: 7701 -- Used with SSIN_UACPROC to enable unaligned accesses. 7702 case SSI_IEEE_STATE_AT_SIGNAL: 7703 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 7704 -- Not implemented in linux kernel 7705 */ 7706 } 7707 break; 7708 #endif 7709 #ifdef TARGET_NR_osf_sigprocmask 7710 /* Alpha specific. 
*/ 7711 case TARGET_NR_osf_sigprocmask: 7712 { 7713 abi_ulong mask; 7714 int how; 7715 sigset_t set, oldset; 7716 7717 switch(arg1) { 7718 case TARGET_SIG_BLOCK: 7719 how = SIG_BLOCK; 7720 break; 7721 case TARGET_SIG_UNBLOCK: 7722 how = SIG_UNBLOCK; 7723 break; 7724 case TARGET_SIG_SETMASK: 7725 how = SIG_SETMASK; 7726 break; 7727 default: 7728 ret = -TARGET_EINVAL; 7729 goto fail; 7730 } 7731 mask = arg2; 7732 target_to_host_old_sigset(&set, &mask); 7733 sigprocmask(how, &set, &oldset); 7734 host_to_target_old_sigset(&mask, &oldset); 7735 ret = mask; 7736 } 7737 break; 7738 #endif 7739 7740 #ifdef TARGET_NR_getgid32 7741 case TARGET_NR_getgid32: 7742 ret = get_errno(getgid()); 7743 break; 7744 #endif 7745 #ifdef TARGET_NR_geteuid32 7746 case TARGET_NR_geteuid32: 7747 ret = get_errno(geteuid()); 7748 break; 7749 #endif 7750 #ifdef TARGET_NR_getegid32 7751 case TARGET_NR_getegid32: 7752 ret = get_errno(getegid()); 7753 break; 7754 #endif 7755 #ifdef TARGET_NR_setreuid32 7756 case TARGET_NR_setreuid32: 7757 ret = get_errno(setreuid(arg1, arg2)); 7758 break; 7759 #endif 7760 #ifdef TARGET_NR_setregid32 7761 case TARGET_NR_setregid32: 7762 ret = get_errno(setregid(arg1, arg2)); 7763 break; 7764 #endif 7765 #ifdef TARGET_NR_getgroups32 7766 case TARGET_NR_getgroups32: 7767 { 7768 int gidsetsize = arg1; 7769 uint32_t *target_grouplist; 7770 gid_t *grouplist; 7771 int i; 7772 7773 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7774 ret = get_errno(getgroups(gidsetsize, grouplist)); 7775 if (gidsetsize == 0) 7776 break; 7777 if (!is_error(ret)) { 7778 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 7779 if (!target_grouplist) { 7780 ret = -TARGET_EFAULT; 7781 goto fail; 7782 } 7783 for(i = 0;i < ret; i++) 7784 target_grouplist[i] = tswap32(grouplist[i]); 7785 unlock_user(target_grouplist, arg2, gidsetsize * 4); 7786 } 7787 } 7788 break; 7789 #endif 7790 #ifdef TARGET_NR_setgroups32 7791 case TARGET_NR_setgroups32: 7792 { 7793 int gidsetsize = arg1; 7794 uint32_t *target_grouplist; 7795 gid_t *grouplist; 7796 int i; 7797 7798 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7799 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 7800 if (!target_grouplist) { 7801 ret = -TARGET_EFAULT; 7802 goto fail; 7803 } 7804 for(i = 0;i < gidsetsize; i++) 7805 grouplist[i] = tswap32(target_grouplist[i]); 7806 unlock_user(target_grouplist, arg2, 0); 7807 ret = get_errno(setgroups(gidsetsize, grouplist)); 7808 } 7809 break; 7810 #endif 7811 #ifdef TARGET_NR_fchown32 7812 case TARGET_NR_fchown32: 7813 ret = get_errno(fchown(arg1, arg2, arg3)); 7814 break; 7815 #endif 7816 #ifdef TARGET_NR_setresuid32 7817 case TARGET_NR_setresuid32: 7818 ret = get_errno(setresuid(arg1, arg2, arg3)); 7819 break; 7820 #endif 7821 #ifdef TARGET_NR_getresuid32 7822 case TARGET_NR_getresuid32: 7823 { 7824 uid_t ruid, euid, suid; 7825 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7826 if (!is_error(ret)) { 7827 if (put_user_u32(ruid, arg1) 7828 || put_user_u32(euid, arg2) 7829 || put_user_u32(suid, arg3)) 7830 goto efault; 7831 } 7832 } 7833 break; 7834 #endif 7835 #ifdef TARGET_NR_setresgid32 7836 case TARGET_NR_setresgid32: 7837 ret = get_errno(setresgid(arg1, arg2, arg3)); 7838 break; 7839 #endif 7840 #ifdef TARGET_NR_getresgid32 7841 case TARGET_NR_getresgid32: 7842 { 7843 gid_t rgid, egid, sgid; 7844 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7845 if (!is_error(ret)) { 7846 if (put_user_u32(rgid, arg1) 7847 || put_user_u32(egid, arg2) 7848 || put_user_u32(sgid, arg3)) 7849 goto efault; 
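                /* The *32 variants of the ID syscalls in this group pass
                   uid/gid values through unconverted, since the target ABI
                   already uses full 32-bit IDs; only the legacy 16-bit
                   variants need the high/low conversion helpers. */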
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif

    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
        mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
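    /* On 32-bit ABIs the 64-bit file offset below is passed as a pair of
     * registers; regpairs_aligned() accounts for ABIs that require such a
     * pair to start on an even register, which shifts the remaining
     * arguments up by one.
     */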
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_setxattr) {
                ret = get_errno(setxattr(p, n, v, arg4, arg5));
            } else {
                ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    break;
    case TARGET_NR_fsetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    break;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_getxattr) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = get_errno(lgetxattr(p, n, v, arg4));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    break;
    case TARGET_NR_fgetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fgetxattr(arg1, n, v, arg4));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    break;
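    /* The removexattr family below only takes the path (or fd) and the
     * attribute name, so unlike the get/set cases above there is no value
     * buffer to copy in or out.
     */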
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    {
        void *p, *n;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_removexattr) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = get_errno(lremovexattr(p, n));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
    }
    break;
    case TARGET_NR_fremovexattr:
    {
        void *n;
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fremovexattr(arg1, n));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
    }
    break;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif

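    /* The clock_* syscalls below exchange struct timespec with the guest,
     * so the values are converted with target_to_host_timespec() /
     * host_to_target_timespec(); for clock_nanosleep the (possibly updated)
     * remaining time is written back when the caller supplied a buffer.
     */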
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif

#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

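    /* mq_timedreceive mirrors mq_timedsend above: with a timeout pointer the
     * timespec is converted in both directions, otherwise the untimed
     * mq_receive() variant is used, and the received message priority is
     * copied back to the guest when requested.
     */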
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }

        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                /* the output offset is passed in arg4, not arg2 */
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
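    /* The epoll family converts struct epoll_event between target and host:
     * the event mask is byte-swapped and epoll_data is treated as an opaque
     * 64-bit value. epoll_wait and epoll_pwait share one implementation
     * below and are told apart via num.
     */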
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
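    /* For prlimit64 the new and old limits are converted between the target
     * and host rlimit64 structures field by field, byte-swapping the 64-bit
     * rlim_cur / rlim_max values with tswap64.
     */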
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}