1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <stdarg.h> 23 #include <string.h> 24 #include <elf.h> 25 #include <endian.h> 26 #include <errno.h> 27 #include <unistd.h> 28 #include <fcntl.h> 29 #include <time.h> 30 #include <limits.h> 31 #include <grp.h> 32 #include <sys/types.h> 33 #include <sys/ipc.h> 34 #include <sys/msg.h> 35 #include <sys/wait.h> 36 #include <sys/time.h> 37 #include <sys/stat.h> 38 #include <sys/mount.h> 39 #include <sys/file.h> 40 #include <sys/fsuid.h> 41 #include <sys/personality.h> 42 #include <sys/prctl.h> 43 #include <sys/resource.h> 44 #include <sys/mman.h> 45 #include <sys/swap.h> 46 #include <linux/capability.h> 47 #include <signal.h> 48 #include <sched.h> 49 #ifdef __ia64__ 50 int __clone2(int (*fn)(void *), void *child_stack_base, 51 size_t stack_size, int flags, void *arg, ...); 52 #endif 53 #include <sys/socket.h> 54 #include <sys/un.h> 55 #include <sys/uio.h> 56 #include <sys/poll.h> 57 #include <sys/times.h> 58 #include <sys/shm.h> 59 #include <sys/sem.h> 60 #include <sys/statfs.h> 61 #include <sys/timerfd.h> 62 #include <utime.h> 63 #include <sys/sysinfo.h> 64 //#include <sys/user.h> 65 #include <netinet/ip.h> 66 #include <netinet/tcp.h> 67 #include 
<linux/wireless.h> 68 #include <linux/icmp.h> 69 #include "qemu-common.h" 70 #ifdef TARGET_GPROF 71 #include <sys/gmon.h> 72 #endif 73 #ifdef CONFIG_EVENTFD 74 #include <sys/eventfd.h> 75 #endif 76 #ifdef CONFIG_EPOLL 77 #include <sys/epoll.h> 78 #endif 79 #ifdef CONFIG_ATTR 80 #include "qemu/xattr.h" 81 #endif 82 #ifdef CONFIG_SENDFILE 83 #include <sys/sendfile.h> 84 #endif 85 86 #define termios host_termios 87 #define winsize host_winsize 88 #define termio host_termio 89 #define sgttyb host_sgttyb /* same as target */ 90 #define tchars host_tchars /* same as target */ 91 #define ltchars host_ltchars /* same as target */ 92 93 #include <linux/termios.h> 94 #include <linux/unistd.h> 95 #include <linux/cdrom.h> 96 #include <linux/hdreg.h> 97 #include <linux/soundcard.h> 98 #include <linux/kd.h> 99 #include <linux/mtio.h> 100 #include <linux/fs.h> 101 #if defined(CONFIG_FIEMAP) 102 #include <linux/fiemap.h> 103 #endif 104 #include <linux/fb.h> 105 #include <linux/vt.h> 106 #include <linux/dm-ioctl.h> 107 #include <linux/reboot.h> 108 #include <linux/route.h> 109 #include <linux/filter.h> 110 #include <linux/blkpg.h> 111 #include "linux_loop.h" 112 #include "uname.h" 113 114 #include "qemu.h" 115 116 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ 117 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) 118 119 //#define DEBUG 120 121 //#include <linux/msdos_fs.h> 122 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 123 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 124 125 126 #undef _syscall0 127 #undef _syscall1 128 #undef _syscall2 129 #undef _syscall3 130 #undef _syscall4 131 #undef _syscall5 132 #undef _syscall6 133 134 #define _syscall0(type,name) \ 135 static type name (void) \ 136 { \ 137 return syscall(__NR_##name); \ 138 } 139 140 #define _syscall1(type,name,type1,arg1) \ 141 static type name (type1 arg1) \ 142 { \ 143 return syscall(__NR_##name, arg1); \ 144 } 145 146 #define 
_syscall2(type,name,type1,arg1,type2,arg2) \ 147 static type name (type1 arg1,type2 arg2) \ 148 { \ 149 return syscall(__NR_##name, arg1, arg2); \ 150 } 151 152 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 153 static type name (type1 arg1,type2 arg2,type3 arg3) \ 154 { \ 155 return syscall(__NR_##name, arg1, arg2, arg3); \ 156 } 157 158 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 160 { \ 161 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 162 } 163 164 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 165 type5,arg5) \ 166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 167 { \ 168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 169 } 170 171 172 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 173 type5,arg5,type6,arg6) \ 174 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 175 type6 arg6) \ 176 { \ 177 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 178 } 179 180 181 #define __NR_sys_uname __NR_uname 182 #define __NR_sys_getcwd1 __NR_getcwd 183 #define __NR_sys_getdents __NR_getdents 184 #define __NR_sys_getdents64 __NR_getdents64 185 #define __NR_sys_getpriority __NR_getpriority 186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 187 #define __NR_sys_syslog __NR_syslog 188 #define __NR_sys_tgkill __NR_tgkill 189 #define __NR_sys_tkill __NR_tkill 190 #define __NR_sys_futex __NR_futex 191 #define __NR_sys_inotify_init __NR_inotify_init 192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 194 195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 196 defined(__s390x__) 197 #define __NR__llseek __NR_lseek 198 #endif 199 200 /* Newer kernel ports have llseek() instead of _llseek() */ 201 #if 
defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 202 #define TARGET_NR__llseek TARGET_NR_llseek 203 #endif 204 205 #ifdef __NR_gettid 206 _syscall0(int, gettid) 207 #else 208 /* This is a replacement for the host gettid() and must return a host 209 errno. */ 210 static int gettid(void) { 211 return -ENOSYS; 212 } 213 #endif 214 #ifdef __NR_getdents 215 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 216 #endif 217 #if !defined(__NR_getdents) || \ 218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 220 #endif 221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 223 loff_t *, res, uint, wh); 224 #endif 225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo) 226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 227 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 228 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig) 229 #endif 230 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 231 _syscall2(int,sys_tkill,int,tid,int,sig) 232 #endif 233 #ifdef __NR_exit_group 234 _syscall1(int,exit_group,int,error_code) 235 #endif 236 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 237 _syscall1(int,set_tid_address,int *,tidptr) 238 #endif 239 #if defined(TARGET_NR_futex) && defined(__NR_futex) 240 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 241 const struct timespec *,timeout,int *,uaddr2,int,val3) 242 #endif 243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 245 unsigned long *, user_mask_ptr); 246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 248 unsigned long *, user_mask_ptr); 249 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, 
cmd, 250 void *, arg); 251 _syscall2(int, capget, struct __user_cap_header_struct *, header, 252 struct __user_cap_data_struct *, data); 253 _syscall2(int, capset, struct __user_cap_header_struct *, header, 254 struct __user_cap_data_struct *, data); 255 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 256 _syscall2(int, ioprio_get, int, which, int, who) 257 #endif 258 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 259 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio) 260 #endif 261 262 static bitmask_transtbl fcntl_flags_tbl[] = { 263 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 264 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 265 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 266 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 267 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 268 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 269 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 270 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 271 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 272 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 273 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 274 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 275 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 276 #if defined(O_DIRECT) 277 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 278 #endif 279 #if defined(O_NOATIME) 280 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 281 #endif 282 #if defined(O_CLOEXEC) 283 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 284 #endif 285 #if defined(O_PATH) 286 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 287 #endif 288 /* Don't terminate the list prematurely on 64-bit host+guest. 
 */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};

/* getcwd(2) wrapper matching the kernel convention: on success return the
 * number of bytes in the path including the trailing NUL; on failure return
 * -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}

/* openat(2) wrapper: forward 'mode' only when O_CREAT is set. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}

#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc provides utimensat(): a NULL pathname means "operate on the
 * fd itself", which maps to futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper, but the host kernel has the syscall: call it directly. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin pass-through wrappers so the syscall dispatch table has uniform
 * sys_* entry points for the inotify family. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.
 * A slot is free when it holds 0; see next_free_host_timer(). */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock?
 */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* Mark the slot as taken with a dummy non-zero value; the real
             * timer_t is stored by the caller after timer_create(). */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    /* No free slot. */
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Only the EABI variant aligns register pairs; OABI does not. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
517 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 518 [EADDRINUSE] = TARGET_EADDRINUSE, 519 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 520 [ENETDOWN] = TARGET_ENETDOWN, 521 [ENETUNREACH] = TARGET_ENETUNREACH, 522 [ENETRESET] = TARGET_ENETRESET, 523 [ECONNABORTED] = TARGET_ECONNABORTED, 524 [ECONNRESET] = TARGET_ECONNRESET, 525 [ENOBUFS] = TARGET_ENOBUFS, 526 [EISCONN] = TARGET_EISCONN, 527 [ENOTCONN] = TARGET_ENOTCONN, 528 [EUCLEAN] = TARGET_EUCLEAN, 529 [ENOTNAM] = TARGET_ENOTNAM, 530 [ENAVAIL] = TARGET_ENAVAIL, 531 [EISNAM] = TARGET_EISNAM, 532 [EREMOTEIO] = TARGET_EREMOTEIO, 533 [ESHUTDOWN] = TARGET_ESHUTDOWN, 534 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 535 [ETIMEDOUT] = TARGET_ETIMEDOUT, 536 [ECONNREFUSED] = TARGET_ECONNREFUSED, 537 [EHOSTDOWN] = TARGET_EHOSTDOWN, 538 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 539 [EALREADY] = TARGET_EALREADY, 540 [EINPROGRESS] = TARGET_EINPROGRESS, 541 [ESTALE] = TARGET_ESTALE, 542 [ECANCELED] = TARGET_ECANCELED, 543 [ENOMEDIUM] = TARGET_ENOMEDIUM, 544 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 545 #ifdef ENOKEY 546 [ENOKEY] = TARGET_ENOKEY, 547 #endif 548 #ifdef EKEYEXPIRED 549 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 550 #endif 551 #ifdef EKEYREVOKED 552 [EKEYREVOKED] = TARGET_EKEYREVOKED, 553 #endif 554 #ifdef EKEYREJECTED 555 [EKEYREJECTED] = TARGET_EKEYREJECTED, 556 #endif 557 #ifdef EOWNERDEAD 558 [EOWNERDEAD] = TARGET_EOWNERDEAD, 559 #endif 560 #ifdef ENOTRECOVERABLE 561 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 562 #endif 563 }; 564 565 static inline int host_to_target_errno(int err) 566 { 567 if(host_to_target_errno_table[err]) 568 return host_to_target_errno_table[err]; 569 return err; 570 } 571 572 static inline int target_to_host_errno(int err) 573 { 574 if (target_to_host_errno_table[err]) 575 return target_to_host_errno_table[err]; 576 return err; 577 } 578 579 static inline abi_long get_errno(abi_long ret) 580 { 581 if (ret == -1) 582 return -host_to_target_errno(errno); 583 else 584 return ret; 585 } 586 587 static inline int 
is_error(abi_long ret) 588 { 589 return (abi_ulong)ret >= (abi_ulong)(-4096); 590 } 591 592 char *target_strerror(int err) 593 { 594 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) { 595 return NULL; 596 } 597 return strerror(target_to_host_errno(err)); 598 } 599 600 static inline int host_to_target_sock_type(int host_type) 601 { 602 int target_type; 603 604 switch (host_type & 0xf /* SOCK_TYPE_MASK */) { 605 case SOCK_DGRAM: 606 target_type = TARGET_SOCK_DGRAM; 607 break; 608 case SOCK_STREAM: 609 target_type = TARGET_SOCK_STREAM; 610 break; 611 default: 612 target_type = host_type & 0xf /* SOCK_TYPE_MASK */; 613 break; 614 } 615 616 #if defined(SOCK_CLOEXEC) 617 if (host_type & SOCK_CLOEXEC) { 618 target_type |= TARGET_SOCK_CLOEXEC; 619 } 620 #endif 621 622 #if defined(SOCK_NONBLOCK) 623 if (host_type & SOCK_NONBLOCK) { 624 target_type |= TARGET_SOCK_NONBLOCK; 625 } 626 #endif 627 628 return target_type; 629 } 630 631 static abi_ulong target_brk; 632 static abi_ulong target_original_brk; 633 static abi_ulong brk_page; 634 635 void target_set_brk(abi_ulong new_brk) 636 { 637 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); 638 brk_page = HOST_PAGE_ALIGN(target_brk); 639 } 640 641 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) 642 #define DEBUGF_BRK(message, args...) 643 644 /* do_brk() must return target values and target errnos. */ 645 abi_long do_brk(abi_ulong new_brk) 646 { 647 abi_long mapped_addr; 648 int new_alloc_size; 649 650 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk); 651 652 if (!new_brk) { 653 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk); 654 return target_brk; 655 } 656 if (new_brk < target_original_brk) { 657 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n", 658 target_brk); 659 return target_brk; 660 } 661 662 /* If the new brk is less than the highest page reserved to the 663 * target heap allocation, set it and we're almost done... 
     */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

/* Copy a guest fd_set (packed abi_ulong bitmap) into a host fd_set.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of abi_ulong words holding n bits. */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            /* NOTE(review): k can reach nw*TARGET_ABI_BITS-1; the caller
             * must keep n <= FD_SETSIZE or FD_SET overflows the host
             * fd_set — verify all callers bound n. */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/* As copy_from_user_fdset(), but a zero guest address yields a NULL
 * fd_set pointer (select() semantics for absent sets). */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/* Copy a host fd_set back out to the guest's packed bitmap format.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Byte-swap and copy a host struct rusage out to guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline
rlim_t target_to_host_rlim(abi_ulong target_rlim) 844 { 845 abi_ulong target_rlim_swap; 846 rlim_t result; 847 848 target_rlim_swap = tswapal(target_rlim); 849 if (target_rlim_swap == TARGET_RLIM_INFINITY) 850 return RLIM_INFINITY; 851 852 result = target_rlim_swap; 853 if (target_rlim_swap != (rlim_t)result) 854 return RLIM_INFINITY; 855 856 return result; 857 } 858 859 static inline abi_ulong host_to_target_rlim(rlim_t rlim) 860 { 861 abi_ulong target_rlim_swap; 862 abi_ulong result; 863 864 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) 865 target_rlim_swap = TARGET_RLIM_INFINITY; 866 else 867 target_rlim_swap = rlim; 868 result = tswapal(target_rlim_swap); 869 870 return result; 871 } 872 873 static inline int target_to_host_resource(int code) 874 { 875 switch (code) { 876 case TARGET_RLIMIT_AS: 877 return RLIMIT_AS; 878 case TARGET_RLIMIT_CORE: 879 return RLIMIT_CORE; 880 case TARGET_RLIMIT_CPU: 881 return RLIMIT_CPU; 882 case TARGET_RLIMIT_DATA: 883 return RLIMIT_DATA; 884 case TARGET_RLIMIT_FSIZE: 885 return RLIMIT_FSIZE; 886 case TARGET_RLIMIT_LOCKS: 887 return RLIMIT_LOCKS; 888 case TARGET_RLIMIT_MEMLOCK: 889 return RLIMIT_MEMLOCK; 890 case TARGET_RLIMIT_MSGQUEUE: 891 return RLIMIT_MSGQUEUE; 892 case TARGET_RLIMIT_NICE: 893 return RLIMIT_NICE; 894 case TARGET_RLIMIT_NOFILE: 895 return RLIMIT_NOFILE; 896 case TARGET_RLIMIT_NPROC: 897 return RLIMIT_NPROC; 898 case TARGET_RLIMIT_RSS: 899 return RLIMIT_RSS; 900 case TARGET_RLIMIT_RTPRIO: 901 return RLIMIT_RTPRIO; 902 case TARGET_RLIMIT_SIGPENDING: 903 return RLIMIT_SIGPENDING; 904 case TARGET_RLIMIT_STACK: 905 return RLIMIT_STACK; 906 default: 907 return code; 908 } 909 } 910 911 static inline abi_long copy_from_user_timeval(struct timeval *tv, 912 abi_ulong target_tv_addr) 913 { 914 struct target_timeval *target_tv; 915 916 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) 917 return -TARGET_EFAULT; 918 919 __get_user(tv->tv_sec, &target_tv->tv_sec); 920 __get_user(tv->tv_usec, 
&target_tv->tv_usec); 921 922 unlock_user_struct(target_tv, target_tv_addr, 0); 923 924 return 0; 925 } 926 927 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 928 const struct timeval *tv) 929 { 930 struct target_timeval *target_tv; 931 932 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) 933 return -TARGET_EFAULT; 934 935 __put_user(tv->tv_sec, &target_tv->tv_sec); 936 __put_user(tv->tv_usec, &target_tv->tv_usec); 937 938 unlock_user_struct(target_tv, target_tv_addr, 1); 939 940 return 0; 941 } 942 943 static inline abi_long copy_from_user_timezone(struct timezone *tz, 944 abi_ulong target_tz_addr) 945 { 946 struct target_timezone *target_tz; 947 948 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) { 949 return -TARGET_EFAULT; 950 } 951 952 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest); 953 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime); 954 955 unlock_user_struct(target_tz, target_tz_addr, 0); 956 957 return 0; 958 } 959 960 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 961 #include <mqueue.h> 962 963 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr, 964 abi_ulong target_mq_attr_addr) 965 { 966 struct target_mq_attr *target_mq_attr; 967 968 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 969 target_mq_attr_addr, 1)) 970 return -TARGET_EFAULT; 971 972 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 973 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 974 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 975 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 976 977 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 978 979 return 0; 980 } 981 982 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 983 const struct mq_attr *attr) 984 { 985 struct target_mq_attr *target_mq_attr; 986 987 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 988 target_mq_attr_addr, 0)) 989 return -TARGET_EFAULT; 990 
991 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 992 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 993 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 994 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 995 996 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 997 998 return 0; 999 } 1000 #endif 1001 1002 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 1003 /* do_select() must return target values and target errnos. */ 1004 static abi_long do_select(int n, 1005 abi_ulong rfd_addr, abi_ulong wfd_addr, 1006 abi_ulong efd_addr, abi_ulong target_tv_addr) 1007 { 1008 fd_set rfds, wfds, efds; 1009 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 1010 struct timeval tv, *tv_ptr; 1011 abi_long ret; 1012 1013 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 1014 if (ret) { 1015 return ret; 1016 } 1017 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 1018 if (ret) { 1019 return ret; 1020 } 1021 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 1022 if (ret) { 1023 return ret; 1024 } 1025 1026 if (target_tv_addr) { 1027 if (copy_from_user_timeval(&tv, target_tv_addr)) 1028 return -TARGET_EFAULT; 1029 tv_ptr = &tv; 1030 } else { 1031 tv_ptr = NULL; 1032 } 1033 1034 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr)); 1035 1036 if (!is_error(ret)) { 1037 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1038 return -TARGET_EFAULT; 1039 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1040 return -TARGET_EFAULT; 1041 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1042 return -TARGET_EFAULT; 1043 1044 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv)) 1045 return -TARGET_EFAULT; 1046 } 1047 1048 return ret; 1049 } 1050 #endif 1051 1052 static abi_long do_pipe2(int host_pipe[], int flags) 1053 { 1054 #ifdef CONFIG_PIPE2 1055 return pipe2(host_pipe, flags); 1056 #else 1057 return -ENOSYS; 1058 #endif 1059 } 1060 1061 static abi_long 
do_pipe(void *cpu_env, abi_ulong pipedes, 1062 int flags, int is_pipe2) 1063 { 1064 int host_pipe[2]; 1065 abi_long ret; 1066 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); 1067 1068 if (is_error(ret)) 1069 return get_errno(ret); 1070 1071 /* Several targets have special calling conventions for the original 1072 pipe syscall, but didn't replicate this into the pipe2 syscall. */ 1073 if (!is_pipe2) { 1074 #if defined(TARGET_ALPHA) 1075 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1]; 1076 return host_pipe[0]; 1077 #elif defined(TARGET_MIPS) 1078 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; 1079 return host_pipe[0]; 1080 #elif defined(TARGET_SH4) 1081 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; 1082 return host_pipe[0]; 1083 #elif defined(TARGET_SPARC) 1084 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1]; 1085 return host_pipe[0]; 1086 #endif 1087 } 1088 1089 if (put_user_s32(host_pipe[0], pipedes) 1090 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0]))) 1091 return -TARGET_EFAULT; 1092 return get_errno(ret); 1093 } 1094 1095 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn, 1096 abi_ulong target_addr, 1097 socklen_t len) 1098 { 1099 struct target_ip_mreqn *target_smreqn; 1100 1101 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1); 1102 if (!target_smreqn) 1103 return -TARGET_EFAULT; 1104 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr; 1105 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr; 1106 if (len == sizeof(struct target_ip_mreqn)) 1107 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex); 1108 unlock_user(target_smreqn, target_addr, 0); 1109 1110 return 0; 1111 } 1112 1113 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr, 1114 abi_ulong target_addr, 1115 socklen_t len) 1116 { 1117 const socklen_t unix_maxlen = sizeof (struct sockaddr_un); 1118 sa_family_t sa_family; 1119 struct target_sockaddr *target_saddr; 1120 1121 
target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 1122 if (!target_saddr) 1123 return -TARGET_EFAULT; 1124 1125 sa_family = tswap16(target_saddr->sa_family); 1126 1127 /* Oops. The caller might send a incomplete sun_path; sun_path 1128 * must be terminated by \0 (see the manual page), but 1129 * unfortunately it is quite common to specify sockaddr_un 1130 * length as "strlen(x->sun_path)" while it should be 1131 * "strlen(...) + 1". We'll fix that here if needed. 1132 * Linux kernel has a similar feature. 1133 */ 1134 1135 if (sa_family == AF_UNIX) { 1136 if (len < unix_maxlen && len > 0) { 1137 char *cp = (char*)target_saddr; 1138 1139 if ( cp[len-1] && !cp[len] ) 1140 len++; 1141 } 1142 if (len > unix_maxlen) 1143 len = unix_maxlen; 1144 } 1145 1146 memcpy(addr, target_saddr, len); 1147 addr->sa_family = sa_family; 1148 if (sa_family == AF_PACKET) { 1149 struct target_sockaddr_ll *lladdr; 1150 1151 lladdr = (struct target_sockaddr_ll *)addr; 1152 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex); 1153 lladdr->sll_hatype = tswap16(lladdr->sll_hatype); 1154 } 1155 unlock_user(target_saddr, target_addr, 0); 1156 1157 return 0; 1158 } 1159 1160 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1161 struct sockaddr *addr, 1162 socklen_t len) 1163 { 1164 struct target_sockaddr *target_saddr; 1165 1166 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1167 if (!target_saddr) 1168 return -TARGET_EFAULT; 1169 memcpy(target_saddr, addr, len); 1170 target_saddr->sa_family = tswap16(addr->sa_family); 1171 unlock_user(target_saddr, target_addr, len); 1172 1173 return 0; 1174 } 1175 1176 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1177 struct target_msghdr *target_msgh) 1178 { 1179 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1180 abi_long msg_controllen; 1181 abi_ulong target_cmsg_addr; 1182 struct target_cmsghdr *target_cmsg; 1183 socklen_t space = 0; 1184 1185 msg_controllen = tswapal(target_msgh->msg_controllen); 
1186 if (msg_controllen < sizeof (struct target_cmsghdr)) 1187 goto the_end; 1188 target_cmsg_addr = tswapal(target_msgh->msg_control); 1189 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1190 if (!target_cmsg) 1191 return -TARGET_EFAULT; 1192 1193 while (cmsg && target_cmsg) { 1194 void *data = CMSG_DATA(cmsg); 1195 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1196 1197 int len = tswapal(target_cmsg->cmsg_len) 1198 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr)); 1199 1200 space += CMSG_SPACE(len); 1201 if (space > msgh->msg_controllen) { 1202 space -= CMSG_SPACE(len); 1203 gemu_log("Host cmsg overflow\n"); 1204 break; 1205 } 1206 1207 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1208 cmsg->cmsg_level = SOL_SOCKET; 1209 } else { 1210 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1211 } 1212 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1213 cmsg->cmsg_len = CMSG_LEN(len); 1214 1215 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1216 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1217 memcpy(data, target_data, len); 1218 } else { 1219 int *fd = (int *)data; 1220 int *target_fd = (int *)target_data; 1221 int i, numfds = len / sizeof(int); 1222 1223 for (i = 0; i < numfds; i++) 1224 fd[i] = tswap32(target_fd[i]); 1225 } 1226 1227 cmsg = CMSG_NXTHDR(msgh, cmsg); 1228 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1229 } 1230 unlock_user(target_cmsg, target_cmsg_addr, 0); 1231 the_end: 1232 msgh->msg_controllen = space; 1233 return 0; 1234 } 1235 1236 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1237 struct msghdr *msgh) 1238 { 1239 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1240 abi_long msg_controllen; 1241 abi_ulong target_cmsg_addr; 1242 struct target_cmsghdr *target_cmsg; 1243 socklen_t space = 0; 1244 1245 msg_controllen = tswapal(target_msgh->msg_controllen); 1246 if (msg_controllen < sizeof 
(struct target_cmsghdr)) 1247 goto the_end; 1248 target_cmsg_addr = tswapal(target_msgh->msg_control); 1249 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1250 if (!target_cmsg) 1251 return -TARGET_EFAULT; 1252 1253 while (cmsg && target_cmsg) { 1254 void *data = CMSG_DATA(cmsg); 1255 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1256 1257 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr)); 1258 1259 space += TARGET_CMSG_SPACE(len); 1260 if (space > msg_controllen) { 1261 space -= TARGET_CMSG_SPACE(len); 1262 gemu_log("Target cmsg overflow\n"); 1263 break; 1264 } 1265 1266 if (cmsg->cmsg_level == SOL_SOCKET) { 1267 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1268 } else { 1269 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1270 } 1271 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1272 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len)); 1273 1274 switch (cmsg->cmsg_level) { 1275 case SOL_SOCKET: 1276 switch (cmsg->cmsg_type) { 1277 case SCM_RIGHTS: 1278 { 1279 int *fd = (int *)data; 1280 int *target_fd = (int *)target_data; 1281 int i, numfds = len / sizeof(int); 1282 1283 for (i = 0; i < numfds; i++) 1284 target_fd[i] = tswap32(fd[i]); 1285 break; 1286 } 1287 case SO_TIMESTAMP: 1288 { 1289 struct timeval *tv = (struct timeval *)data; 1290 struct target_timeval *target_tv = 1291 (struct target_timeval *)target_data; 1292 1293 if (len != sizeof(struct timeval)) 1294 goto unimplemented; 1295 1296 /* copy struct timeval to target */ 1297 target_tv->tv_sec = tswapal(tv->tv_sec); 1298 target_tv->tv_usec = tswapal(tv->tv_usec); 1299 break; 1300 } 1301 case SCM_CREDENTIALS: 1302 { 1303 struct ucred *cred = (struct ucred *)data; 1304 struct target_ucred *target_cred = 1305 (struct target_ucred *)target_data; 1306 1307 __put_user(cred->pid, &target_cred->pid); 1308 __put_user(cred->uid, &target_cred->uid); 1309 __put_user(cred->gid, &target_cred->gid); 1310 break; 1311 } 1312 default: 1313 goto 
unimplemented; 1314 } 1315 break; 1316 1317 default: 1318 unimplemented: 1319 gemu_log("Unsupported ancillary data: %d/%d\n", 1320 cmsg->cmsg_level, cmsg->cmsg_type); 1321 memcpy(target_data, data, len); 1322 } 1323 1324 cmsg = CMSG_NXTHDR(msgh, cmsg); 1325 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1326 } 1327 unlock_user(target_cmsg, target_cmsg_addr, space); 1328 the_end: 1329 target_msgh->msg_controllen = tswapal(space); 1330 return 0; 1331 } 1332 1333 /* do_setsockopt() Must return target values and target errnos. */ 1334 static abi_long do_setsockopt(int sockfd, int level, int optname, 1335 abi_ulong optval_addr, socklen_t optlen) 1336 { 1337 abi_long ret; 1338 int val; 1339 struct ip_mreqn *ip_mreq; 1340 struct ip_mreq_source *ip_mreq_source; 1341 1342 switch(level) { 1343 case SOL_TCP: 1344 /* TCP options all take an 'int' value. */ 1345 if (optlen < sizeof(uint32_t)) 1346 return -TARGET_EINVAL; 1347 1348 if (get_user_u32(val, optval_addr)) 1349 return -TARGET_EFAULT; 1350 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1351 break; 1352 case SOL_IP: 1353 switch(optname) { 1354 case IP_TOS: 1355 case IP_TTL: 1356 case IP_HDRINCL: 1357 case IP_ROUTER_ALERT: 1358 case IP_RECVOPTS: 1359 case IP_RETOPTS: 1360 case IP_PKTINFO: 1361 case IP_MTU_DISCOVER: 1362 case IP_RECVERR: 1363 case IP_RECVTOS: 1364 #ifdef IP_FREEBIND 1365 case IP_FREEBIND: 1366 #endif 1367 case IP_MULTICAST_TTL: 1368 case IP_MULTICAST_LOOP: 1369 val = 0; 1370 if (optlen >= sizeof(uint32_t)) { 1371 if (get_user_u32(val, optval_addr)) 1372 return -TARGET_EFAULT; 1373 } else if (optlen >= 1) { 1374 if (get_user_u8(val, optval_addr)) 1375 return -TARGET_EFAULT; 1376 } 1377 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1378 break; 1379 case IP_ADD_MEMBERSHIP: 1380 case IP_DROP_MEMBERSHIP: 1381 if (optlen < sizeof (struct target_ip_mreq) || 1382 optlen > sizeof (struct target_ip_mreqn)) 1383 return -TARGET_EINVAL; 1384 1385 ip_mreq = 
(struct ip_mreqn *) alloca(optlen); 1386 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1387 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1388 break; 1389 1390 case IP_BLOCK_SOURCE: 1391 case IP_UNBLOCK_SOURCE: 1392 case IP_ADD_SOURCE_MEMBERSHIP: 1393 case IP_DROP_SOURCE_MEMBERSHIP: 1394 if (optlen != sizeof (struct target_ip_mreq_source)) 1395 return -TARGET_EINVAL; 1396 1397 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1398 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1399 unlock_user (ip_mreq_source, optval_addr, 0); 1400 break; 1401 1402 default: 1403 goto unimplemented; 1404 } 1405 break; 1406 case SOL_IPV6: 1407 switch (optname) { 1408 case IPV6_MTU_DISCOVER: 1409 case IPV6_MTU: 1410 case IPV6_V6ONLY: 1411 case IPV6_RECVPKTINFO: 1412 val = 0; 1413 if (optlen < sizeof(uint32_t)) { 1414 return -TARGET_EINVAL; 1415 } 1416 if (get_user_u32(val, optval_addr)) { 1417 return -TARGET_EFAULT; 1418 } 1419 ret = get_errno(setsockopt(sockfd, level, optname, 1420 &val, sizeof(val))); 1421 break; 1422 default: 1423 goto unimplemented; 1424 } 1425 break; 1426 case SOL_RAW: 1427 switch (optname) { 1428 case ICMP_FILTER: 1429 /* struct icmp_filter takes an u32 value */ 1430 if (optlen < sizeof(uint32_t)) { 1431 return -TARGET_EINVAL; 1432 } 1433 1434 if (get_user_u32(val, optval_addr)) { 1435 return -TARGET_EFAULT; 1436 } 1437 ret = get_errno(setsockopt(sockfd, level, optname, 1438 &val, sizeof(val))); 1439 break; 1440 1441 default: 1442 goto unimplemented; 1443 } 1444 break; 1445 case TARGET_SOL_SOCKET: 1446 switch (optname) { 1447 case TARGET_SO_RCVTIMEO: 1448 { 1449 struct timeval tv; 1450 1451 optname = SO_RCVTIMEO; 1452 1453 set_timeout: 1454 if (optlen != sizeof(struct target_timeval)) { 1455 return -TARGET_EINVAL; 1456 } 1457 1458 if (copy_from_user_timeval(&tv, optval_addr)) { 1459 return -TARGET_EFAULT; 1460 } 1461 1462 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 1463 &tv, 
sizeof(tv))); 1464 return ret; 1465 } 1466 case TARGET_SO_SNDTIMEO: 1467 optname = SO_SNDTIMEO; 1468 goto set_timeout; 1469 case TARGET_SO_ATTACH_FILTER: 1470 { 1471 struct target_sock_fprog *tfprog; 1472 struct target_sock_filter *tfilter; 1473 struct sock_fprog fprog; 1474 struct sock_filter *filter; 1475 int i; 1476 1477 if (optlen != sizeof(*tfprog)) { 1478 return -TARGET_EINVAL; 1479 } 1480 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 1481 return -TARGET_EFAULT; 1482 } 1483 if (!lock_user_struct(VERIFY_READ, tfilter, 1484 tswapal(tfprog->filter), 0)) { 1485 unlock_user_struct(tfprog, optval_addr, 1); 1486 return -TARGET_EFAULT; 1487 } 1488 1489 fprog.len = tswap16(tfprog->len); 1490 filter = malloc(fprog.len * sizeof(*filter)); 1491 if (filter == NULL) { 1492 unlock_user_struct(tfilter, tfprog->filter, 1); 1493 unlock_user_struct(tfprog, optval_addr, 1); 1494 return -TARGET_ENOMEM; 1495 } 1496 for (i = 0; i < fprog.len; i++) { 1497 filter[i].code = tswap16(tfilter[i].code); 1498 filter[i].jt = tfilter[i].jt; 1499 filter[i].jf = tfilter[i].jf; 1500 filter[i].k = tswap32(tfilter[i].k); 1501 } 1502 fprog.filter = filter; 1503 1504 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 1505 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 1506 free(filter); 1507 1508 unlock_user_struct(tfilter, tfprog->filter, 1); 1509 unlock_user_struct(tfprog, optval_addr, 1); 1510 return ret; 1511 } 1512 case TARGET_SO_BINDTODEVICE: 1513 { 1514 char *dev_ifname, *addr_ifname; 1515 1516 if (optlen > IFNAMSIZ - 1) { 1517 optlen = IFNAMSIZ - 1; 1518 } 1519 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1520 if (!dev_ifname) { 1521 return -TARGET_EFAULT; 1522 } 1523 optname = SO_BINDTODEVICE; 1524 addr_ifname = alloca(IFNAMSIZ); 1525 memcpy(addr_ifname, dev_ifname, optlen); 1526 addr_ifname[optlen] = 0; 1527 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen)); 1528 unlock_user (dev_ifname, optval_addr, 0); 1529 return ret; 1530 } 1531 /* 
Options with 'int' argument. */ 1532 case TARGET_SO_DEBUG: 1533 optname = SO_DEBUG; 1534 break; 1535 case TARGET_SO_REUSEADDR: 1536 optname = SO_REUSEADDR; 1537 break; 1538 case TARGET_SO_TYPE: 1539 optname = SO_TYPE; 1540 break; 1541 case TARGET_SO_ERROR: 1542 optname = SO_ERROR; 1543 break; 1544 case TARGET_SO_DONTROUTE: 1545 optname = SO_DONTROUTE; 1546 break; 1547 case TARGET_SO_BROADCAST: 1548 optname = SO_BROADCAST; 1549 break; 1550 case TARGET_SO_SNDBUF: 1551 optname = SO_SNDBUF; 1552 break; 1553 case TARGET_SO_SNDBUFFORCE: 1554 optname = SO_SNDBUFFORCE; 1555 break; 1556 case TARGET_SO_RCVBUF: 1557 optname = SO_RCVBUF; 1558 break; 1559 case TARGET_SO_RCVBUFFORCE: 1560 optname = SO_RCVBUFFORCE; 1561 break; 1562 case TARGET_SO_KEEPALIVE: 1563 optname = SO_KEEPALIVE; 1564 break; 1565 case TARGET_SO_OOBINLINE: 1566 optname = SO_OOBINLINE; 1567 break; 1568 case TARGET_SO_NO_CHECK: 1569 optname = SO_NO_CHECK; 1570 break; 1571 case TARGET_SO_PRIORITY: 1572 optname = SO_PRIORITY; 1573 break; 1574 #ifdef SO_BSDCOMPAT 1575 case TARGET_SO_BSDCOMPAT: 1576 optname = SO_BSDCOMPAT; 1577 break; 1578 #endif 1579 case TARGET_SO_PASSCRED: 1580 optname = SO_PASSCRED; 1581 break; 1582 case TARGET_SO_PASSSEC: 1583 optname = SO_PASSSEC; 1584 break; 1585 case TARGET_SO_TIMESTAMP: 1586 optname = SO_TIMESTAMP; 1587 break; 1588 case TARGET_SO_RCVLOWAT: 1589 optname = SO_RCVLOWAT; 1590 break; 1591 break; 1592 default: 1593 goto unimplemented; 1594 } 1595 if (optlen < sizeof(uint32_t)) 1596 return -TARGET_EINVAL; 1597 1598 if (get_user_u32(val, optval_addr)) 1599 return -TARGET_EFAULT; 1600 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 1601 break; 1602 default: 1603 unimplemented: 1604 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname); 1605 ret = -TARGET_ENOPROTOOPT; 1606 } 1607 return ret; 1608 } 1609 1610 /* do_getsockopt() Must return target values and target errnos. 
*/ 1611 static abi_long do_getsockopt(int sockfd, int level, int optname, 1612 abi_ulong optval_addr, abi_ulong optlen) 1613 { 1614 abi_long ret; 1615 int len, val; 1616 socklen_t lv; 1617 1618 switch(level) { 1619 case TARGET_SOL_SOCKET: 1620 level = SOL_SOCKET; 1621 switch (optname) { 1622 /* These don't just return a single integer */ 1623 case TARGET_SO_LINGER: 1624 case TARGET_SO_RCVTIMEO: 1625 case TARGET_SO_SNDTIMEO: 1626 case TARGET_SO_PEERNAME: 1627 goto unimplemented; 1628 case TARGET_SO_PEERCRED: { 1629 struct ucred cr; 1630 socklen_t crlen; 1631 struct target_ucred *tcr; 1632 1633 if (get_user_u32(len, optlen)) { 1634 return -TARGET_EFAULT; 1635 } 1636 if (len < 0) { 1637 return -TARGET_EINVAL; 1638 } 1639 1640 crlen = sizeof(cr); 1641 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 1642 &cr, &crlen)); 1643 if (ret < 0) { 1644 return ret; 1645 } 1646 if (len > crlen) { 1647 len = crlen; 1648 } 1649 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 1650 return -TARGET_EFAULT; 1651 } 1652 __put_user(cr.pid, &tcr->pid); 1653 __put_user(cr.uid, &tcr->uid); 1654 __put_user(cr.gid, &tcr->gid); 1655 unlock_user_struct(tcr, optval_addr, 1); 1656 if (put_user_u32(len, optlen)) { 1657 return -TARGET_EFAULT; 1658 } 1659 break; 1660 } 1661 /* Options with 'int' argument. 
*/ 1662 case TARGET_SO_DEBUG: 1663 optname = SO_DEBUG; 1664 goto int_case; 1665 case TARGET_SO_REUSEADDR: 1666 optname = SO_REUSEADDR; 1667 goto int_case; 1668 case TARGET_SO_TYPE: 1669 optname = SO_TYPE; 1670 goto int_case; 1671 case TARGET_SO_ERROR: 1672 optname = SO_ERROR; 1673 goto int_case; 1674 case TARGET_SO_DONTROUTE: 1675 optname = SO_DONTROUTE; 1676 goto int_case; 1677 case TARGET_SO_BROADCAST: 1678 optname = SO_BROADCAST; 1679 goto int_case; 1680 case TARGET_SO_SNDBUF: 1681 optname = SO_SNDBUF; 1682 goto int_case; 1683 case TARGET_SO_RCVBUF: 1684 optname = SO_RCVBUF; 1685 goto int_case; 1686 case TARGET_SO_KEEPALIVE: 1687 optname = SO_KEEPALIVE; 1688 goto int_case; 1689 case TARGET_SO_OOBINLINE: 1690 optname = SO_OOBINLINE; 1691 goto int_case; 1692 case TARGET_SO_NO_CHECK: 1693 optname = SO_NO_CHECK; 1694 goto int_case; 1695 case TARGET_SO_PRIORITY: 1696 optname = SO_PRIORITY; 1697 goto int_case; 1698 #ifdef SO_BSDCOMPAT 1699 case TARGET_SO_BSDCOMPAT: 1700 optname = SO_BSDCOMPAT; 1701 goto int_case; 1702 #endif 1703 case TARGET_SO_PASSCRED: 1704 optname = SO_PASSCRED; 1705 goto int_case; 1706 case TARGET_SO_TIMESTAMP: 1707 optname = SO_TIMESTAMP; 1708 goto int_case; 1709 case TARGET_SO_RCVLOWAT: 1710 optname = SO_RCVLOWAT; 1711 goto int_case; 1712 case TARGET_SO_ACCEPTCONN: 1713 optname = SO_ACCEPTCONN; 1714 goto int_case; 1715 default: 1716 goto int_case; 1717 } 1718 break; 1719 case SOL_TCP: 1720 /* TCP options all take an 'int' value. 
*/ 1721 int_case: 1722 if (get_user_u32(len, optlen)) 1723 return -TARGET_EFAULT; 1724 if (len < 0) 1725 return -TARGET_EINVAL; 1726 lv = sizeof(lv); 1727 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1728 if (ret < 0) 1729 return ret; 1730 if (optname == SO_TYPE) { 1731 val = host_to_target_sock_type(val); 1732 } 1733 if (len > lv) 1734 len = lv; 1735 if (len == 4) { 1736 if (put_user_u32(val, optval_addr)) 1737 return -TARGET_EFAULT; 1738 } else { 1739 if (put_user_u8(val, optval_addr)) 1740 return -TARGET_EFAULT; 1741 } 1742 if (put_user_u32(len, optlen)) 1743 return -TARGET_EFAULT; 1744 break; 1745 case SOL_IP: 1746 switch(optname) { 1747 case IP_TOS: 1748 case IP_TTL: 1749 case IP_HDRINCL: 1750 case IP_ROUTER_ALERT: 1751 case IP_RECVOPTS: 1752 case IP_RETOPTS: 1753 case IP_PKTINFO: 1754 case IP_MTU_DISCOVER: 1755 case IP_RECVERR: 1756 case IP_RECVTOS: 1757 #ifdef IP_FREEBIND 1758 case IP_FREEBIND: 1759 #endif 1760 case IP_MULTICAST_TTL: 1761 case IP_MULTICAST_LOOP: 1762 if (get_user_u32(len, optlen)) 1763 return -TARGET_EFAULT; 1764 if (len < 0) 1765 return -TARGET_EINVAL; 1766 lv = sizeof(lv); 1767 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1768 if (ret < 0) 1769 return ret; 1770 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 1771 len = 1; 1772 if (put_user_u32(len, optlen) 1773 || put_user_u8(val, optval_addr)) 1774 return -TARGET_EFAULT; 1775 } else { 1776 if (len > sizeof(int)) 1777 len = sizeof(int); 1778 if (put_user_u32(len, optlen) 1779 || put_user_u32(val, optval_addr)) 1780 return -TARGET_EFAULT; 1781 } 1782 break; 1783 default: 1784 ret = -TARGET_ENOPROTOOPT; 1785 break; 1786 } 1787 break; 1788 default: 1789 unimplemented: 1790 gemu_log("getsockopt level=%d optname=%d not yet supported\n", 1791 level, optname); 1792 ret = -TARGET_EOPNOTSUPP; 1793 break; 1794 } 1795 return ret; 1796 } 1797 1798 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 1799 int count, int copy) 1800 { 1801 
struct target_iovec *target_vec; 1802 struct iovec *vec; 1803 abi_ulong total_len, max_len; 1804 int i; 1805 int err = 0; 1806 bool bad_address = false; 1807 1808 if (count == 0) { 1809 errno = 0; 1810 return NULL; 1811 } 1812 if (count < 0 || count > IOV_MAX) { 1813 errno = EINVAL; 1814 return NULL; 1815 } 1816 1817 vec = calloc(count, sizeof(struct iovec)); 1818 if (vec == NULL) { 1819 errno = ENOMEM; 1820 return NULL; 1821 } 1822 1823 target_vec = lock_user(VERIFY_READ, target_addr, 1824 count * sizeof(struct target_iovec), 1); 1825 if (target_vec == NULL) { 1826 err = EFAULT; 1827 goto fail2; 1828 } 1829 1830 /* ??? If host page size > target page size, this will result in a 1831 value larger than what we can actually support. */ 1832 max_len = 0x7fffffff & TARGET_PAGE_MASK; 1833 total_len = 0; 1834 1835 for (i = 0; i < count; i++) { 1836 abi_ulong base = tswapal(target_vec[i].iov_base); 1837 abi_long len = tswapal(target_vec[i].iov_len); 1838 1839 if (len < 0) { 1840 err = EINVAL; 1841 goto fail; 1842 } else if (len == 0) { 1843 /* Zero length pointer is ignored. */ 1844 vec[i].iov_base = 0; 1845 } else { 1846 vec[i].iov_base = lock_user(type, base, len, copy); 1847 /* If the first buffer pointer is bad, this is a fault. But 1848 * subsequent bad buffers will result in a partial write; this 1849 * is realized by filling the vector with null pointers and 1850 * zero lengths. 
*/ 1851 if (!vec[i].iov_base) { 1852 if (i == 0) { 1853 err = EFAULT; 1854 goto fail; 1855 } else { 1856 bad_address = true; 1857 } 1858 } 1859 if (bad_address) { 1860 len = 0; 1861 } 1862 if (len > max_len - total_len) { 1863 len = max_len - total_len; 1864 } 1865 } 1866 vec[i].iov_len = len; 1867 total_len += len; 1868 } 1869 1870 unlock_user(target_vec, target_addr, 0); 1871 return vec; 1872 1873 fail: 1874 unlock_user(target_vec, target_addr, 0); 1875 fail2: 1876 free(vec); 1877 errno = err; 1878 return NULL; 1879 } 1880 1881 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 1882 int count, int copy) 1883 { 1884 struct target_iovec *target_vec; 1885 int i; 1886 1887 target_vec = lock_user(VERIFY_READ, target_addr, 1888 count * sizeof(struct target_iovec), 1); 1889 if (target_vec) { 1890 for (i = 0; i < count; i++) { 1891 abi_ulong base = tswapal(target_vec[i].iov_base); 1892 abi_long len = tswapal(target_vec[i].iov_base); 1893 if (len < 0) { 1894 break; 1895 } 1896 unlock_user(vec[i].iov_base, base, copy ? 
vec[i].iov_len : 0); 1897 } 1898 unlock_user(target_vec, target_addr, 0); 1899 } 1900 1901 free(vec); 1902 } 1903 1904 static inline int target_to_host_sock_type(int *type) 1905 { 1906 int host_type = 0; 1907 int target_type = *type; 1908 1909 switch (target_type & TARGET_SOCK_TYPE_MASK) { 1910 case TARGET_SOCK_DGRAM: 1911 host_type = SOCK_DGRAM; 1912 break; 1913 case TARGET_SOCK_STREAM: 1914 host_type = SOCK_STREAM; 1915 break; 1916 default: 1917 host_type = target_type & TARGET_SOCK_TYPE_MASK; 1918 break; 1919 } 1920 if (target_type & TARGET_SOCK_CLOEXEC) { 1921 #if defined(SOCK_CLOEXEC) 1922 host_type |= SOCK_CLOEXEC; 1923 #else 1924 return -TARGET_EINVAL; 1925 #endif 1926 } 1927 if (target_type & TARGET_SOCK_NONBLOCK) { 1928 #if defined(SOCK_NONBLOCK) 1929 host_type |= SOCK_NONBLOCK; 1930 #elif !defined(O_NONBLOCK) 1931 return -TARGET_EINVAL; 1932 #endif 1933 } 1934 *type = host_type; 1935 return 0; 1936 } 1937 1938 /* Try to emulate socket type flags after socket creation. */ 1939 static int sock_flags_fixup(int fd, int target_type) 1940 { 1941 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 1942 if (target_type & TARGET_SOCK_NONBLOCK) { 1943 int flags = fcntl(fd, F_GETFL); 1944 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 1945 close(fd); 1946 return -TARGET_EINVAL; 1947 } 1948 } 1949 #endif 1950 return fd; 1951 } 1952 1953 /* do_socket() Must return target values and target errnos. */ 1954 static abi_long do_socket(int domain, int type, int protocol) 1955 { 1956 int target_type = type; 1957 int ret; 1958 1959 ret = target_to_host_sock_type(&type); 1960 if (ret) { 1961 return ret; 1962 } 1963 1964 if (domain == PF_NETLINK) 1965 return -TARGET_EAFNOSUPPORT; 1966 ret = get_errno(socket(domain, type, protocol)); 1967 if (ret >= 0) { 1968 ret = sock_flags_fixup(ret, target_type); 1969 } 1970 return ret; 1971 } 1972 1973 /* do_bind() Must return target values and target errnos. 
*/ 1974 static abi_long do_bind(int sockfd, abi_ulong target_addr, 1975 socklen_t addrlen) 1976 { 1977 void *addr; 1978 abi_long ret; 1979 1980 if ((int)addrlen < 0) { 1981 return -TARGET_EINVAL; 1982 } 1983 1984 addr = alloca(addrlen+1); 1985 1986 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1987 if (ret) 1988 return ret; 1989 1990 return get_errno(bind(sockfd, addr, addrlen)); 1991 } 1992 1993 /* do_connect() Must return target values and target errnos. */ 1994 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1995 socklen_t addrlen) 1996 { 1997 void *addr; 1998 abi_long ret; 1999 2000 if ((int)addrlen < 0) { 2001 return -TARGET_EINVAL; 2002 } 2003 2004 addr = alloca(addrlen+1); 2005 2006 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2007 if (ret) 2008 return ret; 2009 2010 return get_errno(connect(sockfd, addr, addrlen)); 2011 } 2012 2013 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 2014 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 2015 int flags, int send) 2016 { 2017 abi_long ret, len; 2018 struct msghdr msg; 2019 int count; 2020 struct iovec *vec; 2021 abi_ulong target_vec; 2022 2023 if (msgp->msg_name) { 2024 msg.msg_namelen = tswap32(msgp->msg_namelen); 2025 msg.msg_name = alloca(msg.msg_namelen+1); 2026 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 2027 msg.msg_namelen); 2028 if (ret) { 2029 goto out2; 2030 } 2031 } else { 2032 msg.msg_name = NULL; 2033 msg.msg_namelen = 0; 2034 } 2035 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 2036 msg.msg_control = alloca(msg.msg_controllen); 2037 msg.msg_flags = tswap32(msgp->msg_flags); 2038 2039 count = tswapal(msgp->msg_iovlen); 2040 target_vec = tswapal(msgp->msg_iov); 2041 vec = lock_iovec(send ? 
VERIFY_READ : VERIFY_WRITE, 2042 target_vec, count, send); 2043 if (vec == NULL) { 2044 ret = -host_to_target_errno(errno); 2045 goto out2; 2046 } 2047 msg.msg_iovlen = count; 2048 msg.msg_iov = vec; 2049 2050 if (send) { 2051 ret = target_to_host_cmsg(&msg, msgp); 2052 if (ret == 0) 2053 ret = get_errno(sendmsg(fd, &msg, flags)); 2054 } else { 2055 ret = get_errno(recvmsg(fd, &msg, flags)); 2056 if (!is_error(ret)) { 2057 len = ret; 2058 ret = host_to_target_cmsg(msgp, &msg); 2059 if (!is_error(ret)) { 2060 msgp->msg_namelen = tswap32(msg.msg_namelen); 2061 if (msg.msg_name != NULL) { 2062 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 2063 msg.msg_name, msg.msg_namelen); 2064 if (ret) { 2065 goto out; 2066 } 2067 } 2068 2069 ret = len; 2070 } 2071 } 2072 } 2073 2074 out: 2075 unlock_iovec(vec, target_vec, count, !send); 2076 out2: 2077 return ret; 2078 } 2079 2080 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 2081 int flags, int send) 2082 { 2083 abi_long ret; 2084 struct target_msghdr *msgp; 2085 2086 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 2087 msgp, 2088 target_msg, 2089 send ? 1 : 0)) { 2090 return -TARGET_EFAULT; 2091 } 2092 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 2093 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 2094 return ret; 2095 } 2096 2097 #ifdef TARGET_NR_sendmmsg 2098 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 2099 * so it might not have this *mmsg-specific flag either. 
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Common implementation for the sendmmsg/recvmmsg syscalls (@send selects
 * the direction).  Walks up to @vlen target_mmsghdr entries at
 * @target_msgvec, handling each via do_sendrecvmsg_locked() and storing
 * the per-message byte count back into msg_len.  Returns the number of
 * messages processed if any succeeded, otherwise the (target) error from
 * the first failing message.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Clamp oversized vectors instead of failing. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the i entries that were actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}

/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif

/* do_accept4() Must return target values and target errnos.
*/ 2160 static abi_long do_accept4(int fd, abi_ulong target_addr, 2161 abi_ulong target_addrlen_addr, int flags) 2162 { 2163 socklen_t addrlen; 2164 void *addr; 2165 abi_long ret; 2166 int host_flags; 2167 2168 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 2169 2170 if (target_addr == 0) { 2171 return get_errno(accept4(fd, NULL, NULL, host_flags)); 2172 } 2173 2174 /* linux returns EINVAL if addrlen pointer is invalid */ 2175 if (get_user_u32(addrlen, target_addrlen_addr)) 2176 return -TARGET_EINVAL; 2177 2178 if ((int)addrlen < 0) { 2179 return -TARGET_EINVAL; 2180 } 2181 2182 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2183 return -TARGET_EINVAL; 2184 2185 addr = alloca(addrlen); 2186 2187 ret = get_errno(accept4(fd, addr, &addrlen, host_flags)); 2188 if (!is_error(ret)) { 2189 host_to_target_sockaddr(target_addr, addr, addrlen); 2190 if (put_user_u32(addrlen, target_addrlen_addr)) 2191 ret = -TARGET_EFAULT; 2192 } 2193 return ret; 2194 } 2195 2196 /* do_getpeername() Must return target values and target errnos. */ 2197 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2198 abi_ulong target_addrlen_addr) 2199 { 2200 socklen_t addrlen; 2201 void *addr; 2202 abi_long ret; 2203 2204 if (get_user_u32(addrlen, target_addrlen_addr)) 2205 return -TARGET_EFAULT; 2206 2207 if ((int)addrlen < 0) { 2208 return -TARGET_EINVAL; 2209 } 2210 2211 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2212 return -TARGET_EFAULT; 2213 2214 addr = alloca(addrlen); 2215 2216 ret = get_errno(getpeername(fd, addr, &addrlen)); 2217 if (!is_error(ret)) { 2218 host_to_target_sockaddr(target_addr, addr, addrlen); 2219 if (put_user_u32(addrlen, target_addrlen_addr)) 2220 ret = -TARGET_EFAULT; 2221 } 2222 return ret; 2223 } 2224 2225 /* do_getsockname() Must return target values and target errnos. 
*/ 2226 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2227 abi_ulong target_addrlen_addr) 2228 { 2229 socklen_t addrlen; 2230 void *addr; 2231 abi_long ret; 2232 2233 if (get_user_u32(addrlen, target_addrlen_addr)) 2234 return -TARGET_EFAULT; 2235 2236 if ((int)addrlen < 0) { 2237 return -TARGET_EINVAL; 2238 } 2239 2240 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2241 return -TARGET_EFAULT; 2242 2243 addr = alloca(addrlen); 2244 2245 ret = get_errno(getsockname(fd, addr, &addrlen)); 2246 if (!is_error(ret)) { 2247 host_to_target_sockaddr(target_addr, addr, addrlen); 2248 if (put_user_u32(addrlen, target_addrlen_addr)) 2249 ret = -TARGET_EFAULT; 2250 } 2251 return ret; 2252 } 2253 2254 /* do_socketpair() Must return target values and target errnos. */ 2255 static abi_long do_socketpair(int domain, int type, int protocol, 2256 abi_ulong target_tab_addr) 2257 { 2258 int tab[2]; 2259 abi_long ret; 2260 2261 target_to_host_sock_type(&type); 2262 2263 ret = get_errno(socketpair(domain, type, protocol, tab)); 2264 if (!is_error(ret)) { 2265 if (put_user_s32(tab[0], target_tab_addr) 2266 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2267 ret = -TARGET_EFAULT; 2268 } 2269 return ret; 2270 } 2271 2272 /* do_sendto() Must return target values and target errnos. 
 */
/* Emulates sendto(2)/send(2): locks the guest message buffer, converts
 * the optional destination sockaddr, and dispatches to the appropriate
 * host call.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* NOTE(review): +1 presumably gives target_to_host_sockaddr()
         * room for NUL-termination of AF_UNIX paths — confirm against
         * that helper's implementation.
         */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos.
 *
 * Emulates recvfrom(2)/recv(2): receives into the locked guest buffer
 * and, when a source-address buffer was supplied, copies the sender's
 * sockaddr and its length back to the guest.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* success: commit the received bytes back to guest memory */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* error: release the buffer without writing anything back */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}

#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the socketcall(2) syscall used by some targets:
 * fetches the per-call number of arguments from guest memory at vptr,
 * then dispatches to the individual do_* helpers.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i <
ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif

#define N_SHM_REGIONS	32

/* Table of guest shmat() attachments, used by do_shmat()/do_shmdt() to
 * track mapped segments so their page flags can be cleared on detach.
 * A slot with start == 0 is free.
 */
static struct shm_region {
    abi_ulong		start;
    abi_ulong		size;
} shm_regions[N_SHM_REGIONS];

/* Guest-ABI layout of struct semid_ds (SysV semaphores). */
struct target_semid_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if !defined(TARGET_PPC64)
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if !defined(TARGET_PPC64)
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};

/* Copy a guest struct ipc_perm (embedded at the head of a guest
 * semid_ds at target_addr) into the host representation, byte-swapping
 * each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode/__seq field widths differ between target ABIs */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_ipc_perm(): write a host ipc_perm back
 * into the guest semid_ds at target_addr.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if
 (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode/__seq field widths differ between target ABIs */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Convert a guest struct semid_ds at target_addr into host form,
 * including the embedded ipc_perm.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse conversion: write a host struct semid_ds back to guest
 * memory at target_addr.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host struct seminfo to guest memory, field by field. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}

/* Host-side semctl() argument union (glibc does not export one). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-ABI view of the semun union: pointers are abi_ulong. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};

/* Read the guest's GETALL/SETALL semaphore-value array into a freshly
 * malloc'd host array (*host_array, owned by the caller / freed by
 * host_to_target_semarray()).  The element count is queried from the
 * kernel via IPC_STAT.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array =
 lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

/* Write the host semaphore-value array back to guest memory and free
 * it.  The element count is re-queried via IPC_STAT so the copy size
 * matches the kernel's view of the set.
 */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}

/* Emulate semctl(2): convert the per-command argument (value, array,
 * semid_ds or seminfo) to host form, perform the host call, then
 * convert results back.  Returns target errnos.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* these commands take no argument */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}

/* Guest-ABI layout of struct sembuf (semop operations). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

/* Copy nsops guest sembuf entries into the host array, byte-swapping
 * each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg,
 &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

/* Emulate semop(2): convert the guest op array and issue the host call.
 * NOTE(review): nsops is guest-controlled and sizes a VLA here — a huge
 * value could overflow the stack; consider bounding it (the kernel caps
 * at SEMOPM).
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}

/* Guest-ABI layout of struct msqid_ds (SysV message queues). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Convert a guest msqid_ds at target_addr into host form, including
 * the embedded ipc_perm.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Inverse conversion: write a host msqid_ds back to guest memory. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host struct msginfo to guest memory, field by field. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

/* Emulate msgctl(2): convert the per-command argument to host form,
 * perform the host call, and convert results back.  Returns target
 * errnos.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch
(cmd) { 2865 case IPC_STAT: 2866 case IPC_SET: 2867 case MSG_STAT: 2868 if (target_to_host_msqid_ds(&dsarg,ptr)) 2869 return -TARGET_EFAULT; 2870 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2871 if (host_to_target_msqid_ds(ptr,&dsarg)) 2872 return -TARGET_EFAULT; 2873 break; 2874 case IPC_RMID: 2875 ret = get_errno(msgctl(msgid, cmd, NULL)); 2876 break; 2877 case IPC_INFO: 2878 case MSG_INFO: 2879 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2880 if (host_to_target_msginfo(ptr, &msginfo)) 2881 return -TARGET_EFAULT; 2882 break; 2883 } 2884 2885 return ret; 2886 } 2887 2888 struct target_msgbuf { 2889 abi_long mtype; 2890 char mtext[1]; 2891 }; 2892 2893 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2894 ssize_t msgsz, int msgflg) 2895 { 2896 struct target_msgbuf *target_mb; 2897 struct msgbuf *host_mb; 2898 abi_long ret = 0; 2899 2900 if (msgsz < 0) { 2901 return -TARGET_EINVAL; 2902 } 2903 2904 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2905 return -TARGET_EFAULT; 2906 host_mb = malloc(msgsz+sizeof(long)); 2907 if (!host_mb) { 2908 unlock_user_struct(target_mb, msgp, 0); 2909 return -TARGET_ENOMEM; 2910 } 2911 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2912 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2913 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2914 free(host_mb); 2915 unlock_user_struct(target_mb, msgp, 0); 2916 2917 return ret; 2918 } 2919 2920 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2921 unsigned int msgsz, abi_long msgtyp, 2922 int msgflg) 2923 { 2924 struct target_msgbuf *target_mb; 2925 char *target_mtext; 2926 struct msgbuf *host_mb; 2927 abi_long ret = 0; 2928 2929 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2930 return -TARGET_EFAULT; 2931 2932 host_mb = g_malloc(msgsz+sizeof(long)); 2933 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2934 2935 if (ret > 0) { 2936 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2937 
target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2938 if (!target_mtext) { 2939 ret = -TARGET_EFAULT; 2940 goto end; 2941 } 2942 memcpy(target_mb->mtext, host_mb->mtext, ret); 2943 unlock_user(target_mtext, target_mtext_addr, ret); 2944 } 2945 2946 target_mb->mtype = tswapal(host_mb->mtype); 2947 2948 end: 2949 if (target_mb) 2950 unlock_user_struct(target_mb, msgp, 1); 2951 g_free(host_mb); 2952 return ret; 2953 } 2954 2955 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2956 abi_ulong target_addr) 2957 { 2958 struct target_shmid_ds *target_sd; 2959 2960 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2961 return -TARGET_EFAULT; 2962 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2963 return -TARGET_EFAULT; 2964 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2965 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2966 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2967 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2968 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2969 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2970 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2971 unlock_user_struct(target_sd, target_addr, 0); 2972 return 0; 2973 } 2974 2975 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2976 struct shmid_ds *host_sd) 2977 { 2978 struct target_shmid_ds *target_sd; 2979 2980 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2981 return -TARGET_EFAULT; 2982 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2983 return -TARGET_EFAULT; 2984 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2985 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2986 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2987 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2988 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2989 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO). */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/* Copy a host struct shminfo to guest memory, field by field. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Copy a host struct shm_info to guest memory, field by field. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

/* Emulate shmctl(2): convert the per-command argument, call the host,
 * and convert results back.  Returns target errnos.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* these commands take no argument */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

/* Emulate shmat(2): attach the segment at the guest-requested address,
 * or find a free guest VMA when shmaddr is 0, then record the mapping
 * in shm_regions[] and mark its pages valid.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma() reserved the range already */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID |
 PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* remember the mapping so do_shmdt() can clear the page flags;
     * NOTE(review): a full table silently drops the record — verify
     * N_SHM_REGIONS is large enough for the workloads of interest.
     */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}

/* Emulate shmdt(2): drop the page flags recorded by do_shmat() and
 * detach the host mapping.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}

#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() Must return target values and target errnos.
 *
 * Demultiplexer for the ipc(2) syscall used by some targets; the high
 * 16 bits of 'call' carry an interface version, the low 16 the
 * operation.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third,
                        (union target_semun) atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* version 0 passes msgp/msgtyp indirectly via a kludge
                 * struct in guest memory
                 */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not implemented */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

#define STRUCT(name, ...)
 STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
/* First expansion of syscall_types.h: build an enum of STRUCT_* ids */
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk argtype descriptor per structure */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] =  { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

/* One row of the ioctl translation table: target/host command numbers,
 * access direction, an optional custom handler, and the thunk argument
 * type used for automatic struct conversion.
 */
struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: the payload is a struct fiemap
 * followed by fm_extent_count fiemap_extent records filled in by the
 * kernel, so both directions need variable-size conversion.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif

/* Custom handler for SIOCGIFCONF: the ifconf struct embeds a pointer
 * to a variable-size ifreq array, so both the outer struct and the
 * array need conversion.  (Continues past this chunk.)
 */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, abi_long cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr,
arg_type, THUNK_HOST); 3411 unlock_user(argptr, arg, 0); 3412 3413 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3414 target_ifc_len = host_ifconf->ifc_len; 3415 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3416 3417 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3418 nb_ifreq = target_ifc_len / target_ifreq_size; 3419 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3420 3421 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3422 if (outbufsz > MAX_STRUCT_SIZE) { 3423 /* We can't fit all the extents into the fixed size buffer. 3424 * Allocate one that is large enough and use it instead. 3425 */ 3426 host_ifconf = malloc(outbufsz); 3427 if (!host_ifconf) { 3428 return -TARGET_ENOMEM; 3429 } 3430 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3431 free_buf = 1; 3432 } 3433 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3434 3435 host_ifconf->ifc_len = host_ifc_len; 3436 host_ifconf->ifc_buf = host_ifc_buf; 3437 3438 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3439 if (!is_error(ret)) { 3440 /* convert host ifc_len to target ifc_len */ 3441 3442 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3443 target_ifc_len = nb_ifreq * target_ifreq_size; 3444 host_ifconf->ifc_len = target_ifc_len; 3445 3446 /* restore target ifc_buf */ 3447 3448 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3449 3450 /* copy struct ifconf to target user */ 3451 3452 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3453 if (!argptr) 3454 return -TARGET_EFAULT; 3455 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3456 unlock_user(argptr, arg, target_size); 3457 3458 /* copy ifreq[] to target user */ 3459 3460 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3461 for (i = 0; i < nb_ifreq ; i++) { 3462 thunk_convert(argptr + i * target_ifreq_size, 3463 host_ifc_buf + i * sizeof(struct ifreq), 3464 ifreq_arg_type, THUNK_TARGET); 3465 } 3466 unlock_user(argptr, target_ifc_buf, 
target_ifc_len); 3467 } 3468 3469 if (free_buf) { 3470 free(host_ifconf); 3471 } 3472 3473 return ret; 3474 } 3475 3476 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3477 abi_long cmd, abi_long arg) 3478 { 3479 void *argptr; 3480 struct dm_ioctl *host_dm; 3481 abi_long guest_data; 3482 uint32_t guest_data_size; 3483 int target_size; 3484 const argtype *arg_type = ie->arg_type; 3485 abi_long ret; 3486 void *big_buf = NULL; 3487 char *host_data; 3488 3489 arg_type++; 3490 target_size = thunk_type_size(arg_type, 0); 3491 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3492 if (!argptr) { 3493 ret = -TARGET_EFAULT; 3494 goto out; 3495 } 3496 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3497 unlock_user(argptr, arg, 0); 3498 3499 /* buf_temp is too small, so fetch things into a bigger buffer */ 3500 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3501 memcpy(big_buf, buf_temp, target_size); 3502 buf_temp = big_buf; 3503 host_dm = big_buf; 3504 3505 guest_data = arg + host_dm->data_start; 3506 if ((guest_data - arg) < 0) { 3507 ret = -EINVAL; 3508 goto out; 3509 } 3510 guest_data_size = host_dm->data_size - host_dm->data_start; 3511 host_data = (char*)host_dm + host_dm->data_start; 3512 3513 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3514 switch (ie->host_cmd) { 3515 case DM_REMOVE_ALL: 3516 case DM_LIST_DEVICES: 3517 case DM_DEV_CREATE: 3518 case DM_DEV_REMOVE: 3519 case DM_DEV_SUSPEND: 3520 case DM_DEV_STATUS: 3521 case DM_DEV_WAIT: 3522 case DM_TABLE_STATUS: 3523 case DM_TABLE_CLEAR: 3524 case DM_TABLE_DEPS: 3525 case DM_LIST_VERSIONS: 3526 /* no input data */ 3527 break; 3528 case DM_DEV_RENAME: 3529 case DM_DEV_SET_GEOMETRY: 3530 /* data contains only strings */ 3531 memcpy(host_data, argptr, guest_data_size); 3532 break; 3533 case DM_TARGET_MSG: 3534 memcpy(host_data, argptr, guest_data_size); 3535 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3536 break; 3537 case 
DM_TABLE_LOAD: 3538 { 3539 void *gspec = argptr; 3540 void *cur_data = host_data; 3541 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3542 int spec_size = thunk_type_size(arg_type, 0); 3543 int i; 3544 3545 for (i = 0; i < host_dm->target_count; i++) { 3546 struct dm_target_spec *spec = cur_data; 3547 uint32_t next; 3548 int slen; 3549 3550 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3551 slen = strlen((char*)gspec + spec_size) + 1; 3552 next = spec->next; 3553 spec->next = sizeof(*spec) + slen; 3554 strcpy((char*)&spec[1], gspec + spec_size); 3555 gspec += next; 3556 cur_data += spec->next; 3557 } 3558 break; 3559 } 3560 default: 3561 ret = -TARGET_EINVAL; 3562 goto out; 3563 } 3564 unlock_user(argptr, guest_data, 0); 3565 3566 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3567 if (!is_error(ret)) { 3568 guest_data = arg + host_dm->data_start; 3569 guest_data_size = host_dm->data_size - host_dm->data_start; 3570 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3571 switch (ie->host_cmd) { 3572 case DM_REMOVE_ALL: 3573 case DM_DEV_CREATE: 3574 case DM_DEV_REMOVE: 3575 case DM_DEV_RENAME: 3576 case DM_DEV_SUSPEND: 3577 case DM_DEV_STATUS: 3578 case DM_TABLE_LOAD: 3579 case DM_TABLE_CLEAR: 3580 case DM_TARGET_MSG: 3581 case DM_DEV_SET_GEOMETRY: 3582 /* no return data */ 3583 break; 3584 case DM_LIST_DEVICES: 3585 { 3586 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3587 uint32_t remaining_data = guest_data_size; 3588 void *cur_data = argptr; 3589 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3590 int nl_size = 12; /* can't use thunk_size due to alignment */ 3591 3592 while (1) { 3593 uint32_t next = nl->next; 3594 if (next) { 3595 nl->next = nl_size + (strlen(nl->name) + 1); 3596 } 3597 if (remaining_data < nl->next) { 3598 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3599 break; 3600 } 3601 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3602 strcpy(cur_data + nl_size, nl->name); 3603 
cur_data += nl->next; 3604 remaining_data -= nl->next; 3605 if (!next) { 3606 break; 3607 } 3608 nl = (void*)nl + next; 3609 } 3610 break; 3611 } 3612 case DM_DEV_WAIT: 3613 case DM_TABLE_STATUS: 3614 { 3615 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3616 void *cur_data = argptr; 3617 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3618 int spec_size = thunk_type_size(arg_type, 0); 3619 int i; 3620 3621 for (i = 0; i < host_dm->target_count; i++) { 3622 uint32_t next = spec->next; 3623 int slen = strlen((char*)&spec[1]) + 1; 3624 spec->next = (cur_data - argptr) + spec_size + slen; 3625 if (guest_data_size < spec->next) { 3626 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3627 break; 3628 } 3629 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3630 strcpy(cur_data + spec_size, (char*)&spec[1]); 3631 cur_data = argptr + spec->next; 3632 spec = (void*)host_dm + host_dm->data_start + next; 3633 } 3634 break; 3635 } 3636 case DM_TABLE_DEPS: 3637 { 3638 void *hdata = (void*)host_dm + host_dm->data_start; 3639 int count = *(uint32_t*)hdata; 3640 uint64_t *hdev = hdata + 8; 3641 uint64_t *gdev = argptr + 8; 3642 int i; 3643 3644 *(uint32_t*)argptr = tswap32(count); 3645 for (i = 0; i < count; i++) { 3646 *gdev = tswap64(*hdev); 3647 gdev++; 3648 hdev++; 3649 } 3650 break; 3651 } 3652 case DM_LIST_VERSIONS: 3653 { 3654 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3655 uint32_t remaining_data = guest_data_size; 3656 void *cur_data = argptr; 3657 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3658 int vers_size = thunk_type_size(arg_type, 0); 3659 3660 while (1) { 3661 uint32_t next = vers->next; 3662 if (next) { 3663 vers->next = vers_size + (strlen(vers->name) + 1); 3664 } 3665 if (remaining_data < vers->next) { 3666 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3667 break; 3668 } 3669 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3670 strcpy(cur_data + vers_size, vers->name); 3671 
cur_data += vers->next; 3672 remaining_data -= vers->next; 3673 if (!next) { 3674 break; 3675 } 3676 vers = (void*)vers + next; 3677 } 3678 break; 3679 } 3680 default: 3681 ret = -TARGET_EINVAL; 3682 goto out; 3683 } 3684 unlock_user(argptr, guest_data, guest_data_size); 3685 3686 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3687 if (!argptr) { 3688 ret = -TARGET_EFAULT; 3689 goto out; 3690 } 3691 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3692 unlock_user(argptr, arg, target_size); 3693 } 3694 out: 3695 g_free(big_buf); 3696 return ret; 3697 } 3698 3699 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3700 int fd, abi_long cmd, abi_long arg) 3701 { 3702 const argtype *arg_type = ie->arg_type; 3703 const StructEntry *se; 3704 const argtype *field_types; 3705 const int *dst_offsets, *src_offsets; 3706 int target_size; 3707 void *argptr; 3708 abi_ulong *target_rt_dev_ptr; 3709 unsigned long *host_rt_dev_ptr; 3710 abi_long ret; 3711 int i; 3712 3713 assert(ie->access == IOC_W); 3714 assert(*arg_type == TYPE_PTR); 3715 arg_type++; 3716 assert(*arg_type == TYPE_STRUCT); 3717 target_size = thunk_type_size(arg_type, 0); 3718 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3719 if (!argptr) { 3720 return -TARGET_EFAULT; 3721 } 3722 arg_type++; 3723 assert(*arg_type == (int)STRUCT_rtentry); 3724 se = struct_entries + *arg_type++; 3725 assert(se->convert[0] == NULL); 3726 /* convert struct here to be able to catch rt_dev string */ 3727 field_types = se->field_types; 3728 dst_offsets = se->field_offsets[THUNK_HOST]; 3729 src_offsets = se->field_offsets[THUNK_TARGET]; 3730 for (i = 0; i < se->nb_fields; i++) { 3731 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3732 assert(*field_types == TYPE_PTRVOID); 3733 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3734 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3735 if (*target_rt_dev_ptr != 0) { 3736 *host_rt_dev_ptr = (unsigned 
long)lock_user_string( 3737 tswapal(*target_rt_dev_ptr)); 3738 if (!*host_rt_dev_ptr) { 3739 unlock_user(argptr, arg, 0); 3740 return -TARGET_EFAULT; 3741 } 3742 } else { 3743 *host_rt_dev_ptr = 0; 3744 } 3745 field_types++; 3746 continue; 3747 } 3748 field_types = thunk_convert(buf_temp + dst_offsets[i], 3749 argptr + src_offsets[i], 3750 field_types, THUNK_HOST); 3751 } 3752 unlock_user(argptr, arg, 0); 3753 3754 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3755 if (*host_rt_dev_ptr != 0) { 3756 unlock_user((void *)*host_rt_dev_ptr, 3757 *target_rt_dev_ptr, 0); 3758 } 3759 return ret; 3760 } 3761 3762 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 3763 int fd, abi_long cmd, abi_long arg) 3764 { 3765 int sig = target_to_host_signal(arg); 3766 return get_errno(ioctl(fd, ie->host_cmd, sig)); 3767 } 3768 3769 static IOCTLEntry ioctl_entries[] = { 3770 #define IOCTL(cmd, access, ...) \ 3771 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3772 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3773 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3774 #include "ioctls.h" 3775 { 0, 0, }, 3776 }; 3777 3778 /* ??? Implement proper locking for ioctls. */ 3779 /* do_ioctl() Must return target values and target errnos. 
 */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the translation table; the zero target_cmd entry
     * generated at the end of ioctl_entries[] is the terminator.
     */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Commands with a custom conversion routine bypass the generic
     * thunk-based path entirely.
     */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument: passed through unchanged */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer argument: convert through buf_temp in the direction(s)
         * given by ie->access, using the thunk type description.
         */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

/* Bitmask translation tables for termios flags; each row is
 * { target_mask, target_bits, host_mask, host_bits }.
 */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};

/* Convert a guest struct target_termios (byte-swapped, guest flag
 * encoding) into a host struct host_termios.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Control characters are remapped index by index (target VINTR etc.
     * need not be at the same slot as the host's).
     */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

/* Inverse of target_to_host_termios: host -> guest encoding. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

/* Thunk descriptor used by the generic ioctl path for termios structs. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};

/* Guest<->host translation of mmap() flag bits. */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
          MAP_NORESERVE },
	{ 0, 0, 0, 0 }
};

#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* Copy (up to bytecount bytes of) the emulated LDT back to the guest
 * buffer at ptr; returns the number of bytes copied or -TARGET_EFAULT.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?
 */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
/* Decode one guest modify_ldt descriptor and install it in the emulated
 * LDT (allocated lazily via target_mmap on first use).  The bit-packing
 * of entry_1/entry_2 deliberately mirrors the Linux kernel's write_ldt().
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack into the two 32-bit halves of an x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
/* Dispatcher for modify_ldt(2): func 0 = read, 1 = write (old mode),
 * 0x11 = write (new mode).
 */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

#if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor in the emulated GDT.
 * entry_number == -1 means "pick a free TLS slot" and the chosen index
 * is written back to the guest struct.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* get_thread_area(2): decode the descriptor at the requested GDT TLS slot
 * back into the guest's user_desc layout (inverse of do_set_thread_area).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);
    
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) | 
        (entry_2 & 0xff000000) | 
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */

#ifndef TARGET_ABI32
/* arch_prctl(2) for x86-64 guests: get/set the FS and GS segment bases. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */

#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Per-thread startup state handed from do_fork() to clone_func(); the
 * mutex/cond pair synchronizes the parent with the new thread.
 */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

/* Entry point of a new guest thread created with CLONE_VM. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID from the child side. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finshed initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   CLONE_VM requests become host threads (pthread_create); anything else is
   emulated with a plain fork().  CLONE_VFORK is deliberately downgraded to
   fork semantics. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        /* Keep the NPTL-specific flags for ourselves but do not forward them
         * to the host: they are handled entirely in this function. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the pthread_attr_* return values below are each
         * overwritten without being checked; only pthread_create's result
         * is acted upon. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.
             * NOTE(review): pthread_cond_wait is not wrapped in a predicate
             * loop; this relies on no spurious wakeup occurring before the
             * child broadcasts — confirm or add a "ready" flag. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            return -EINVAL;
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}

/* warning : doesn't handle linux specific flags...
*/ 4528 static int target_to_host_fcntl_cmd(int cmd) 4529 { 4530 switch(cmd) { 4531 case TARGET_F_DUPFD: 4532 case TARGET_F_GETFD: 4533 case TARGET_F_SETFD: 4534 case TARGET_F_GETFL: 4535 case TARGET_F_SETFL: 4536 return cmd; 4537 case TARGET_F_GETLK: 4538 return F_GETLK; 4539 case TARGET_F_SETLK: 4540 return F_SETLK; 4541 case TARGET_F_SETLKW: 4542 return F_SETLKW; 4543 case TARGET_F_GETOWN: 4544 return F_GETOWN; 4545 case TARGET_F_SETOWN: 4546 return F_SETOWN; 4547 case TARGET_F_GETSIG: 4548 return F_GETSIG; 4549 case TARGET_F_SETSIG: 4550 return F_SETSIG; 4551 #if TARGET_ABI_BITS == 32 4552 case TARGET_F_GETLK64: 4553 return F_GETLK64; 4554 case TARGET_F_SETLK64: 4555 return F_SETLK64; 4556 case TARGET_F_SETLKW64: 4557 return F_SETLKW64; 4558 #endif 4559 case TARGET_F_SETLEASE: 4560 return F_SETLEASE; 4561 case TARGET_F_GETLEASE: 4562 return F_GETLEASE; 4563 #ifdef F_DUPFD_CLOEXEC 4564 case TARGET_F_DUPFD_CLOEXEC: 4565 return F_DUPFD_CLOEXEC; 4566 #endif 4567 case TARGET_F_NOTIFY: 4568 return F_NOTIFY; 4569 #ifdef F_GETOWN_EX 4570 case TARGET_F_GETOWN_EX: 4571 return F_GETOWN_EX; 4572 #endif 4573 #ifdef F_SETOWN_EX 4574 case TARGET_F_SETOWN_EX: 4575 return F_SETOWN_EX; 4576 #endif 4577 default: 4578 return -TARGET_EINVAL; 4579 } 4580 return -TARGET_EINVAL; 4581 } 4582 4583 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4584 static const bitmask_transtbl flock_tbl[] = { 4585 TRANSTBL_CONVERT(F_RDLCK), 4586 TRANSTBL_CONVERT(F_WRLCK), 4587 TRANSTBL_CONVERT(F_UNLCK), 4588 TRANSTBL_CONVERT(F_EXLCK), 4589 TRANSTBL_CONVERT(F_SHLCK), 4590 { 0, 0, 0, 0 } 4591 }; 4592 4593 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4594 { 4595 struct flock fl; 4596 struct target_flock *target_fl; 4597 struct flock64 fl64; 4598 struct target_flock64 *target_fl64; 4599 #ifdef F_GETOWN_EX 4600 struct f_owner_ex fox; 4601 struct target_f_owner_ex *target_fox; 4602 #endif 4603 abi_long ret; 4604 int host_cmd = target_to_host_fcntl_cmd(cmd); 4605 4606 if (host_cmd == 
-TARGET_EINVAL) 4607 return host_cmd; 4608 4609 switch(cmd) { 4610 case TARGET_F_GETLK: 4611 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4612 return -TARGET_EFAULT; 4613 fl.l_type = 4614 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4615 fl.l_whence = tswap16(target_fl->l_whence); 4616 fl.l_start = tswapal(target_fl->l_start); 4617 fl.l_len = tswapal(target_fl->l_len); 4618 fl.l_pid = tswap32(target_fl->l_pid); 4619 unlock_user_struct(target_fl, arg, 0); 4620 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4621 if (ret == 0) { 4622 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4623 return -TARGET_EFAULT; 4624 target_fl->l_type = 4625 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4626 target_fl->l_whence = tswap16(fl.l_whence); 4627 target_fl->l_start = tswapal(fl.l_start); 4628 target_fl->l_len = tswapal(fl.l_len); 4629 target_fl->l_pid = tswap32(fl.l_pid); 4630 unlock_user_struct(target_fl, arg, 1); 4631 } 4632 break; 4633 4634 case TARGET_F_SETLK: 4635 case TARGET_F_SETLKW: 4636 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4637 return -TARGET_EFAULT; 4638 fl.l_type = 4639 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4640 fl.l_whence = tswap16(target_fl->l_whence); 4641 fl.l_start = tswapal(target_fl->l_start); 4642 fl.l_len = tswapal(target_fl->l_len); 4643 fl.l_pid = tswap32(target_fl->l_pid); 4644 unlock_user_struct(target_fl, arg, 0); 4645 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4646 break; 4647 4648 case TARGET_F_GETLK64: 4649 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4650 return -TARGET_EFAULT; 4651 fl64.l_type = 4652 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4653 fl64.l_whence = tswap16(target_fl64->l_whence); 4654 fl64.l_start = tswap64(target_fl64->l_start); 4655 fl64.l_len = tswap64(target_fl64->l_len); 4656 fl64.l_pid = tswap32(target_fl64->l_pid); 4657 unlock_user_struct(target_fl64, arg, 0); 4658 ret = get_errno(fcntl(fd, host_cmd, 
&fl64)); 4659 if (ret == 0) { 4660 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4661 return -TARGET_EFAULT; 4662 target_fl64->l_type = 4663 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4664 target_fl64->l_whence = tswap16(fl64.l_whence); 4665 target_fl64->l_start = tswap64(fl64.l_start); 4666 target_fl64->l_len = tswap64(fl64.l_len); 4667 target_fl64->l_pid = tswap32(fl64.l_pid); 4668 unlock_user_struct(target_fl64, arg, 1); 4669 } 4670 break; 4671 case TARGET_F_SETLK64: 4672 case TARGET_F_SETLKW64: 4673 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4674 return -TARGET_EFAULT; 4675 fl64.l_type = 4676 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4677 fl64.l_whence = tswap16(target_fl64->l_whence); 4678 fl64.l_start = tswap64(target_fl64->l_start); 4679 fl64.l_len = tswap64(target_fl64->l_len); 4680 fl64.l_pid = tswap32(target_fl64->l_pid); 4681 unlock_user_struct(target_fl64, arg, 0); 4682 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4683 break; 4684 4685 case TARGET_F_GETFL: 4686 ret = get_errno(fcntl(fd, host_cmd, arg)); 4687 if (ret >= 0) { 4688 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4689 } 4690 break; 4691 4692 case TARGET_F_SETFL: 4693 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4694 break; 4695 4696 #ifdef F_GETOWN_EX 4697 case TARGET_F_GETOWN_EX: 4698 ret = get_errno(fcntl(fd, host_cmd, &fox)); 4699 if (ret >= 0) { 4700 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 4701 return -TARGET_EFAULT; 4702 target_fox->type = tswap32(fox.type); 4703 target_fox->pid = tswap32(fox.pid); 4704 unlock_user_struct(target_fox, arg, 1); 4705 } 4706 break; 4707 #endif 4708 4709 #ifdef F_SETOWN_EX 4710 case TARGET_F_SETOWN_EX: 4711 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 4712 return -TARGET_EFAULT; 4713 fox.type = tswap32(target_fox->type); 4714 fox.pid = tswap32(target_fox->pid); 4715 unlock_user_struct(target_fox, arg, 0); 4716 ret = 
get_errno(fcntl(fd, host_cmd, &fox)); 4717 break; 4718 #endif 4719 4720 case TARGET_F_SETOWN: 4721 case TARGET_F_GETOWN: 4722 case TARGET_F_SETSIG: 4723 case TARGET_F_GETSIG: 4724 case TARGET_F_SETLEASE: 4725 case TARGET_F_GETLEASE: 4726 ret = get_errno(fcntl(fd, host_cmd, arg)); 4727 break; 4728 4729 default: 4730 ret = get_errno(fcntl(fd, cmd, arg)); 4731 break; 4732 } 4733 return ret; 4734 } 4735 4736 #ifdef USE_UID16 4737 4738 static inline int high2lowuid(int uid) 4739 { 4740 if (uid > 65535) 4741 return 65534; 4742 else 4743 return uid; 4744 } 4745 4746 static inline int high2lowgid(int gid) 4747 { 4748 if (gid > 65535) 4749 return 65534; 4750 else 4751 return gid; 4752 } 4753 4754 static inline int low2highuid(int uid) 4755 { 4756 if ((int16_t)uid == -1) 4757 return -1; 4758 else 4759 return uid; 4760 } 4761 4762 static inline int low2highgid(int gid) 4763 { 4764 if ((int16_t)gid == -1) 4765 return -1; 4766 else 4767 return gid; 4768 } 4769 static inline int tswapid(int id) 4770 { 4771 return tswap16(id); 4772 } 4773 4774 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 4775 4776 #else /* !USE_UID16 */ 4777 static inline int high2lowuid(int uid) 4778 { 4779 return uid; 4780 } 4781 static inline int high2lowgid(int gid) 4782 { 4783 return gid; 4784 } 4785 static inline int low2highuid(int uid) 4786 { 4787 return uid; 4788 } 4789 static inline int low2highgid(int gid) 4790 { 4791 return gid; 4792 } 4793 static inline int tswapid(int id) 4794 { 4795 return tswap32(id); 4796 } 4797 4798 #define put_user_id(x, gaddr) put_user_u32(x, gaddr) 4799 4800 #endif /* USE_UID16 */ 4801 4802 void syscall_init(void) 4803 { 4804 IOCTLEntry *ie; 4805 const argtype *arg_type; 4806 int size; 4807 int i; 4808 4809 #define STRUCT(name, ...) 
thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4810 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4811 #include "syscall_types.h" 4812 #undef STRUCT 4813 #undef STRUCT_SPECIAL 4814 4815 /* Build target_to_host_errno_table[] table from 4816 * host_to_target_errno_table[]. */ 4817 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4818 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4819 } 4820 4821 /* we patch the ioctl size if necessary. We rely on the fact that 4822 no ioctl has all the bits at '1' in the size field */ 4823 ie = ioctl_entries; 4824 while (ie->target_cmd != 0) { 4825 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4826 TARGET_IOC_SIZEMASK) { 4827 arg_type = ie->arg_type; 4828 if (arg_type[0] != TYPE_PTR) { 4829 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4830 ie->target_cmd); 4831 exit(1); 4832 } 4833 arg_type++; 4834 size = thunk_type_size(arg_type, 0); 4835 ie->target_cmd = (ie->target_cmd & 4836 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4837 (size << TARGET_IOC_SIZESHIFT); 4838 } 4839 4840 /* automatic consistency check if same arch */ 4841 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4842 (defined(__x86_64__) && defined(TARGET_X86_64)) 4843 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4844 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4845 ie->name, ie->target_cmd, ie->host_cmd); 4846 } 4847 #endif 4848 ie++; 4849 } 4850 } 4851 4852 #if TARGET_ABI_BITS == 32 4853 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4854 { 4855 #ifdef TARGET_WORDS_BIGENDIAN 4856 return ((uint64_t)word0 << 32) | word1; 4857 #else 4858 return ((uint64_t)word1 << 32) | word0; 4859 #endif 4860 } 4861 #else /* TARGET_ABI_BITS == 32 */ 4862 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4863 { 4864 return word0; 4865 } 4866 #endif /* 
TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
/* truncate64: on targets that pass 64-bit values in aligned register pairs,
 * an alignment hole shifts the argument registers by one. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair realignment as target_truncate64(). */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

/* Copy a guest struct timespec into a host one, byte-swapping each field.
 * Returns -TARGET_EFAULT if the guest address is unreadable. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

/* Copy a host struct timespec out to guest memory.
 * Returns -TARGET_EFAULT if the guest address is unwritable. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

/* Copy a guest struct itimerspec into a host one (both timespec members).
 * Continues below this chunk boundary. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
        tswapal(target_itspec->it_interval.tv_sec);
host_itspec->it_interval.tv_nsec = 4934 tswapal(target_itspec->it_interval.tv_nsec); 4935 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 4936 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 4937 4938 unlock_user_struct(target_itspec, target_addr, 1); 4939 return 0; 4940 } 4941 4942 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 4943 struct itimerspec *host_its) 4944 { 4945 struct target_itimerspec *target_itspec; 4946 4947 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 4948 return -TARGET_EFAULT; 4949 } 4950 4951 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 4952 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 4953 4954 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 4955 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 4956 4957 unlock_user_struct(target_itspec, target_addr, 0); 4958 return 0; 4959 } 4960 4961 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, 4962 abi_ulong target_addr) 4963 { 4964 struct target_sigevent *target_sevp; 4965 4966 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { 4967 return -TARGET_EFAULT; 4968 } 4969 4970 /* This union is awkward on 64 bit systems because it has a 32 bit 4971 * integer and a pointer in it; we follow the conversion approach 4972 * used for handling sigval types in signal.c so the guest should get 4973 * the correct value back even if we did a 64 bit byteswap and it's 4974 * using the 32 bit integer. 
4975 */ 4976 host_sevp->sigev_value.sival_ptr = 4977 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr); 4978 host_sevp->sigev_signo = 4979 target_to_host_signal(tswap32(target_sevp->sigev_signo)); 4980 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); 4981 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid); 4982 4983 unlock_user_struct(target_sevp, target_addr, 1); 4984 return 0; 4985 } 4986 4987 #if defined(TARGET_NR_mlockall) 4988 static inline int target_to_host_mlockall_arg(int arg) 4989 { 4990 int result = 0; 4991 4992 if (arg & TARGET_MLOCKALL_MCL_CURRENT) { 4993 result |= MCL_CURRENT; 4994 } 4995 if (arg & TARGET_MLOCKALL_MCL_FUTURE) { 4996 result |= MCL_FUTURE; 4997 } 4998 return result; 4999 } 5000 #endif 5001 5002 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 5003 static inline abi_long host_to_target_stat64(void *cpu_env, 5004 abi_ulong target_addr, 5005 struct stat *host_st) 5006 { 5007 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 5008 if (((CPUARMState *)cpu_env)->eabi) { 5009 struct target_eabi_stat64 *target_st; 5010 5011 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 5012 return -TARGET_EFAULT; 5013 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 5014 __put_user(host_st->st_dev, &target_st->st_dev); 5015 __put_user(host_st->st_ino, &target_st->st_ino); 5016 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 5017 __put_user(host_st->st_ino, &target_st->__st_ino); 5018 #endif 5019 __put_user(host_st->st_mode, &target_st->st_mode); 5020 __put_user(host_st->st_nlink, &target_st->st_nlink); 5021 __put_user(host_st->st_uid, &target_st->st_uid); 5022 __put_user(host_st->st_gid, &target_st->st_gid); 5023 __put_user(host_st->st_rdev, &target_st->st_rdev); 5024 __put_user(host_st->st_size, &target_st->st_size); 5025 __put_user(host_st->st_blksize, &target_st->st_blksize); 5026 __put_user(host_st->st_blocks, &target_st->st_blocks); 5027 __put_user(host_st->st_atime, 
&target_st->target_st_atime); 5028 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 5029 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 5030 unlock_user_struct(target_st, target_addr, 1); 5031 } else 5032 #endif 5033 { 5034 #if defined(TARGET_HAS_STRUCT_STAT64) 5035 struct target_stat64 *target_st; 5036 #else 5037 struct target_stat *target_st; 5038 #endif 5039 5040 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 5041 return -TARGET_EFAULT; 5042 memset(target_st, 0, sizeof(*target_st)); 5043 __put_user(host_st->st_dev, &target_st->st_dev); 5044 __put_user(host_st->st_ino, &target_st->st_ino); 5045 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 5046 __put_user(host_st->st_ino, &target_st->__st_ino); 5047 #endif 5048 __put_user(host_st->st_mode, &target_st->st_mode); 5049 __put_user(host_st->st_nlink, &target_st->st_nlink); 5050 __put_user(host_st->st_uid, &target_st->st_uid); 5051 __put_user(host_st->st_gid, &target_st->st_gid); 5052 __put_user(host_st->st_rdev, &target_st->st_rdev); 5053 /* XXX: better use of kernel struct */ 5054 __put_user(host_st->st_size, &target_st->st_size); 5055 __put_user(host_st->st_blksize, &target_st->st_blksize); 5056 __put_user(host_st->st_blocks, &target_st->st_blocks); 5057 __put_user(host_st->st_atime, &target_st->target_st_atime); 5058 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 5059 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 5060 unlock_user_struct(target_st, target_addr, 1); 5061 } 5062 5063 return 0; 5064 } 5065 #endif 5066 5067 /* ??? Using host futex calls even when target atomic operations 5068 are not really atomic probably breaks things. However implementing 5069 futexes locally would make futexes shared between multiple processes 5070 tricky. However they're probably useless because guest atomic 5071 operations won't work either. 
*/ 5072 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 5073 target_ulong uaddr2, int val3) 5074 { 5075 struct timespec ts, *pts; 5076 int base_op; 5077 5078 /* ??? We assume FUTEX_* constants are the same on both host 5079 and target. */ 5080 #ifdef FUTEX_CMD_MASK 5081 base_op = op & FUTEX_CMD_MASK; 5082 #else 5083 base_op = op; 5084 #endif 5085 switch (base_op) { 5086 case FUTEX_WAIT: 5087 case FUTEX_WAIT_BITSET: 5088 if (timeout) { 5089 pts = &ts; 5090 target_to_host_timespec(pts, timeout); 5091 } else { 5092 pts = NULL; 5093 } 5094 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 5095 pts, NULL, val3)); 5096 case FUTEX_WAKE: 5097 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 5098 case FUTEX_FD: 5099 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 5100 case FUTEX_REQUEUE: 5101 case FUTEX_CMP_REQUEUE: 5102 case FUTEX_WAKE_OP: 5103 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 5104 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 5105 But the prototype takes a `struct timespec *'; insert casts 5106 to satisfy the compiler. We do not need to tswap TIMEOUT 5107 since it's not compared to guest memory. */ 5108 pts = (struct timespec *)(uintptr_t) timeout; 5109 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 5110 g2h(uaddr2), 5111 (base_op == FUTEX_CMP_REQUEUE 5112 ? tswap32(val3) 5113 : val3))); 5114 default: 5115 return -TARGET_ENOSYS; 5116 } 5117 } 5118 5119 /* Map host to target signal numbers for the wait family of syscalls. 5120 Assume all other status bits are the same. 
*/ 5121 int host_to_target_waitstatus(int status) 5122 { 5123 if (WIFSIGNALED(status)) { 5124 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 5125 } 5126 if (WIFSTOPPED(status)) { 5127 return (host_to_target_signal(WSTOPSIG(status)) << 8) 5128 | (status & 0xff); 5129 } 5130 return status; 5131 } 5132 5133 static int open_self_cmdline(void *cpu_env, int fd) 5134 { 5135 int fd_orig = -1; 5136 bool word_skipped = false; 5137 5138 fd_orig = open("/proc/self/cmdline", O_RDONLY); 5139 if (fd_orig < 0) { 5140 return fd_orig; 5141 } 5142 5143 while (true) { 5144 ssize_t nb_read; 5145 char buf[128]; 5146 char *cp_buf = buf; 5147 5148 nb_read = read(fd_orig, buf, sizeof(buf)); 5149 if (nb_read < 0) { 5150 fd_orig = close(fd_orig); 5151 return -1; 5152 } else if (nb_read == 0) { 5153 break; 5154 } 5155 5156 if (!word_skipped) { 5157 /* Skip the first string, which is the path to qemu-*-static 5158 instead of the actual command. */ 5159 cp_buf = memchr(buf, 0, sizeof(buf)); 5160 if (cp_buf) { 5161 /* Null byte found, skip one string */ 5162 cp_buf++; 5163 nb_read -= cp_buf - buf; 5164 word_skipped = true; 5165 } 5166 } 5167 5168 if (word_skipped) { 5169 if (write(fd, cp_buf, nb_read) != nb_read) { 5170 close(fd_orig); 5171 return -1; 5172 } 5173 } 5174 } 5175 5176 return close(fd_orig); 5177 } 5178 5179 static int open_self_maps(void *cpu_env, int fd) 5180 { 5181 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5182 TaskState *ts = cpu->opaque; 5183 FILE *fp; 5184 char *line = NULL; 5185 size_t len = 0; 5186 ssize_t read; 5187 5188 fp = fopen("/proc/self/maps", "r"); 5189 if (fp == NULL) { 5190 return -EACCES; 5191 } 5192 5193 while ((read = getline(&line, &len, fp)) != -1) { 5194 int fields, dev_maj, dev_min, inode; 5195 uint64_t min, max, offset; 5196 char flag_r, flag_w, flag_x, flag_p; 5197 char path[512] = ""; 5198 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 5199 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 5200 
&flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5201 5202 if ((fields < 10) || (fields > 11)) { 5203 continue; 5204 } 5205 if (h2g_valid(min)) { 5206 int flags = page_get_flags(h2g(min)); 5207 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX); 5208 if (page_check_range(h2g(min), max - min, flags) == -1) { 5209 continue; 5210 } 5211 if (h2g(min) == ts->info->stack_limit) { 5212 pstrcpy(path, sizeof(path), " [stack]"); 5213 } 5214 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5215 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 5216 h2g(min), h2g(max - 1) + 1, flag_r, flag_w, 5217 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5218 path[0] ? " " : "", path); 5219 } 5220 } 5221 5222 free(line); 5223 fclose(fp); 5224 5225 return 0; 5226 } 5227 5228 static int open_self_stat(void *cpu_env, int fd) 5229 { 5230 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5231 TaskState *ts = cpu->opaque; 5232 abi_ulong start_stack = ts->info->start_stack; 5233 int i; 5234 5235 for (i = 0; i < 44; i++) { 5236 char buf[128]; 5237 int len; 5238 uint64_t val = 0; 5239 5240 if (i == 0) { 5241 /* pid */ 5242 val = getpid(); 5243 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5244 } else if (i == 1) { 5245 /* app name */ 5246 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5247 } else if (i == 27) { 5248 /* stack bottom */ 5249 val = start_stack; 5250 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5251 } else { 5252 /* for the rest, there is MasterCard */ 5253 snprintf(buf, sizeof(buf), "0%c", i == 43 ? 
'\n' : ' '); 5254 } 5255 5256 len = strlen(buf); 5257 if (write(fd, buf, len) != len) { 5258 return -1; 5259 } 5260 } 5261 5262 return 0; 5263 } 5264 5265 static int open_self_auxv(void *cpu_env, int fd) 5266 { 5267 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5268 TaskState *ts = cpu->opaque; 5269 abi_ulong auxv = ts->info->saved_auxv; 5270 abi_ulong len = ts->info->auxv_len; 5271 char *ptr; 5272 5273 /* 5274 * Auxiliary vector is stored in target process stack. 5275 * read in whole auxv vector and copy it to file 5276 */ 5277 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5278 if (ptr != NULL) { 5279 while (len > 0) { 5280 ssize_t r; 5281 r = write(fd, ptr, len); 5282 if (r <= 0) { 5283 break; 5284 } 5285 len -= r; 5286 ptr += r; 5287 } 5288 lseek(fd, 0, SEEK_SET); 5289 unlock_user(ptr, auxv, len); 5290 } 5291 5292 return 0; 5293 } 5294 5295 static int is_proc_myself(const char *filename, const char *entry) 5296 { 5297 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 5298 filename += strlen("/proc/"); 5299 if (!strncmp(filename, "self/", strlen("self/"))) { 5300 filename += strlen("self/"); 5301 } else if (*filename >= '1' && *filename <= '9') { 5302 char myself[80]; 5303 snprintf(myself, sizeof(myself), "%d/", getpid()); 5304 if (!strncmp(filename, myself, strlen(myself))) { 5305 filename += strlen(myself); 5306 } else { 5307 return 0; 5308 } 5309 } else { 5310 return 0; 5311 } 5312 if (!strcmp(filename, entry)) { 5313 return 1; 5314 } 5315 } 5316 return 0; 5317 } 5318 5319 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5320 static int is_proc(const char *filename, const char *entry) 5321 { 5322 return strcmp(filename, entry) == 0; 5323 } 5324 5325 static int open_net_route(void *cpu_env, int fd) 5326 { 5327 FILE *fp; 5328 char *line = NULL; 5329 size_t len = 0; 5330 ssize_t read; 5331 5332 fp = fopen("/proc/net/route", "r"); 5333 if (fp == NULL) { 5334 return -EACCES; 5335 } 5336 5337 /* read header */ 5338 5339 read = 
getline(&line, &len, fp); 5340 dprintf(fd, "%s", line); 5341 5342 /* read routes */ 5343 5344 while ((read = getline(&line, &len, fp)) != -1) { 5345 char iface[16]; 5346 uint32_t dest, gw, mask; 5347 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 5348 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5349 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 5350 &mask, &mtu, &window, &irtt); 5351 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5352 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 5353 metric, tswap32(mask), mtu, window, irtt); 5354 } 5355 5356 free(line); 5357 fclose(fp); 5358 5359 return 0; 5360 } 5361 #endif 5362 5363 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) 5364 { 5365 struct fake_open { 5366 const char *filename; 5367 int (*fill)(void *cpu_env, int fd); 5368 int (*cmp)(const char *s1, const char *s2); 5369 }; 5370 const struct fake_open *fake_open; 5371 static const struct fake_open fakes[] = { 5372 { "maps", open_self_maps, is_proc_myself }, 5373 { "stat", open_self_stat, is_proc_myself }, 5374 { "auxv", open_self_auxv, is_proc_myself }, 5375 { "cmdline", open_self_cmdline, is_proc_myself }, 5376 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5377 { "/proc/net/route", open_net_route, is_proc }, 5378 #endif 5379 { NULL, NULL, NULL } 5380 }; 5381 5382 if (is_proc_myself(pathname, "exe")) { 5383 int execfd = qemu_getauxval(AT_EXECFD); 5384 return execfd ? 
execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode)); 5385 } 5386 5387 for (fake_open = fakes; fake_open->filename; fake_open++) { 5388 if (fake_open->cmp(pathname, fake_open->filename)) { 5389 break; 5390 } 5391 } 5392 5393 if (fake_open->filename) { 5394 const char *tmpdir; 5395 char filename[PATH_MAX]; 5396 int fd, r; 5397 5398 /* create temporary file to map stat to */ 5399 tmpdir = getenv("TMPDIR"); 5400 if (!tmpdir) 5401 tmpdir = "/tmp"; 5402 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5403 fd = mkstemp(filename); 5404 if (fd < 0) { 5405 return fd; 5406 } 5407 unlink(filename); 5408 5409 if ((r = fake_open->fill(cpu_env, fd))) { 5410 close(fd); 5411 return r; 5412 } 5413 lseek(fd, 0, SEEK_SET); 5414 5415 return fd; 5416 } 5417 5418 return get_errno(sys_openat(dirfd, path(pathname), flags, mode)); 5419 } 5420 5421 /* do_syscall() should always have a single exit point at the end so 5422 that actions, such as logging of syscall results, can be performed. 5423 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 5424 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5425 abi_long arg2, abi_long arg3, abi_long arg4, 5426 abi_long arg5, abi_long arg6, abi_long arg7, 5427 abi_long arg8) 5428 { 5429 CPUState *cpu = ENV_GET_CPU(cpu_env); 5430 abi_long ret; 5431 struct stat st; 5432 struct statfs stfs; 5433 void *p; 5434 5435 #ifdef DEBUG 5436 gemu_log("syscall %d", num); 5437 #endif 5438 if(do_strace) 5439 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5440 5441 switch(num) { 5442 case TARGET_NR_exit: 5443 /* In old applications this may be used to implement _exit(2). 5444 However in threaded applictions it is used for thread termination, 5445 and _exit_group is used for application termination. 5446 Do thread termination if we have more then one thread. */ 5447 /* FIXME: This probably breaks if a signal arrives. We should probably 5448 be disabling signals. 
*/ 5449 if (CPU_NEXT(first_cpu)) { 5450 TaskState *ts; 5451 5452 cpu_list_lock(); 5453 /* Remove the CPU from the list. */ 5454 QTAILQ_REMOVE(&cpus, cpu, node); 5455 cpu_list_unlock(); 5456 ts = cpu->opaque; 5457 if (ts->child_tidptr) { 5458 put_user_u32(0, ts->child_tidptr); 5459 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5460 NULL, NULL, 0); 5461 } 5462 thread_cpu = NULL; 5463 object_unref(OBJECT(cpu)); 5464 g_free(ts); 5465 pthread_exit(NULL); 5466 } 5467 #ifdef TARGET_GPROF 5468 _mcleanup(); 5469 #endif 5470 gdb_exit(cpu_env, arg1); 5471 _exit(arg1); 5472 ret = 0; /* avoid warning */ 5473 break; 5474 case TARGET_NR_read: 5475 if (arg3 == 0) 5476 ret = 0; 5477 else { 5478 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5479 goto efault; 5480 ret = get_errno(read(arg1, p, arg3)); 5481 unlock_user(p, arg2, ret); 5482 } 5483 break; 5484 case TARGET_NR_write: 5485 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5486 goto efault; 5487 ret = get_errno(write(arg1, p, arg3)); 5488 unlock_user(p, arg2, 0); 5489 break; 5490 case TARGET_NR_open: 5491 if (!(p = lock_user_string(arg1))) 5492 goto efault; 5493 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 5494 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5495 arg3)); 5496 unlock_user(p, arg1, 0); 5497 break; 5498 case TARGET_NR_openat: 5499 if (!(p = lock_user_string(arg2))) 5500 goto efault; 5501 ret = get_errno(do_openat(cpu_env, arg1, p, 5502 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5503 arg4)); 5504 unlock_user(p, arg2, 0); 5505 break; 5506 case TARGET_NR_close: 5507 ret = get_errno(close(arg1)); 5508 break; 5509 case TARGET_NR_brk: 5510 ret = do_brk(arg1); 5511 break; 5512 case TARGET_NR_fork: 5513 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5514 break; 5515 #ifdef TARGET_NR_waitpid 5516 case TARGET_NR_waitpid: 5517 { 5518 int status; 5519 ret = get_errno(waitpid(arg1, &status, arg3)); 5520 if (!is_error(ret) && arg2 && ret 5521 && put_user_s32(host_to_target_waitstatus(status), 
arg2)) 5522 goto efault; 5523 } 5524 break; 5525 #endif 5526 #ifdef TARGET_NR_waitid 5527 case TARGET_NR_waitid: 5528 { 5529 siginfo_t info; 5530 info.si_pid = 0; 5531 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5532 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5533 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5534 goto efault; 5535 host_to_target_siginfo(p, &info); 5536 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5537 } 5538 } 5539 break; 5540 #endif 5541 #ifdef TARGET_NR_creat /* not on alpha */ 5542 case TARGET_NR_creat: 5543 if (!(p = lock_user_string(arg1))) 5544 goto efault; 5545 ret = get_errno(creat(p, arg2)); 5546 unlock_user(p, arg1, 0); 5547 break; 5548 #endif 5549 case TARGET_NR_link: 5550 { 5551 void * p2; 5552 p = lock_user_string(arg1); 5553 p2 = lock_user_string(arg2); 5554 if (!p || !p2) 5555 ret = -TARGET_EFAULT; 5556 else 5557 ret = get_errno(link(p, p2)); 5558 unlock_user(p2, arg2, 0); 5559 unlock_user(p, arg1, 0); 5560 } 5561 break; 5562 #if defined(TARGET_NR_linkat) 5563 case TARGET_NR_linkat: 5564 { 5565 void * p2 = NULL; 5566 if (!arg2 || !arg4) 5567 goto efault; 5568 p = lock_user_string(arg2); 5569 p2 = lock_user_string(arg4); 5570 if (!p || !p2) 5571 ret = -TARGET_EFAULT; 5572 else 5573 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5574 unlock_user(p, arg2, 0); 5575 unlock_user(p2, arg4, 0); 5576 } 5577 break; 5578 #endif 5579 case TARGET_NR_unlink: 5580 if (!(p = lock_user_string(arg1))) 5581 goto efault; 5582 ret = get_errno(unlink(p)); 5583 unlock_user(p, arg1, 0); 5584 break; 5585 #if defined(TARGET_NR_unlinkat) 5586 case TARGET_NR_unlinkat: 5587 if (!(p = lock_user_string(arg2))) 5588 goto efault; 5589 ret = get_errno(unlinkat(arg1, p, arg3)); 5590 unlock_user(p, arg2, 0); 5591 break; 5592 #endif 5593 case TARGET_NR_execve: 5594 { 5595 char **argp, **envp; 5596 int argc, envc; 5597 abi_ulong gp; 5598 abi_ulong guest_argp; 5599 abi_ulong guest_envp; 5600 abi_ulong addr; 5601 char **q; 5602 
int total_size = 0; 5603 5604 argc = 0; 5605 guest_argp = arg2; 5606 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5607 if (get_user_ual(addr, gp)) 5608 goto efault; 5609 if (!addr) 5610 break; 5611 argc++; 5612 } 5613 envc = 0; 5614 guest_envp = arg3; 5615 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5616 if (get_user_ual(addr, gp)) 5617 goto efault; 5618 if (!addr) 5619 break; 5620 envc++; 5621 } 5622 5623 argp = alloca((argc + 1) * sizeof(void *)); 5624 envp = alloca((envc + 1) * sizeof(void *)); 5625 5626 for (gp = guest_argp, q = argp; gp; 5627 gp += sizeof(abi_ulong), q++) { 5628 if (get_user_ual(addr, gp)) 5629 goto execve_efault; 5630 if (!addr) 5631 break; 5632 if (!(*q = lock_user_string(addr))) 5633 goto execve_efault; 5634 total_size += strlen(*q) + 1; 5635 } 5636 *q = NULL; 5637 5638 for (gp = guest_envp, q = envp; gp; 5639 gp += sizeof(abi_ulong), q++) { 5640 if (get_user_ual(addr, gp)) 5641 goto execve_efault; 5642 if (!addr) 5643 break; 5644 if (!(*q = lock_user_string(addr))) 5645 goto execve_efault; 5646 total_size += strlen(*q) + 1; 5647 } 5648 *q = NULL; 5649 5650 /* This case will not be caught by the host's execve() if its 5651 page size is bigger than the target's. 
*/ 5652 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5653 ret = -TARGET_E2BIG; 5654 goto execve_end; 5655 } 5656 if (!(p = lock_user_string(arg1))) 5657 goto execve_efault; 5658 ret = get_errno(execve(p, argp, envp)); 5659 unlock_user(p, arg1, 0); 5660 5661 goto execve_end; 5662 5663 execve_efault: 5664 ret = -TARGET_EFAULT; 5665 5666 execve_end: 5667 for (gp = guest_argp, q = argp; *q; 5668 gp += sizeof(abi_ulong), q++) { 5669 if (get_user_ual(addr, gp) 5670 || !addr) 5671 break; 5672 unlock_user(*q, addr, 0); 5673 } 5674 for (gp = guest_envp, q = envp; *q; 5675 gp += sizeof(abi_ulong), q++) { 5676 if (get_user_ual(addr, gp) 5677 || !addr) 5678 break; 5679 unlock_user(*q, addr, 0); 5680 } 5681 } 5682 break; 5683 case TARGET_NR_chdir: 5684 if (!(p = lock_user_string(arg1))) 5685 goto efault; 5686 ret = get_errno(chdir(p)); 5687 unlock_user(p, arg1, 0); 5688 break; 5689 #ifdef TARGET_NR_time 5690 case TARGET_NR_time: 5691 { 5692 time_t host_time; 5693 ret = get_errno(time(&host_time)); 5694 if (!is_error(ret) 5695 && arg1 5696 && put_user_sal(host_time, arg1)) 5697 goto efault; 5698 } 5699 break; 5700 #endif 5701 case TARGET_NR_mknod: 5702 if (!(p = lock_user_string(arg1))) 5703 goto efault; 5704 ret = get_errno(mknod(p, arg2, arg3)); 5705 unlock_user(p, arg1, 0); 5706 break; 5707 #if defined(TARGET_NR_mknodat) 5708 case TARGET_NR_mknodat: 5709 if (!(p = lock_user_string(arg2))) 5710 goto efault; 5711 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5712 unlock_user(p, arg2, 0); 5713 break; 5714 #endif 5715 case TARGET_NR_chmod: 5716 if (!(p = lock_user_string(arg1))) 5717 goto efault; 5718 ret = get_errno(chmod(p, arg2)); 5719 unlock_user(p, arg1, 0); 5720 break; 5721 #ifdef TARGET_NR_break 5722 case TARGET_NR_break: 5723 goto unimplemented; 5724 #endif 5725 #ifdef TARGET_NR_oldstat 5726 case TARGET_NR_oldstat: 5727 goto unimplemented; 5728 #endif 5729 case TARGET_NR_lseek: 5730 ret = get_errno(lseek(arg1, arg2, arg3)); 5731 break; 5732 #if 
defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5733 /* Alpha specific */ 5734 case TARGET_NR_getxpid: 5735 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5736 ret = get_errno(getpid()); 5737 break; 5738 #endif 5739 #ifdef TARGET_NR_getpid 5740 case TARGET_NR_getpid: 5741 ret = get_errno(getpid()); 5742 break; 5743 #endif 5744 case TARGET_NR_mount: 5745 { 5746 /* need to look at the data field */ 5747 void *p2, *p3; 5748 5749 if (arg1) { 5750 p = lock_user_string(arg1); 5751 if (!p) { 5752 goto efault; 5753 } 5754 } else { 5755 p = NULL; 5756 } 5757 5758 p2 = lock_user_string(arg2); 5759 if (!p2) { 5760 if (arg1) { 5761 unlock_user(p, arg1, 0); 5762 } 5763 goto efault; 5764 } 5765 5766 if (arg3) { 5767 p3 = lock_user_string(arg3); 5768 if (!p3) { 5769 if (arg1) { 5770 unlock_user(p, arg1, 0); 5771 } 5772 unlock_user(p2, arg2, 0); 5773 goto efault; 5774 } 5775 } else { 5776 p3 = NULL; 5777 } 5778 5779 /* FIXME - arg5 should be locked, but it isn't clear how to 5780 * do that since it's not guaranteed to be a NULL-terminated 5781 * string. 
5782 */ 5783 if (!arg5) { 5784 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 5785 } else { 5786 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 5787 } 5788 ret = get_errno(ret); 5789 5790 if (arg1) { 5791 unlock_user(p, arg1, 0); 5792 } 5793 unlock_user(p2, arg2, 0); 5794 if (arg3) { 5795 unlock_user(p3, arg3, 0); 5796 } 5797 } 5798 break; 5799 #ifdef TARGET_NR_umount 5800 case TARGET_NR_umount: 5801 if (!(p = lock_user_string(arg1))) 5802 goto efault; 5803 ret = get_errno(umount(p)); 5804 unlock_user(p, arg1, 0); 5805 break; 5806 #endif 5807 #ifdef TARGET_NR_stime /* not on alpha */ 5808 case TARGET_NR_stime: 5809 { 5810 time_t host_time; 5811 if (get_user_sal(host_time, arg1)) 5812 goto efault; 5813 ret = get_errno(stime(&host_time)); 5814 } 5815 break; 5816 #endif 5817 case TARGET_NR_ptrace: 5818 goto unimplemented; 5819 #ifdef TARGET_NR_alarm /* not on alpha */ 5820 case TARGET_NR_alarm: 5821 ret = alarm(arg1); 5822 break; 5823 #endif 5824 #ifdef TARGET_NR_oldfstat 5825 case TARGET_NR_oldfstat: 5826 goto unimplemented; 5827 #endif 5828 #ifdef TARGET_NR_pause /* not on alpha */ 5829 case TARGET_NR_pause: 5830 ret = get_errno(pause()); 5831 break; 5832 #endif 5833 #ifdef TARGET_NR_utime 5834 case TARGET_NR_utime: 5835 { 5836 struct utimbuf tbuf, *host_tbuf; 5837 struct target_utimbuf *target_tbuf; 5838 if (arg2) { 5839 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5840 goto efault; 5841 tbuf.actime = tswapal(target_tbuf->actime); 5842 tbuf.modtime = tswapal(target_tbuf->modtime); 5843 unlock_user_struct(target_tbuf, arg2, 0); 5844 host_tbuf = &tbuf; 5845 } else { 5846 host_tbuf = NULL; 5847 } 5848 if (!(p = lock_user_string(arg1))) 5849 goto efault; 5850 ret = get_errno(utime(p, host_tbuf)); 5851 unlock_user(p, arg1, 0); 5852 } 5853 break; 5854 #endif 5855 case TARGET_NR_utimes: 5856 { 5857 struct timeval *tvp, tv[2]; 5858 if (arg2) { 5859 if (copy_from_user_timeval(&tv[0], arg2) 5860 || copy_from_user_timeval(&tv[1], 5861 arg2 + 
sizeof(struct target_timeval))) 5862 goto efault; 5863 tvp = tv; 5864 } else { 5865 tvp = NULL; 5866 } 5867 if (!(p = lock_user_string(arg1))) 5868 goto efault; 5869 ret = get_errno(utimes(p, tvp)); 5870 unlock_user(p, arg1, 0); 5871 } 5872 break; 5873 #if defined(TARGET_NR_futimesat) 5874 case TARGET_NR_futimesat: 5875 { 5876 struct timeval *tvp, tv[2]; 5877 if (arg3) { 5878 if (copy_from_user_timeval(&tv[0], arg3) 5879 || copy_from_user_timeval(&tv[1], 5880 arg3 + sizeof(struct target_timeval))) 5881 goto efault; 5882 tvp = tv; 5883 } else { 5884 tvp = NULL; 5885 } 5886 if (!(p = lock_user_string(arg2))) 5887 goto efault; 5888 ret = get_errno(futimesat(arg1, path(p), tvp)); 5889 unlock_user(p, arg2, 0); 5890 } 5891 break; 5892 #endif 5893 #ifdef TARGET_NR_stty 5894 case TARGET_NR_stty: 5895 goto unimplemented; 5896 #endif 5897 #ifdef TARGET_NR_gtty 5898 case TARGET_NR_gtty: 5899 goto unimplemented; 5900 #endif 5901 case TARGET_NR_access: 5902 if (!(p = lock_user_string(arg1))) 5903 goto efault; 5904 ret = get_errno(access(path(p), arg2)); 5905 unlock_user(p, arg1, 0); 5906 break; 5907 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5908 case TARGET_NR_faccessat: 5909 if (!(p = lock_user_string(arg2))) 5910 goto efault; 5911 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5912 unlock_user(p, arg2, 0); 5913 break; 5914 #endif 5915 #ifdef TARGET_NR_nice /* not on alpha */ 5916 case TARGET_NR_nice: 5917 ret = get_errno(nice(arg1)); 5918 break; 5919 #endif 5920 #ifdef TARGET_NR_ftime 5921 case TARGET_NR_ftime: 5922 goto unimplemented; 5923 #endif 5924 case TARGET_NR_sync: 5925 sync(); 5926 ret = 0; 5927 break; 5928 case TARGET_NR_kill: 5929 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5930 break; 5931 case TARGET_NR_rename: 5932 { 5933 void *p2; 5934 p = lock_user_string(arg1); 5935 p2 = lock_user_string(arg2); 5936 if (!p || !p2) 5937 ret = -TARGET_EFAULT; 5938 else 5939 ret = get_errno(rename(p, p2)); 5940 unlock_user(p2, arg2, 0); 5941 
unlock_user(p, arg1, 0); 5942 } 5943 break; 5944 #if defined(TARGET_NR_renameat) 5945 case TARGET_NR_renameat: 5946 { 5947 void *p2; 5948 p = lock_user_string(arg2); 5949 p2 = lock_user_string(arg4); 5950 if (!p || !p2) 5951 ret = -TARGET_EFAULT; 5952 else 5953 ret = get_errno(renameat(arg1, p, arg3, p2)); 5954 unlock_user(p2, arg4, 0); 5955 unlock_user(p, arg2, 0); 5956 } 5957 break; 5958 #endif 5959 case TARGET_NR_mkdir: 5960 if (!(p = lock_user_string(arg1))) 5961 goto efault; 5962 ret = get_errno(mkdir(p, arg2)); 5963 unlock_user(p, arg1, 0); 5964 break; 5965 #if defined(TARGET_NR_mkdirat) 5966 case TARGET_NR_mkdirat: 5967 if (!(p = lock_user_string(arg2))) 5968 goto efault; 5969 ret = get_errno(mkdirat(arg1, p, arg3)); 5970 unlock_user(p, arg2, 0); 5971 break; 5972 #endif 5973 case TARGET_NR_rmdir: 5974 if (!(p = lock_user_string(arg1))) 5975 goto efault; 5976 ret = get_errno(rmdir(p)); 5977 unlock_user(p, arg1, 0); 5978 break; 5979 case TARGET_NR_dup: 5980 ret = get_errno(dup(arg1)); 5981 break; 5982 case TARGET_NR_pipe: 5983 ret = do_pipe(cpu_env, arg1, 0, 0); 5984 break; 5985 #ifdef TARGET_NR_pipe2 5986 case TARGET_NR_pipe2: 5987 ret = do_pipe(cpu_env, arg1, 5988 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5989 break; 5990 #endif 5991 case TARGET_NR_times: 5992 { 5993 struct target_tms *tmsp; 5994 struct tms tms; 5995 ret = get_errno(times(&tms)); 5996 if (arg1) { 5997 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5998 if (!tmsp) 5999 goto efault; 6000 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 6001 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 6002 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 6003 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 6004 } 6005 if (!is_error(ret)) 6006 ret = host_to_target_clock_t(ret); 6007 } 6008 break; 6009 #ifdef TARGET_NR_prof 6010 case TARGET_NR_prof: 6011 goto unimplemented; 6012 #endif 6013 #ifdef 
TARGET_NR_signal 6014 case TARGET_NR_signal: 6015 goto unimplemented; 6016 #endif 6017 case TARGET_NR_acct: 6018 if (arg1 == 0) { 6019 ret = get_errno(acct(NULL)); 6020 } else { 6021 if (!(p = lock_user_string(arg1))) 6022 goto efault; 6023 ret = get_errno(acct(path(p))); 6024 unlock_user(p, arg1, 0); 6025 } 6026 break; 6027 #ifdef TARGET_NR_umount2 6028 case TARGET_NR_umount2: 6029 if (!(p = lock_user_string(arg1))) 6030 goto efault; 6031 ret = get_errno(umount2(p, arg2)); 6032 unlock_user(p, arg1, 0); 6033 break; 6034 #endif 6035 #ifdef TARGET_NR_lock 6036 case TARGET_NR_lock: 6037 goto unimplemented; 6038 #endif 6039 case TARGET_NR_ioctl: 6040 ret = do_ioctl(arg1, arg2, arg3); 6041 break; 6042 case TARGET_NR_fcntl: 6043 ret = do_fcntl(arg1, arg2, arg3); 6044 break; 6045 #ifdef TARGET_NR_mpx 6046 case TARGET_NR_mpx: 6047 goto unimplemented; 6048 #endif 6049 case TARGET_NR_setpgid: 6050 ret = get_errno(setpgid(arg1, arg2)); 6051 break; 6052 #ifdef TARGET_NR_ulimit 6053 case TARGET_NR_ulimit: 6054 goto unimplemented; 6055 #endif 6056 #ifdef TARGET_NR_oldolduname 6057 case TARGET_NR_oldolduname: 6058 goto unimplemented; 6059 #endif 6060 case TARGET_NR_umask: 6061 ret = get_errno(umask(arg1)); 6062 break; 6063 case TARGET_NR_chroot: 6064 if (!(p = lock_user_string(arg1))) 6065 goto efault; 6066 ret = get_errno(chroot(p)); 6067 unlock_user(p, arg1, 0); 6068 break; 6069 case TARGET_NR_ustat: 6070 goto unimplemented; 6071 case TARGET_NR_dup2: 6072 ret = get_errno(dup2(arg1, arg2)); 6073 break; 6074 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 6075 case TARGET_NR_dup3: 6076 ret = get_errno(dup3(arg1, arg2, arg3)); 6077 break; 6078 #endif 6079 #ifdef TARGET_NR_getppid /* not on alpha */ 6080 case TARGET_NR_getppid: 6081 ret = get_errno(getppid()); 6082 break; 6083 #endif 6084 case TARGET_NR_getpgrp: 6085 ret = get_errno(getpgrp()); 6086 break; 6087 case TARGET_NR_setsid: 6088 ret = get_errno(setsid()); 6089 break; 6090 #ifdef TARGET_NR_sigaction 6091 case 
TARGET_NR_sigaction: 6092 { 6093 #if defined(TARGET_ALPHA) 6094 struct target_sigaction act, oact, *pact = 0; 6095 struct target_old_sigaction *old_act; 6096 if (arg2) { 6097 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6098 goto efault; 6099 act._sa_handler = old_act->_sa_handler; 6100 target_siginitset(&act.sa_mask, old_act->sa_mask); 6101 act.sa_flags = old_act->sa_flags; 6102 act.sa_restorer = 0; 6103 unlock_user_struct(old_act, arg2, 0); 6104 pact = &act; 6105 } 6106 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6107 if (!is_error(ret) && arg3) { 6108 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6109 goto efault; 6110 old_act->_sa_handler = oact._sa_handler; 6111 old_act->sa_mask = oact.sa_mask.sig[0]; 6112 old_act->sa_flags = oact.sa_flags; 6113 unlock_user_struct(old_act, arg3, 1); 6114 } 6115 #elif defined(TARGET_MIPS) 6116 struct target_sigaction act, oact, *pact, *old_act; 6117 6118 if (arg2) { 6119 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6120 goto efault; 6121 act._sa_handler = old_act->_sa_handler; 6122 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 6123 act.sa_flags = old_act->sa_flags; 6124 unlock_user_struct(old_act, arg2, 0); 6125 pact = &act; 6126 } else { 6127 pact = NULL; 6128 } 6129 6130 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6131 6132 if (!is_error(ret) && arg3) { 6133 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6134 goto efault; 6135 old_act->_sa_handler = oact._sa_handler; 6136 old_act->sa_flags = oact.sa_flags; 6137 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 6138 old_act->sa_mask.sig[1] = 0; 6139 old_act->sa_mask.sig[2] = 0; 6140 old_act->sa_mask.sig[3] = 0; 6141 unlock_user_struct(old_act, arg3, 1); 6142 } 6143 #else 6144 struct target_old_sigaction *old_act; 6145 struct target_sigaction act, oact, *pact; 6146 if (arg2) { 6147 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6148 goto efault; 6149 act._sa_handler = old_act->_sa_handler; 6150 
target_siginitset(&act.sa_mask, old_act->sa_mask); 6151 act.sa_flags = old_act->sa_flags; 6152 act.sa_restorer = old_act->sa_restorer; 6153 unlock_user_struct(old_act, arg2, 0); 6154 pact = &act; 6155 } else { 6156 pact = NULL; 6157 } 6158 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6159 if (!is_error(ret) && arg3) { 6160 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6161 goto efault; 6162 old_act->_sa_handler = oact._sa_handler; 6163 old_act->sa_mask = oact.sa_mask.sig[0]; 6164 old_act->sa_flags = oact.sa_flags; 6165 old_act->sa_restorer = oact.sa_restorer; 6166 unlock_user_struct(old_act, arg3, 1); 6167 } 6168 #endif 6169 } 6170 break; 6171 #endif 6172 case TARGET_NR_rt_sigaction: 6173 { 6174 #if defined(TARGET_ALPHA) 6175 struct target_sigaction act, oact, *pact = 0; 6176 struct target_rt_sigaction *rt_act; 6177 /* ??? arg4 == sizeof(sigset_t). */ 6178 if (arg2) { 6179 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 6180 goto efault; 6181 act._sa_handler = rt_act->_sa_handler; 6182 act.sa_mask = rt_act->sa_mask; 6183 act.sa_flags = rt_act->sa_flags; 6184 act.sa_restorer = arg5; 6185 unlock_user_struct(rt_act, arg2, 0); 6186 pact = &act; 6187 } 6188 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6189 if (!is_error(ret) && arg3) { 6190 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 6191 goto efault; 6192 rt_act->_sa_handler = oact._sa_handler; 6193 rt_act->sa_mask = oact.sa_mask; 6194 rt_act->sa_flags = oact.sa_flags; 6195 unlock_user_struct(rt_act, arg3, 1); 6196 } 6197 #else 6198 struct target_sigaction *act; 6199 struct target_sigaction *oact; 6200 6201 if (arg2) { 6202 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 6203 goto efault; 6204 } else 6205 act = NULL; 6206 if (arg3) { 6207 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 6208 ret = -TARGET_EFAULT; 6209 goto rt_sigaction_fail; 6210 } 6211 } else 6212 oact = NULL; 6213 ret = get_errno(do_sigaction(arg1, act, oact)); 6214 rt_sigaction_fail: 6215 if (act) 6216 
unlock_user_struct(act, arg2, 0); 6217 if (oact) 6218 unlock_user_struct(oact, arg3, 1); 6219 #endif 6220 } 6221 break; 6222 #ifdef TARGET_NR_sgetmask /* not on alpha */ 6223 case TARGET_NR_sgetmask: 6224 { 6225 sigset_t cur_set; 6226 abi_ulong target_set; 6227 do_sigprocmask(0, NULL, &cur_set); 6228 host_to_target_old_sigset(&target_set, &cur_set); 6229 ret = target_set; 6230 } 6231 break; 6232 #endif 6233 #ifdef TARGET_NR_ssetmask /* not on alpha */ 6234 case TARGET_NR_ssetmask: 6235 { 6236 sigset_t set, oset, cur_set; 6237 abi_ulong target_set = arg1; 6238 do_sigprocmask(0, NULL, &cur_set); 6239 target_to_host_old_sigset(&set, &target_set); 6240 sigorset(&set, &set, &cur_set); 6241 do_sigprocmask(SIG_SETMASK, &set, &oset); 6242 host_to_target_old_sigset(&target_set, &oset); 6243 ret = target_set; 6244 } 6245 break; 6246 #endif 6247 #ifdef TARGET_NR_sigprocmask 6248 case TARGET_NR_sigprocmask: 6249 { 6250 #if defined(TARGET_ALPHA) 6251 sigset_t set, oldset; 6252 abi_ulong mask; 6253 int how; 6254 6255 switch (arg1) { 6256 case TARGET_SIG_BLOCK: 6257 how = SIG_BLOCK; 6258 break; 6259 case TARGET_SIG_UNBLOCK: 6260 how = SIG_UNBLOCK; 6261 break; 6262 case TARGET_SIG_SETMASK: 6263 how = SIG_SETMASK; 6264 break; 6265 default: 6266 ret = -TARGET_EINVAL; 6267 goto fail; 6268 } 6269 mask = arg2; 6270 target_to_host_old_sigset(&set, &mask); 6271 6272 ret = get_errno(do_sigprocmask(how, &set, &oldset)); 6273 if (!is_error(ret)) { 6274 host_to_target_old_sigset(&mask, &oldset); 6275 ret = mask; 6276 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6277 } 6278 #else 6279 sigset_t set, oldset, *set_ptr; 6280 int how; 6281 6282 if (arg2) { 6283 switch (arg1) { 6284 case TARGET_SIG_BLOCK: 6285 how = SIG_BLOCK; 6286 break; 6287 case TARGET_SIG_UNBLOCK: 6288 how = SIG_UNBLOCK; 6289 break; 6290 case TARGET_SIG_SETMASK: 6291 how = SIG_SETMASK; 6292 break; 6293 default: 6294 ret = -TARGET_EINVAL; 6295 goto fail; 6296 } 6297 if (!(p = lock_user(VERIFY_READ, arg2, 
sizeof(target_sigset_t), 1))) 6298 goto efault; 6299 target_to_host_old_sigset(&set, p); 6300 unlock_user(p, arg2, 0); 6301 set_ptr = &set; 6302 } else { 6303 how = 0; 6304 set_ptr = NULL; 6305 } 6306 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6307 if (!is_error(ret) && arg3) { 6308 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6309 goto efault; 6310 host_to_target_old_sigset(p, &oldset); 6311 unlock_user(p, arg3, sizeof(target_sigset_t)); 6312 } 6313 #endif 6314 } 6315 break; 6316 #endif 6317 case TARGET_NR_rt_sigprocmask: 6318 { 6319 int how = arg1; 6320 sigset_t set, oldset, *set_ptr; 6321 6322 if (arg2) { 6323 switch(how) { 6324 case TARGET_SIG_BLOCK: 6325 how = SIG_BLOCK; 6326 break; 6327 case TARGET_SIG_UNBLOCK: 6328 how = SIG_UNBLOCK; 6329 break; 6330 case TARGET_SIG_SETMASK: 6331 how = SIG_SETMASK; 6332 break; 6333 default: 6334 ret = -TARGET_EINVAL; 6335 goto fail; 6336 } 6337 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6338 goto efault; 6339 target_to_host_sigset(&set, p); 6340 unlock_user(p, arg2, 0); 6341 set_ptr = &set; 6342 } else { 6343 how = 0; 6344 set_ptr = NULL; 6345 } 6346 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6347 if (!is_error(ret) && arg3) { 6348 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6349 goto efault; 6350 host_to_target_sigset(p, &oldset); 6351 unlock_user(p, arg3, sizeof(target_sigset_t)); 6352 } 6353 } 6354 break; 6355 #ifdef TARGET_NR_sigpending 6356 case TARGET_NR_sigpending: 6357 { 6358 sigset_t set; 6359 ret = get_errno(sigpending(&set)); 6360 if (!is_error(ret)) { 6361 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6362 goto efault; 6363 host_to_target_old_sigset(p, &set); 6364 unlock_user(p, arg1, sizeof(target_sigset_t)); 6365 } 6366 } 6367 break; 6368 #endif 6369 case TARGET_NR_rt_sigpending: 6370 { 6371 sigset_t set; 6372 ret = get_errno(sigpending(&set)); 6373 if (!is_error(ret)) { 6374 if (!(p = 
lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6375 goto efault; 6376 host_to_target_sigset(p, &set); 6377 unlock_user(p, arg1, sizeof(target_sigset_t)); 6378 } 6379 } 6380 break; 6381 #ifdef TARGET_NR_sigsuspend 6382 case TARGET_NR_sigsuspend: 6383 { 6384 sigset_t set; 6385 #if defined(TARGET_ALPHA) 6386 abi_ulong mask = arg1; 6387 target_to_host_old_sigset(&set, &mask); 6388 #else 6389 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6390 goto efault; 6391 target_to_host_old_sigset(&set, p); 6392 unlock_user(p, arg1, 0); 6393 #endif 6394 ret = get_errno(sigsuspend(&set)); 6395 } 6396 break; 6397 #endif 6398 case TARGET_NR_rt_sigsuspend: 6399 { 6400 sigset_t set; 6401 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6402 goto efault; 6403 target_to_host_sigset(&set, p); 6404 unlock_user(p, arg1, 0); 6405 ret = get_errno(sigsuspend(&set)); 6406 } 6407 break; 6408 case TARGET_NR_rt_sigtimedwait: 6409 { 6410 sigset_t set; 6411 struct timespec uts, *puts; 6412 siginfo_t uinfo; 6413 6414 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6415 goto efault; 6416 target_to_host_sigset(&set, p); 6417 unlock_user(p, arg1, 0); 6418 if (arg3) { 6419 puts = &uts; 6420 target_to_host_timespec(puts, arg3); 6421 } else { 6422 puts = NULL; 6423 } 6424 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6425 if (!is_error(ret)) { 6426 if (arg2) { 6427 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 6428 0); 6429 if (!p) { 6430 goto efault; 6431 } 6432 host_to_target_siginfo(p, &uinfo); 6433 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6434 } 6435 ret = host_to_target_signal(ret); 6436 } 6437 } 6438 break; 6439 case TARGET_NR_rt_sigqueueinfo: 6440 { 6441 siginfo_t uinfo; 6442 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6443 goto efault; 6444 target_to_host_siginfo(&uinfo, p); 6445 unlock_user(p, arg1, 0); 6446 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6447 } 
6448 break; 6449 #ifdef TARGET_NR_sigreturn 6450 case TARGET_NR_sigreturn: 6451 /* NOTE: ret is eax, so not transcoding must be done */ 6452 ret = do_sigreturn(cpu_env); 6453 break; 6454 #endif 6455 case TARGET_NR_rt_sigreturn: 6456 /* NOTE: ret is eax, so not transcoding must be done */ 6457 ret = do_rt_sigreturn(cpu_env); 6458 break; 6459 case TARGET_NR_sethostname: 6460 if (!(p = lock_user_string(arg1))) 6461 goto efault; 6462 ret = get_errno(sethostname(p, arg2)); 6463 unlock_user(p, arg1, 0); 6464 break; 6465 case TARGET_NR_setrlimit: 6466 { 6467 int resource = target_to_host_resource(arg1); 6468 struct target_rlimit *target_rlim; 6469 struct rlimit rlim; 6470 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6471 goto efault; 6472 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6473 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6474 unlock_user_struct(target_rlim, arg2, 0); 6475 ret = get_errno(setrlimit(resource, &rlim)); 6476 } 6477 break; 6478 case TARGET_NR_getrlimit: 6479 { 6480 int resource = target_to_host_resource(arg1); 6481 struct target_rlimit *target_rlim; 6482 struct rlimit rlim; 6483 6484 ret = get_errno(getrlimit(resource, &rlim)); 6485 if (!is_error(ret)) { 6486 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6487 goto efault; 6488 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6489 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6490 unlock_user_struct(target_rlim, arg2, 1); 6491 } 6492 } 6493 break; 6494 case TARGET_NR_getrusage: 6495 { 6496 struct rusage rusage; 6497 ret = get_errno(getrusage(arg1, &rusage)); 6498 if (!is_error(ret)) { 6499 ret = host_to_target_rusage(arg2, &rusage); 6500 } 6501 } 6502 break; 6503 case TARGET_NR_gettimeofday: 6504 { 6505 struct timeval tv; 6506 ret = get_errno(gettimeofday(&tv, NULL)); 6507 if (!is_error(ret)) { 6508 if (copy_to_user_timeval(arg1, &tv)) 6509 goto efault; 6510 } 6511 } 6512 break; 6513 case TARGET_NR_settimeofday: 6514 { 
6515 struct timeval tv, *ptv = NULL; 6516 struct timezone tz, *ptz = NULL; 6517 6518 if (arg1) { 6519 if (copy_from_user_timeval(&tv, arg1)) { 6520 goto efault; 6521 } 6522 ptv = &tv; 6523 } 6524 6525 if (arg2) { 6526 if (copy_from_user_timezone(&tz, arg2)) { 6527 goto efault; 6528 } 6529 ptz = &tz; 6530 } 6531 6532 ret = get_errno(settimeofday(ptv, ptz)); 6533 } 6534 break; 6535 #if defined(TARGET_NR_select) 6536 case TARGET_NR_select: 6537 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 6538 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6539 #else 6540 { 6541 struct target_sel_arg_struct *sel; 6542 abi_ulong inp, outp, exp, tvp; 6543 long nsel; 6544 6545 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6546 goto efault; 6547 nsel = tswapal(sel->n); 6548 inp = tswapal(sel->inp); 6549 outp = tswapal(sel->outp); 6550 exp = tswapal(sel->exp); 6551 tvp = tswapal(sel->tvp); 6552 unlock_user_struct(sel, arg1, 0); 6553 ret = do_select(nsel, inp, outp, exp, tvp); 6554 } 6555 #endif 6556 break; 6557 #endif 6558 #ifdef TARGET_NR_pselect6 6559 case TARGET_NR_pselect6: 6560 { 6561 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6562 fd_set rfds, wfds, efds; 6563 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6564 struct timespec ts, *ts_ptr; 6565 6566 /* 6567 * The 6th arg is actually two args smashed together, 6568 * so we cannot use the C library. 
6569 */ 6570 sigset_t set; 6571 struct { 6572 sigset_t *set; 6573 size_t size; 6574 } sig, *sig_ptr; 6575 6576 abi_ulong arg_sigset, arg_sigsize, *arg7; 6577 target_sigset_t *target_sigset; 6578 6579 n = arg1; 6580 rfd_addr = arg2; 6581 wfd_addr = arg3; 6582 efd_addr = arg4; 6583 ts_addr = arg5; 6584 6585 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6586 if (ret) { 6587 goto fail; 6588 } 6589 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6590 if (ret) { 6591 goto fail; 6592 } 6593 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6594 if (ret) { 6595 goto fail; 6596 } 6597 6598 /* 6599 * This takes a timespec, and not a timeval, so we cannot 6600 * use the do_select() helper ... 6601 */ 6602 if (ts_addr) { 6603 if (target_to_host_timespec(&ts, ts_addr)) { 6604 goto efault; 6605 } 6606 ts_ptr = &ts; 6607 } else { 6608 ts_ptr = NULL; 6609 } 6610 6611 /* Extract the two packed args for the sigset */ 6612 if (arg6) { 6613 sig_ptr = &sig; 6614 sig.size = _NSIG / 8; 6615 6616 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6617 if (!arg7) { 6618 goto efault; 6619 } 6620 arg_sigset = tswapal(arg7[0]); 6621 arg_sigsize = tswapal(arg7[1]); 6622 unlock_user(arg7, arg6, 0); 6623 6624 if (arg_sigset) { 6625 sig.set = &set; 6626 if (arg_sigsize != sizeof(*target_sigset)) { 6627 /* Like the kernel, we enforce correct size sigsets */ 6628 ret = -TARGET_EINVAL; 6629 goto fail; 6630 } 6631 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6632 sizeof(*target_sigset), 1); 6633 if (!target_sigset) { 6634 goto efault; 6635 } 6636 target_to_host_sigset(&set, target_sigset); 6637 unlock_user(target_sigset, arg_sigset, 0); 6638 } else { 6639 sig.set = NULL; 6640 } 6641 } else { 6642 sig_ptr = NULL; 6643 } 6644 6645 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6646 ts_ptr, sig_ptr)); 6647 6648 if (!is_error(ret)) { 6649 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6650 goto efault; 6651 if (wfd_addr && 
copy_to_user_fdset(wfd_addr, &wfds, n)) 6652 goto efault; 6653 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6654 goto efault; 6655 6656 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6657 goto efault; 6658 } 6659 } 6660 break; 6661 #endif 6662 case TARGET_NR_symlink: 6663 { 6664 void *p2; 6665 p = lock_user_string(arg1); 6666 p2 = lock_user_string(arg2); 6667 if (!p || !p2) 6668 ret = -TARGET_EFAULT; 6669 else 6670 ret = get_errno(symlink(p, p2)); 6671 unlock_user(p2, arg2, 0); 6672 unlock_user(p, arg1, 0); 6673 } 6674 break; 6675 #if defined(TARGET_NR_symlinkat) 6676 case TARGET_NR_symlinkat: 6677 { 6678 void *p2; 6679 p = lock_user_string(arg1); 6680 p2 = lock_user_string(arg3); 6681 if (!p || !p2) 6682 ret = -TARGET_EFAULT; 6683 else 6684 ret = get_errno(symlinkat(p, arg2, p2)); 6685 unlock_user(p2, arg3, 0); 6686 unlock_user(p, arg1, 0); 6687 } 6688 break; 6689 #endif 6690 #ifdef TARGET_NR_oldlstat 6691 case TARGET_NR_oldlstat: 6692 goto unimplemented; 6693 #endif 6694 case TARGET_NR_readlink: 6695 { 6696 void *p2; 6697 p = lock_user_string(arg1); 6698 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6699 if (!p || !p2) { 6700 ret = -TARGET_EFAULT; 6701 } else if (!arg3) { 6702 /* Short circuit this for the magic exe check. */ 6703 ret = -TARGET_EINVAL; 6704 } else if (is_proc_myself((const char *)p, "exe")) { 6705 char real[PATH_MAX], *temp; 6706 temp = realpath(exec_path, real); 6707 /* Return value is # of bytes that we wrote to the buffer. */ 6708 if (temp == NULL) { 6709 ret = get_errno(-1); 6710 } else { 6711 /* Don't worry about sign mismatch as earlier mapping 6712 * logic would have thrown a bad address error. */ 6713 ret = MIN(strlen(real), arg3); 6714 /* We cannot NUL terminate the string. 
*/ 6715 memcpy(p2, real, ret); 6716 } 6717 } else { 6718 ret = get_errno(readlink(path(p), p2, arg3)); 6719 } 6720 unlock_user(p2, arg2, ret); 6721 unlock_user(p, arg1, 0); 6722 } 6723 break; 6724 #if defined(TARGET_NR_readlinkat) 6725 case TARGET_NR_readlinkat: 6726 { 6727 void *p2; 6728 p = lock_user_string(arg2); 6729 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6730 if (!p || !p2) { 6731 ret = -TARGET_EFAULT; 6732 } else if (is_proc_myself((const char *)p, "exe")) { 6733 char real[PATH_MAX], *temp; 6734 temp = realpath(exec_path, real); 6735 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6736 snprintf((char *)p2, arg4, "%s", real); 6737 } else { 6738 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 6739 } 6740 unlock_user(p2, arg3, ret); 6741 unlock_user(p, arg2, 0); 6742 } 6743 break; 6744 #endif 6745 #ifdef TARGET_NR_uselib 6746 case TARGET_NR_uselib: 6747 goto unimplemented; 6748 #endif 6749 #ifdef TARGET_NR_swapon 6750 case TARGET_NR_swapon: 6751 if (!(p = lock_user_string(arg1))) 6752 goto efault; 6753 ret = get_errno(swapon(p, arg2)); 6754 unlock_user(p, arg1, 0); 6755 break; 6756 #endif 6757 case TARGET_NR_reboot: 6758 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6759 /* arg4 must be ignored in all other cases */ 6760 p = lock_user_string(arg4); 6761 if (!p) { 6762 goto efault; 6763 } 6764 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6765 unlock_user(p, arg4, 0); 6766 } else { 6767 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6768 } 6769 break; 6770 #ifdef TARGET_NR_readdir 6771 case TARGET_NR_readdir: 6772 goto unimplemented; 6773 #endif 6774 #ifdef TARGET_NR_mmap 6775 case TARGET_NR_mmap: 6776 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6777 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 6778 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6779 || defined(TARGET_S390X) 6780 { 6781 abi_ulong *v; 6782 abi_ulong v1, v2, v3, v4, v5, v6; 6783 if (!(v = lock_user(VERIFY_READ, arg1, 6 * 
sizeof(abi_ulong), 1))) 6784 goto efault; 6785 v1 = tswapal(v[0]); 6786 v2 = tswapal(v[1]); 6787 v3 = tswapal(v[2]); 6788 v4 = tswapal(v[3]); 6789 v5 = tswapal(v[4]); 6790 v6 = tswapal(v[5]); 6791 unlock_user(v, arg1, 0); 6792 ret = get_errno(target_mmap(v1, v2, v3, 6793 target_to_host_bitmask(v4, mmap_flags_tbl), 6794 v5, v6)); 6795 } 6796 #else 6797 ret = get_errno(target_mmap(arg1, arg2, arg3, 6798 target_to_host_bitmask(arg4, mmap_flags_tbl), 6799 arg5, 6800 arg6)); 6801 #endif 6802 break; 6803 #endif 6804 #ifdef TARGET_NR_mmap2 6805 case TARGET_NR_mmap2: 6806 #ifndef MMAP_SHIFT 6807 #define MMAP_SHIFT 12 6808 #endif 6809 ret = get_errno(target_mmap(arg1, arg2, arg3, 6810 target_to_host_bitmask(arg4, mmap_flags_tbl), 6811 arg5, 6812 arg6 << MMAP_SHIFT)); 6813 break; 6814 #endif 6815 case TARGET_NR_munmap: 6816 ret = get_errno(target_munmap(arg1, arg2)); 6817 break; 6818 case TARGET_NR_mprotect: 6819 { 6820 TaskState *ts = cpu->opaque; 6821 /* Special hack to detect libc making the stack executable. */ 6822 if ((arg3 & PROT_GROWSDOWN) 6823 && arg1 >= ts->info->stack_limit 6824 && arg1 <= ts->info->start_stack) { 6825 arg3 &= ~PROT_GROWSDOWN; 6826 arg2 = arg2 + arg1 - ts->info->stack_limit; 6827 arg1 = ts->info->stack_limit; 6828 } 6829 } 6830 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6831 break; 6832 #ifdef TARGET_NR_mremap 6833 case TARGET_NR_mremap: 6834 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6835 break; 6836 #endif 6837 /* ??? msync/mlock/munlock are broken for softmmu. 
*/ 6838 #ifdef TARGET_NR_msync 6839 case TARGET_NR_msync: 6840 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6841 break; 6842 #endif 6843 #ifdef TARGET_NR_mlock 6844 case TARGET_NR_mlock: 6845 ret = get_errno(mlock(g2h(arg1), arg2)); 6846 break; 6847 #endif 6848 #ifdef TARGET_NR_munlock 6849 case TARGET_NR_munlock: 6850 ret = get_errno(munlock(g2h(arg1), arg2)); 6851 break; 6852 #endif 6853 #ifdef TARGET_NR_mlockall 6854 case TARGET_NR_mlockall: 6855 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 6856 break; 6857 #endif 6858 #ifdef TARGET_NR_munlockall 6859 case TARGET_NR_munlockall: 6860 ret = get_errno(munlockall()); 6861 break; 6862 #endif 6863 case TARGET_NR_truncate: 6864 if (!(p = lock_user_string(arg1))) 6865 goto efault; 6866 ret = get_errno(truncate(p, arg2)); 6867 unlock_user(p, arg1, 0); 6868 break; 6869 case TARGET_NR_ftruncate: 6870 ret = get_errno(ftruncate(arg1, arg2)); 6871 break; 6872 case TARGET_NR_fchmod: 6873 ret = get_errno(fchmod(arg1, arg2)); 6874 break; 6875 #if defined(TARGET_NR_fchmodat) 6876 case TARGET_NR_fchmodat: 6877 if (!(p = lock_user_string(arg2))) 6878 goto efault; 6879 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 6880 unlock_user(p, arg2, 0); 6881 break; 6882 #endif 6883 case TARGET_NR_getpriority: 6884 /* Note that negative values are valid for getpriority, so we must 6885 differentiate based on errno settings. */ 6886 errno = 0; 6887 ret = getpriority(arg1, arg2); 6888 if (ret == -1 && errno != 0) { 6889 ret = -host_to_target_errno(errno); 6890 break; 6891 } 6892 #ifdef TARGET_ALPHA 6893 /* Return value is the unbiased priority. Signal no error. */ 6894 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6895 #else 6896 /* Return value is a biased priority to avoid negative numbers. 
*/ 6897 ret = 20 - ret; 6898 #endif 6899 break; 6900 case TARGET_NR_setpriority: 6901 ret = get_errno(setpriority(arg1, arg2, arg3)); 6902 break; 6903 #ifdef TARGET_NR_profil 6904 case TARGET_NR_profil: 6905 goto unimplemented; 6906 #endif 6907 case TARGET_NR_statfs: 6908 if (!(p = lock_user_string(arg1))) 6909 goto efault; 6910 ret = get_errno(statfs(path(p), &stfs)); 6911 unlock_user(p, arg1, 0); 6912 convert_statfs: 6913 if (!is_error(ret)) { 6914 struct target_statfs *target_stfs; 6915 6916 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6917 goto efault; 6918 __put_user(stfs.f_type, &target_stfs->f_type); 6919 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6920 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6921 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6922 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6923 __put_user(stfs.f_files, &target_stfs->f_files); 6924 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6925 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6926 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6927 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6928 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6929 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6930 unlock_user_struct(target_stfs, arg2, 1); 6931 } 6932 break; 6933 case TARGET_NR_fstatfs: 6934 ret = get_errno(fstatfs(arg1, &stfs)); 6935 goto convert_statfs; 6936 #ifdef TARGET_NR_statfs64 6937 case TARGET_NR_statfs64: 6938 if (!(p = lock_user_string(arg1))) 6939 goto efault; 6940 ret = get_errno(statfs(path(p), &stfs)); 6941 unlock_user(p, arg1, 0); 6942 convert_statfs64: 6943 if (!is_error(ret)) { 6944 struct target_statfs64 *target_stfs; 6945 6946 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6947 goto efault; 6948 __put_user(stfs.f_type, &target_stfs->f_type); 6949 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6950 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6951 
__put_user(stfs.f_bfree, &target_stfs->f_bfree); 6952 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6953 __put_user(stfs.f_files, &target_stfs->f_files); 6954 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6955 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6956 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6957 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6958 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6959 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6960 unlock_user_struct(target_stfs, arg3, 1); 6961 } 6962 break; 6963 case TARGET_NR_fstatfs64: 6964 ret = get_errno(fstatfs(arg1, &stfs)); 6965 goto convert_statfs64; 6966 #endif 6967 #ifdef TARGET_NR_ioperm 6968 case TARGET_NR_ioperm: 6969 goto unimplemented; 6970 #endif 6971 #ifdef TARGET_NR_socketcall 6972 case TARGET_NR_socketcall: 6973 ret = do_socketcall(arg1, arg2); 6974 break; 6975 #endif 6976 #ifdef TARGET_NR_accept 6977 case TARGET_NR_accept: 6978 ret = do_accept4(arg1, arg2, arg3, 0); 6979 break; 6980 #endif 6981 #ifdef TARGET_NR_accept4 6982 case TARGET_NR_accept4: 6983 #ifdef CONFIG_ACCEPT4 6984 ret = do_accept4(arg1, arg2, arg3, arg4); 6985 #else 6986 goto unimplemented; 6987 #endif 6988 break; 6989 #endif 6990 #ifdef TARGET_NR_bind 6991 case TARGET_NR_bind: 6992 ret = do_bind(arg1, arg2, arg3); 6993 break; 6994 #endif 6995 #ifdef TARGET_NR_connect 6996 case TARGET_NR_connect: 6997 ret = do_connect(arg1, arg2, arg3); 6998 break; 6999 #endif 7000 #ifdef TARGET_NR_getpeername 7001 case TARGET_NR_getpeername: 7002 ret = do_getpeername(arg1, arg2, arg3); 7003 break; 7004 #endif 7005 #ifdef TARGET_NR_getsockname 7006 case TARGET_NR_getsockname: 7007 ret = do_getsockname(arg1, arg2, arg3); 7008 break; 7009 #endif 7010 #ifdef TARGET_NR_getsockopt 7011 case TARGET_NR_getsockopt: 7012 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 7013 break; 7014 #endif 7015 #ifdef TARGET_NR_listen 7016 case TARGET_NR_listen: 7017 ret = 
get_errno(listen(arg1, arg2)); 7018 break; 7019 #endif 7020 #ifdef TARGET_NR_recv 7021 case TARGET_NR_recv: 7022 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 7023 break; 7024 #endif 7025 #ifdef TARGET_NR_recvfrom 7026 case TARGET_NR_recvfrom: 7027 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 7028 break; 7029 #endif 7030 #ifdef TARGET_NR_recvmsg 7031 case TARGET_NR_recvmsg: 7032 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 7033 break; 7034 #endif 7035 #ifdef TARGET_NR_send 7036 case TARGET_NR_send: 7037 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 7038 break; 7039 #endif 7040 #ifdef TARGET_NR_sendmsg 7041 case TARGET_NR_sendmsg: 7042 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 7043 break; 7044 #endif 7045 #ifdef TARGET_NR_sendmmsg 7046 case TARGET_NR_sendmmsg: 7047 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 7048 break; 7049 case TARGET_NR_recvmmsg: 7050 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 7051 break; 7052 #endif 7053 #ifdef TARGET_NR_sendto 7054 case TARGET_NR_sendto: 7055 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 7056 break; 7057 #endif 7058 #ifdef TARGET_NR_shutdown 7059 case TARGET_NR_shutdown: 7060 ret = get_errno(shutdown(arg1, arg2)); 7061 break; 7062 #endif 7063 #ifdef TARGET_NR_socket 7064 case TARGET_NR_socket: 7065 ret = do_socket(arg1, arg2, arg3); 7066 break; 7067 #endif 7068 #ifdef TARGET_NR_socketpair 7069 case TARGET_NR_socketpair: 7070 ret = do_socketpair(arg1, arg2, arg3, arg4); 7071 break; 7072 #endif 7073 #ifdef TARGET_NR_setsockopt 7074 case TARGET_NR_setsockopt: 7075 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 7076 break; 7077 #endif 7078 7079 case TARGET_NR_syslog: 7080 if (!(p = lock_user_string(arg2))) 7081 goto efault; 7082 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 7083 unlock_user(p, arg2, 0); 7084 break; 7085 7086 case TARGET_NR_setitimer: 7087 { 7088 struct itimerval value, ovalue, *pvalue; 7089 7090 if (arg2) { 7091 pvalue = &value; 7092 if 
(copy_from_user_timeval(&pvalue->it_interval, arg2) 7093 || copy_from_user_timeval(&pvalue->it_value, 7094 arg2 + sizeof(struct target_timeval))) 7095 goto efault; 7096 } else { 7097 pvalue = NULL; 7098 } 7099 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 7100 if (!is_error(ret) && arg3) { 7101 if (copy_to_user_timeval(arg3, 7102 &ovalue.it_interval) 7103 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 7104 &ovalue.it_value)) 7105 goto efault; 7106 } 7107 } 7108 break; 7109 case TARGET_NR_getitimer: 7110 { 7111 struct itimerval value; 7112 7113 ret = get_errno(getitimer(arg1, &value)); 7114 if (!is_error(ret) && arg2) { 7115 if (copy_to_user_timeval(arg2, 7116 &value.it_interval) 7117 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 7118 &value.it_value)) 7119 goto efault; 7120 } 7121 } 7122 break; 7123 case TARGET_NR_stat: 7124 if (!(p = lock_user_string(arg1))) 7125 goto efault; 7126 ret = get_errno(stat(path(p), &st)); 7127 unlock_user(p, arg1, 0); 7128 goto do_stat; 7129 case TARGET_NR_lstat: 7130 if (!(p = lock_user_string(arg1))) 7131 goto efault; 7132 ret = get_errno(lstat(path(p), &st)); 7133 unlock_user(p, arg1, 0); 7134 goto do_stat; 7135 case TARGET_NR_fstat: 7136 { 7137 ret = get_errno(fstat(arg1, &st)); 7138 do_stat: 7139 if (!is_error(ret)) { 7140 struct target_stat *target_st; 7141 7142 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 7143 goto efault; 7144 memset(target_st, 0, sizeof(*target_st)); 7145 __put_user(st.st_dev, &target_st->st_dev); 7146 __put_user(st.st_ino, &target_st->st_ino); 7147 __put_user(st.st_mode, &target_st->st_mode); 7148 __put_user(st.st_uid, &target_st->st_uid); 7149 __put_user(st.st_gid, &target_st->st_gid); 7150 __put_user(st.st_nlink, &target_st->st_nlink); 7151 __put_user(st.st_rdev, &target_st->st_rdev); 7152 __put_user(st.st_size, &target_st->st_size); 7153 __put_user(st.st_blksize, &target_st->st_blksize); 7154 __put_user(st.st_blocks, &target_st->st_blocks); 7155 
__put_user(st.st_atime, &target_st->target_st_atime); 7156 __put_user(st.st_mtime, &target_st->target_st_mtime); 7157 __put_user(st.st_ctime, &target_st->target_st_ctime); 7158 unlock_user_struct(target_st, arg2, 1); 7159 } 7160 } 7161 break; 7162 #ifdef TARGET_NR_olduname 7163 case TARGET_NR_olduname: 7164 goto unimplemented; 7165 #endif 7166 #ifdef TARGET_NR_iopl 7167 case TARGET_NR_iopl: 7168 goto unimplemented; 7169 #endif 7170 case TARGET_NR_vhangup: 7171 ret = get_errno(vhangup()); 7172 break; 7173 #ifdef TARGET_NR_idle 7174 case TARGET_NR_idle: 7175 goto unimplemented; 7176 #endif 7177 #ifdef TARGET_NR_syscall 7178 case TARGET_NR_syscall: 7179 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 7180 arg6, arg7, arg8, 0); 7181 break; 7182 #endif 7183 case TARGET_NR_wait4: 7184 { 7185 int status; 7186 abi_long status_ptr = arg2; 7187 struct rusage rusage, *rusage_ptr; 7188 abi_ulong target_rusage = arg4; 7189 abi_long rusage_err; 7190 if (target_rusage) 7191 rusage_ptr = &rusage; 7192 else 7193 rusage_ptr = NULL; 7194 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 7195 if (!is_error(ret)) { 7196 if (status_ptr && ret) { 7197 status = host_to_target_waitstatus(status); 7198 if (put_user_s32(status, status_ptr)) 7199 goto efault; 7200 } 7201 if (target_rusage) { 7202 rusage_err = host_to_target_rusage(target_rusage, &rusage); 7203 if (rusage_err) { 7204 ret = rusage_err; 7205 } 7206 } 7207 } 7208 } 7209 break; 7210 #ifdef TARGET_NR_swapoff 7211 case TARGET_NR_swapoff: 7212 if (!(p = lock_user_string(arg1))) 7213 goto efault; 7214 ret = get_errno(swapoff(p)); 7215 unlock_user(p, arg1, 0); 7216 break; 7217 #endif 7218 case TARGET_NR_sysinfo: 7219 { 7220 struct target_sysinfo *target_value; 7221 struct sysinfo value; 7222 ret = get_errno(sysinfo(&value)); 7223 if (!is_error(ret) && arg1) 7224 { 7225 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 7226 goto efault; 7227 __put_user(value.uptime, &target_value->uptime); 7228 
__put_user(value.loads[0], &target_value->loads[0]); 7229 __put_user(value.loads[1], &target_value->loads[1]); 7230 __put_user(value.loads[2], &target_value->loads[2]); 7231 __put_user(value.totalram, &target_value->totalram); 7232 __put_user(value.freeram, &target_value->freeram); 7233 __put_user(value.sharedram, &target_value->sharedram); 7234 __put_user(value.bufferram, &target_value->bufferram); 7235 __put_user(value.totalswap, &target_value->totalswap); 7236 __put_user(value.freeswap, &target_value->freeswap); 7237 __put_user(value.procs, &target_value->procs); 7238 __put_user(value.totalhigh, &target_value->totalhigh); 7239 __put_user(value.freehigh, &target_value->freehigh); 7240 __put_user(value.mem_unit, &target_value->mem_unit); 7241 unlock_user_struct(target_value, arg1, 1); 7242 } 7243 } 7244 break; 7245 #ifdef TARGET_NR_ipc 7246 case TARGET_NR_ipc: 7247 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 7248 break; 7249 #endif 7250 #ifdef TARGET_NR_semget 7251 case TARGET_NR_semget: 7252 ret = get_errno(semget(arg1, arg2, arg3)); 7253 break; 7254 #endif 7255 #ifdef TARGET_NR_semop 7256 case TARGET_NR_semop: 7257 ret = do_semop(arg1, arg2, arg3); 7258 break; 7259 #endif 7260 #ifdef TARGET_NR_semctl 7261 case TARGET_NR_semctl: 7262 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 7263 break; 7264 #endif 7265 #ifdef TARGET_NR_msgctl 7266 case TARGET_NR_msgctl: 7267 ret = do_msgctl(arg1, arg2, arg3); 7268 break; 7269 #endif 7270 #ifdef TARGET_NR_msgget 7271 case TARGET_NR_msgget: 7272 ret = get_errno(msgget(arg1, arg2)); 7273 break; 7274 #endif 7275 #ifdef TARGET_NR_msgrcv 7276 case TARGET_NR_msgrcv: 7277 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 7278 break; 7279 #endif 7280 #ifdef TARGET_NR_msgsnd 7281 case TARGET_NR_msgsnd: 7282 ret = do_msgsnd(arg1, arg2, arg3, arg4); 7283 break; 7284 #endif 7285 #ifdef TARGET_NR_shmget 7286 case TARGET_NR_shmget: 7287 ret = get_errno(shmget(arg1, arg2, arg3)); 7288 break; 7289 #endif 7290 
#ifdef TARGET_NR_shmctl 7291 case TARGET_NR_shmctl: 7292 ret = do_shmctl(arg1, arg2, arg3); 7293 break; 7294 #endif 7295 #ifdef TARGET_NR_shmat 7296 case TARGET_NR_shmat: 7297 ret = do_shmat(arg1, arg2, arg3); 7298 break; 7299 #endif 7300 #ifdef TARGET_NR_shmdt 7301 case TARGET_NR_shmdt: 7302 ret = do_shmdt(arg1); 7303 break; 7304 #endif 7305 case TARGET_NR_fsync: 7306 ret = get_errno(fsync(arg1)); 7307 break; 7308 case TARGET_NR_clone: 7309 /* Linux manages to have three different orderings for its 7310 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 7311 * match the kernel's CONFIG_CLONE_* settings. 7312 * Microblaze is further special in that it uses a sixth 7313 * implicit argument to clone for the TLS pointer. 7314 */ 7315 #if defined(TARGET_MICROBLAZE) 7316 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 7317 #elif defined(TARGET_CLONE_BACKWARDS) 7318 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 7319 #elif defined(TARGET_CLONE_BACKWARDS2) 7320 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 7321 #else 7322 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 7323 #endif 7324 break; 7325 #ifdef __NR_exit_group 7326 /* new thread calls */ 7327 case TARGET_NR_exit_group: 7328 #ifdef TARGET_GPROF 7329 _mcleanup(); 7330 #endif 7331 gdb_exit(cpu_env, arg1); 7332 ret = get_errno(exit_group(arg1)); 7333 break; 7334 #endif 7335 case TARGET_NR_setdomainname: 7336 if (!(p = lock_user_string(arg1))) 7337 goto efault; 7338 ret = get_errno(setdomainname(p, arg2)); 7339 unlock_user(p, arg1, 0); 7340 break; 7341 case TARGET_NR_uname: 7342 /* no need to transcode because we use the linux syscall */ 7343 { 7344 struct new_utsname * buf; 7345 7346 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 7347 goto efault; 7348 ret = get_errno(sys_uname(buf)); 7349 if (!is_error(ret)) { 7350 /* Overrite the native machine name with whatever is being 7351 emulated. 
*/ 7352 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 7353 /* Allow the user to override the reported release. */ 7354 if (qemu_uname_release && *qemu_uname_release) 7355 strcpy (buf->release, qemu_uname_release); 7356 } 7357 unlock_user_struct(buf, arg1, 1); 7358 } 7359 break; 7360 #ifdef TARGET_I386 7361 case TARGET_NR_modify_ldt: 7362 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7363 break; 7364 #if !defined(TARGET_X86_64) 7365 case TARGET_NR_vm86old: 7366 goto unimplemented; 7367 case TARGET_NR_vm86: 7368 ret = do_vm86(cpu_env, arg1, arg2); 7369 break; 7370 #endif 7371 #endif 7372 case TARGET_NR_adjtimex: 7373 goto unimplemented; 7374 #ifdef TARGET_NR_create_module 7375 case TARGET_NR_create_module: 7376 #endif 7377 case TARGET_NR_init_module: 7378 case TARGET_NR_delete_module: 7379 #ifdef TARGET_NR_get_kernel_syms 7380 case TARGET_NR_get_kernel_syms: 7381 #endif 7382 goto unimplemented; 7383 case TARGET_NR_quotactl: 7384 goto unimplemented; 7385 case TARGET_NR_getpgid: 7386 ret = get_errno(getpgid(arg1)); 7387 break; 7388 case TARGET_NR_fchdir: 7389 ret = get_errno(fchdir(arg1)); 7390 break; 7391 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7392 case TARGET_NR_bdflush: 7393 goto unimplemented; 7394 #endif 7395 #ifdef TARGET_NR_sysfs 7396 case TARGET_NR_sysfs: 7397 goto unimplemented; 7398 #endif 7399 case TARGET_NR_personality: 7400 ret = get_errno(personality(arg1)); 7401 break; 7402 #ifdef TARGET_NR_afs_syscall 7403 case TARGET_NR_afs_syscall: 7404 goto unimplemented; 7405 #endif 7406 #ifdef TARGET_NR__llseek /* Not on alpha */ 7407 case TARGET_NR__llseek: 7408 { 7409 int64_t res; 7410 #if !defined(__NR_llseek) 7411 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7412 if (res == -1) { 7413 ret = get_errno(res); 7414 } else { 7415 ret = 0; 7416 } 7417 #else 7418 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7419 #endif 7420 if ((ret == 0) && put_user_s64(res, arg4)) { 7421 goto efault; 7422 } 7423 } 7424 break; 7425 #endif 7426 
case TARGET_NR_getdents: 7427 #ifdef __NR_getdents 7428 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7429 { 7430 struct target_dirent *target_dirp; 7431 struct linux_dirent *dirp; 7432 abi_long count = arg3; 7433 7434 dirp = malloc(count); 7435 if (!dirp) { 7436 ret = -TARGET_ENOMEM; 7437 goto fail; 7438 } 7439 7440 ret = get_errno(sys_getdents(arg1, dirp, count)); 7441 if (!is_error(ret)) { 7442 struct linux_dirent *de; 7443 struct target_dirent *tde; 7444 int len = ret; 7445 int reclen, treclen; 7446 int count1, tnamelen; 7447 7448 count1 = 0; 7449 de = dirp; 7450 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7451 goto efault; 7452 tde = target_dirp; 7453 while (len > 0) { 7454 reclen = de->d_reclen; 7455 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7456 assert(tnamelen >= 0); 7457 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7458 assert(count1 + treclen <= count); 7459 tde->d_reclen = tswap16(treclen); 7460 tde->d_ino = tswapal(de->d_ino); 7461 tde->d_off = tswapal(de->d_off); 7462 memcpy(tde->d_name, de->d_name, tnamelen); 7463 de = (struct linux_dirent *)((char *)de + reclen); 7464 len -= reclen; 7465 tde = (struct target_dirent *)((char *)tde + treclen); 7466 count1 += treclen; 7467 } 7468 ret = count1; 7469 unlock_user(target_dirp, arg2, ret); 7470 } 7471 free(dirp); 7472 } 7473 #else 7474 { 7475 struct linux_dirent *dirp; 7476 abi_long count = arg3; 7477 7478 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7479 goto efault; 7480 ret = get_errno(sys_getdents(arg1, dirp, count)); 7481 if (!is_error(ret)) { 7482 struct linux_dirent *de; 7483 int len = ret; 7484 int reclen; 7485 de = dirp; 7486 while (len > 0) { 7487 reclen = de->d_reclen; 7488 if (reclen > len) 7489 break; 7490 de->d_reclen = tswap16(reclen); 7491 tswapls(&de->d_ino); 7492 tswapls(&de->d_off); 7493 de = (struct linux_dirent *)((char *)de + reclen); 7494 len -= reclen; 7495 } 7496 } 7497 unlock_user(dirp, arg2, ret); 7498 } 7499 #endif 
7500 #else 7501 /* Implement getdents in terms of getdents64 */ 7502 { 7503 struct linux_dirent64 *dirp; 7504 abi_long count = arg3; 7505 7506 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 7507 if (!dirp) { 7508 goto efault; 7509 } 7510 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7511 if (!is_error(ret)) { 7512 /* Convert the dirent64 structs to target dirent. We do this 7513 * in-place, since we can guarantee that a target_dirent is no 7514 * larger than a dirent64; however this means we have to be 7515 * careful to read everything before writing in the new format. 7516 */ 7517 struct linux_dirent64 *de; 7518 struct target_dirent *tde; 7519 int len = ret; 7520 int tlen = 0; 7521 7522 de = dirp; 7523 tde = (struct target_dirent *)dirp; 7524 while (len > 0) { 7525 int namelen, treclen; 7526 int reclen = de->d_reclen; 7527 uint64_t ino = de->d_ino; 7528 int64_t off = de->d_off; 7529 uint8_t type = de->d_type; 7530 7531 namelen = strlen(de->d_name); 7532 treclen = offsetof(struct target_dirent, d_name) 7533 + namelen + 2; 7534 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 7535 7536 memmove(tde->d_name, de->d_name, namelen + 1); 7537 tde->d_ino = tswapal(ino); 7538 tde->d_off = tswapal(off); 7539 tde->d_reclen = tswap16(treclen); 7540 /* The target_dirent type is in what was formerly a padding 7541 * byte at the end of the structure: 7542 */ 7543 *(((char *)tde) + treclen - 1) = type; 7544 7545 de = (struct linux_dirent64 *)((char *)de + reclen); 7546 tde = (struct target_dirent *)((char *)tde + treclen); 7547 len -= reclen; 7548 tlen += treclen; 7549 } 7550 ret = tlen; 7551 } 7552 unlock_user(dirp, arg2, ret); 7553 } 7554 #endif 7555 break; 7556 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7557 case TARGET_NR_getdents64: 7558 { 7559 struct linux_dirent64 *dirp; 7560 abi_long count = arg3; 7561 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7562 goto efault; 7563 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7564 if 
(!is_error(ret)) { 7565 struct linux_dirent64 *de; 7566 int len = ret; 7567 int reclen; 7568 de = dirp; 7569 while (len > 0) { 7570 reclen = de->d_reclen; 7571 if (reclen > len) 7572 break; 7573 de->d_reclen = tswap16(reclen); 7574 tswap64s((uint64_t *)&de->d_ino); 7575 tswap64s((uint64_t *)&de->d_off); 7576 de = (struct linux_dirent64 *)((char *)de + reclen); 7577 len -= reclen; 7578 } 7579 } 7580 unlock_user(dirp, arg2, ret); 7581 } 7582 break; 7583 #endif /* TARGET_NR_getdents64 */ 7584 #if defined(TARGET_NR__newselect) 7585 case TARGET_NR__newselect: 7586 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7587 break; 7588 #endif 7589 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7590 # ifdef TARGET_NR_poll 7591 case TARGET_NR_poll: 7592 # endif 7593 # ifdef TARGET_NR_ppoll 7594 case TARGET_NR_ppoll: 7595 # endif 7596 { 7597 struct target_pollfd *target_pfd; 7598 unsigned int nfds = arg2; 7599 int timeout = arg3; 7600 struct pollfd *pfd; 7601 unsigned int i; 7602 7603 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7604 if (!target_pfd) 7605 goto efault; 7606 7607 pfd = alloca(sizeof(struct pollfd) * nfds); 7608 for(i = 0; i < nfds; i++) { 7609 pfd[i].fd = tswap32(target_pfd[i].fd); 7610 pfd[i].events = tswap16(target_pfd[i].events); 7611 } 7612 7613 # ifdef TARGET_NR_ppoll 7614 if (num == TARGET_NR_ppoll) { 7615 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7616 target_sigset_t *target_set; 7617 sigset_t _set, *set = &_set; 7618 7619 if (arg3) { 7620 if (target_to_host_timespec(timeout_ts, arg3)) { 7621 unlock_user(target_pfd, arg1, 0); 7622 goto efault; 7623 } 7624 } else { 7625 timeout_ts = NULL; 7626 } 7627 7628 if (arg4) { 7629 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7630 if (!target_set) { 7631 unlock_user(target_pfd, arg1, 0); 7632 goto efault; 7633 } 7634 target_to_host_sigset(set, target_set); 7635 } else { 7636 set = NULL; 7637 } 7638 7639 ret = get_errno(sys_ppoll(pfd, 
nfds, timeout_ts, set, _NSIG/8)); 7640 7641 if (!is_error(ret) && arg3) { 7642 host_to_target_timespec(arg3, timeout_ts); 7643 } 7644 if (arg4) { 7645 unlock_user(target_set, arg4, 0); 7646 } 7647 } else 7648 # endif 7649 ret = get_errno(poll(pfd, nfds, timeout)); 7650 7651 if (!is_error(ret)) { 7652 for(i = 0; i < nfds; i++) { 7653 target_pfd[i].revents = tswap16(pfd[i].revents); 7654 } 7655 } 7656 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7657 } 7658 break; 7659 #endif 7660 case TARGET_NR_flock: 7661 /* NOTE: the flock constant seems to be the same for every 7662 Linux platform */ 7663 ret = get_errno(flock(arg1, arg2)); 7664 break; 7665 case TARGET_NR_readv: 7666 { 7667 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7668 if (vec != NULL) { 7669 ret = get_errno(readv(arg1, vec, arg3)); 7670 unlock_iovec(vec, arg2, arg3, 1); 7671 } else { 7672 ret = -host_to_target_errno(errno); 7673 } 7674 } 7675 break; 7676 case TARGET_NR_writev: 7677 { 7678 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7679 if (vec != NULL) { 7680 ret = get_errno(writev(arg1, vec, arg3)); 7681 unlock_iovec(vec, arg2, arg3, 0); 7682 } else { 7683 ret = -host_to_target_errno(errno); 7684 } 7685 } 7686 break; 7687 case TARGET_NR_getsid: 7688 ret = get_errno(getsid(arg1)); 7689 break; 7690 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7691 case TARGET_NR_fdatasync: 7692 ret = get_errno(fdatasync(arg1)); 7693 break; 7694 #endif 7695 case TARGET_NR__sysctl: 7696 /* We don't implement this, but ENOTDIR is always a safe 7697 return value. */ 7698 ret = -TARGET_ENOTDIR; 7699 break; 7700 case TARGET_NR_sched_getaffinity: 7701 { 7702 unsigned int mask_size; 7703 unsigned long *mask; 7704 7705 /* 7706 * sched_getaffinity needs multiples of ulong, so need to take 7707 * care of mismatches between target ulong and host ulong sizes. 
7708 */ 7709 if (arg2 & (sizeof(abi_ulong) - 1)) { 7710 ret = -TARGET_EINVAL; 7711 break; 7712 } 7713 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7714 7715 mask = alloca(mask_size); 7716 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7717 7718 if (!is_error(ret)) { 7719 if (ret > arg2) { 7720 /* More data returned than the caller's buffer will fit. 7721 * This only happens if sizeof(abi_long) < sizeof(long) 7722 * and the caller passed us a buffer holding an odd number 7723 * of abi_longs. If the host kernel is actually using the 7724 * extra 4 bytes then fail EINVAL; otherwise we can just 7725 * ignore them and only copy the interesting part. 7726 */ 7727 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 7728 if (numcpus > arg2 * 8) { 7729 ret = -TARGET_EINVAL; 7730 break; 7731 } 7732 ret = arg2; 7733 } 7734 7735 if (copy_to_user(arg3, mask, ret)) { 7736 goto efault; 7737 } 7738 } 7739 } 7740 break; 7741 case TARGET_NR_sched_setaffinity: 7742 { 7743 unsigned int mask_size; 7744 unsigned long *mask; 7745 7746 /* 7747 * sched_setaffinity needs multiples of ulong, so need to take 7748 * care of mismatches between target ulong and host ulong sizes. 
7749 */ 7750 if (arg2 & (sizeof(abi_ulong) - 1)) { 7751 ret = -TARGET_EINVAL; 7752 break; 7753 } 7754 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7755 7756 mask = alloca(mask_size); 7757 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7758 goto efault; 7759 } 7760 memcpy(mask, p, arg2); 7761 unlock_user_struct(p, arg2, 0); 7762 7763 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7764 } 7765 break; 7766 case TARGET_NR_sched_setparam: 7767 { 7768 struct sched_param *target_schp; 7769 struct sched_param schp; 7770 7771 if (arg2 == 0) { 7772 return -TARGET_EINVAL; 7773 } 7774 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7775 goto efault; 7776 schp.sched_priority = tswap32(target_schp->sched_priority); 7777 unlock_user_struct(target_schp, arg2, 0); 7778 ret = get_errno(sched_setparam(arg1, &schp)); 7779 } 7780 break; 7781 case TARGET_NR_sched_getparam: 7782 { 7783 struct sched_param *target_schp; 7784 struct sched_param schp; 7785 7786 if (arg2 == 0) { 7787 return -TARGET_EINVAL; 7788 } 7789 ret = get_errno(sched_getparam(arg1, &schp)); 7790 if (!is_error(ret)) { 7791 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7792 goto efault; 7793 target_schp->sched_priority = tswap32(schp.sched_priority); 7794 unlock_user_struct(target_schp, arg2, 1); 7795 } 7796 } 7797 break; 7798 case TARGET_NR_sched_setscheduler: 7799 { 7800 struct sched_param *target_schp; 7801 struct sched_param schp; 7802 if (arg3 == 0) { 7803 return -TARGET_EINVAL; 7804 } 7805 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7806 goto efault; 7807 schp.sched_priority = tswap32(target_schp->sched_priority); 7808 unlock_user_struct(target_schp, arg3, 0); 7809 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7810 } 7811 break; 7812 case TARGET_NR_sched_getscheduler: 7813 ret = get_errno(sched_getscheduler(arg1)); 7814 break; 7815 case TARGET_NR_sched_yield: 7816 ret = get_errno(sched_yield()); 7817 break; 7818 case 
TARGET_NR_sched_get_priority_max: 7819 ret = get_errno(sched_get_priority_max(arg1)); 7820 break; 7821 case TARGET_NR_sched_get_priority_min: 7822 ret = get_errno(sched_get_priority_min(arg1)); 7823 break; 7824 case TARGET_NR_sched_rr_get_interval: 7825 { 7826 struct timespec ts; 7827 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7828 if (!is_error(ret)) { 7829 ret = host_to_target_timespec(arg2, &ts); 7830 } 7831 } 7832 break; 7833 case TARGET_NR_nanosleep: 7834 { 7835 struct timespec req, rem; 7836 target_to_host_timespec(&req, arg1); 7837 ret = get_errno(nanosleep(&req, &rem)); 7838 if (is_error(ret) && arg2) { 7839 host_to_target_timespec(arg2, &rem); 7840 } 7841 } 7842 break; 7843 #ifdef TARGET_NR_query_module 7844 case TARGET_NR_query_module: 7845 goto unimplemented; 7846 #endif 7847 #ifdef TARGET_NR_nfsservctl 7848 case TARGET_NR_nfsservctl: 7849 goto unimplemented; 7850 #endif 7851 case TARGET_NR_prctl: 7852 switch (arg1) { 7853 case PR_GET_PDEATHSIG: 7854 { 7855 int deathsig; 7856 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7857 if (!is_error(ret) && arg2 7858 && put_user_ual(deathsig, arg2)) { 7859 goto efault; 7860 } 7861 break; 7862 } 7863 #ifdef PR_GET_NAME 7864 case PR_GET_NAME: 7865 { 7866 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7867 if (!name) { 7868 goto efault; 7869 } 7870 ret = get_errno(prctl(arg1, (unsigned long)name, 7871 arg3, arg4, arg5)); 7872 unlock_user(name, arg2, 16); 7873 break; 7874 } 7875 case PR_SET_NAME: 7876 { 7877 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7878 if (!name) { 7879 goto efault; 7880 } 7881 ret = get_errno(prctl(arg1, (unsigned long)name, 7882 arg3, arg4, arg5)); 7883 unlock_user(name, arg2, 0); 7884 break; 7885 } 7886 #endif 7887 default: 7888 /* Most prctl options have no pointer arguments */ 7889 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7890 break; 7891 } 7892 break; 7893 #ifdef TARGET_NR_arch_prctl 7894 case TARGET_NR_arch_prctl: 7895 #if defined(TARGET_I386) && 
!defined(TARGET_ABI32) 7896 ret = do_arch_prctl(cpu_env, arg1, arg2); 7897 break; 7898 #else 7899 goto unimplemented; 7900 #endif 7901 #endif 7902 #ifdef TARGET_NR_pread64 7903 case TARGET_NR_pread64: 7904 if (regpairs_aligned(cpu_env)) { 7905 arg4 = arg5; 7906 arg5 = arg6; 7907 } 7908 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7909 goto efault; 7910 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7911 unlock_user(p, arg2, ret); 7912 break; 7913 case TARGET_NR_pwrite64: 7914 if (regpairs_aligned(cpu_env)) { 7915 arg4 = arg5; 7916 arg5 = arg6; 7917 } 7918 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7919 goto efault; 7920 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7921 unlock_user(p, arg2, 0); 7922 break; 7923 #endif 7924 case TARGET_NR_getcwd: 7925 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7926 goto efault; 7927 ret = get_errno(sys_getcwd1(p, arg2)); 7928 unlock_user(p, arg1, ret); 7929 break; 7930 case TARGET_NR_capget: 7931 case TARGET_NR_capset: 7932 { 7933 struct target_user_cap_header *target_header; 7934 struct target_user_cap_data *target_data = NULL; 7935 struct __user_cap_header_struct header; 7936 struct __user_cap_data_struct data[2]; 7937 struct __user_cap_data_struct *dataptr = NULL; 7938 int i, target_datalen; 7939 int data_items = 1; 7940 7941 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 7942 goto efault; 7943 } 7944 header.version = tswap32(target_header->version); 7945 header.pid = tswap32(target_header->pid); 7946 7947 if (header.version != _LINUX_CAPABILITY_VERSION) { 7948 /* Version 2 and up takes pointer to two user_data structs */ 7949 data_items = 2; 7950 } 7951 7952 target_datalen = sizeof(*target_data) * data_items; 7953 7954 if (arg2) { 7955 if (num == TARGET_NR_capget) { 7956 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 7957 } else { 7958 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 7959 } 7960 if (!target_data) { 
7961 unlock_user_struct(target_header, arg1, 0); 7962 goto efault; 7963 } 7964 7965 if (num == TARGET_NR_capset) { 7966 for (i = 0; i < data_items; i++) { 7967 data[i].effective = tswap32(target_data[i].effective); 7968 data[i].permitted = tswap32(target_data[i].permitted); 7969 data[i].inheritable = tswap32(target_data[i].inheritable); 7970 } 7971 } 7972 7973 dataptr = data; 7974 } 7975 7976 if (num == TARGET_NR_capget) { 7977 ret = get_errno(capget(&header, dataptr)); 7978 } else { 7979 ret = get_errno(capset(&header, dataptr)); 7980 } 7981 7982 /* The kernel always updates version for both capget and capset */ 7983 target_header->version = tswap32(header.version); 7984 unlock_user_struct(target_header, arg1, 1); 7985 7986 if (arg2) { 7987 if (num == TARGET_NR_capget) { 7988 for (i = 0; i < data_items; i++) { 7989 target_data[i].effective = tswap32(data[i].effective); 7990 target_data[i].permitted = tswap32(data[i].permitted); 7991 target_data[i].inheritable = tswap32(data[i].inheritable); 7992 } 7993 unlock_user(target_data, arg2, target_datalen); 7994 } else { 7995 unlock_user(target_data, arg2, 0); 7996 } 7997 } 7998 break; 7999 } 8000 case TARGET_NR_sigaltstack: 8001 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 8002 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 8003 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 8004 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 8005 break; 8006 #else 8007 goto unimplemented; 8008 #endif 8009 8010 #ifdef CONFIG_SENDFILE 8011 case TARGET_NR_sendfile: 8012 { 8013 off_t *offp = NULL; 8014 off_t off; 8015 if (arg3) { 8016 ret = get_user_sal(off, arg3); 8017 if (is_error(ret)) { 8018 break; 8019 } 8020 offp = &off; 8021 } 8022 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 8023 if (!is_error(ret) && arg3) { 8024 abi_long ret2 = put_user_sal(off, arg3); 8025 if (is_error(ret2)) { 8026 ret = ret2; 8027 } 
8028 } 8029 break; 8030 } 8031 #ifdef TARGET_NR_sendfile64 8032 case TARGET_NR_sendfile64: 8033 { 8034 off_t *offp = NULL; 8035 off_t off; 8036 if (arg3) { 8037 ret = get_user_s64(off, arg3); 8038 if (is_error(ret)) { 8039 break; 8040 } 8041 offp = &off; 8042 } 8043 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 8044 if (!is_error(ret) && arg3) { 8045 abi_long ret2 = put_user_s64(off, arg3); 8046 if (is_error(ret2)) { 8047 ret = ret2; 8048 } 8049 } 8050 break; 8051 } 8052 #endif 8053 #else 8054 case TARGET_NR_sendfile: 8055 #ifdef TARGET_NR_sendfile64 8056 case TARGET_NR_sendfile64: 8057 #endif 8058 goto unimplemented; 8059 #endif 8060 8061 #ifdef TARGET_NR_getpmsg 8062 case TARGET_NR_getpmsg: 8063 goto unimplemented; 8064 #endif 8065 #ifdef TARGET_NR_putpmsg 8066 case TARGET_NR_putpmsg: 8067 goto unimplemented; 8068 #endif 8069 #ifdef TARGET_NR_vfork 8070 case TARGET_NR_vfork: 8071 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 8072 0, 0, 0, 0)); 8073 break; 8074 #endif 8075 #ifdef TARGET_NR_ugetrlimit 8076 case TARGET_NR_ugetrlimit: 8077 { 8078 struct rlimit rlim; 8079 int resource = target_to_host_resource(arg1); 8080 ret = get_errno(getrlimit(resource, &rlim)); 8081 if (!is_error(ret)) { 8082 struct target_rlimit *target_rlim; 8083 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 8084 goto efault; 8085 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 8086 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 8087 unlock_user_struct(target_rlim, arg2, 1); 8088 } 8089 break; 8090 } 8091 #endif 8092 #ifdef TARGET_NR_truncate64 8093 case TARGET_NR_truncate64: 8094 if (!(p = lock_user_string(arg1))) 8095 goto efault; 8096 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 8097 unlock_user(p, arg1, 0); 8098 break; 8099 #endif 8100 #ifdef TARGET_NR_ftruncate64 8101 case TARGET_NR_ftruncate64: 8102 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 8103 break; 8104 #endif 8105 #ifdef TARGET_NR_stat64 8106 
case TARGET_NR_stat64: 8107 if (!(p = lock_user_string(arg1))) 8108 goto efault; 8109 ret = get_errno(stat(path(p), &st)); 8110 unlock_user(p, arg1, 0); 8111 if (!is_error(ret)) 8112 ret = host_to_target_stat64(cpu_env, arg2, &st); 8113 break; 8114 #endif 8115 #ifdef TARGET_NR_lstat64 8116 case TARGET_NR_lstat64: 8117 if (!(p = lock_user_string(arg1))) 8118 goto efault; 8119 ret = get_errno(lstat(path(p), &st)); 8120 unlock_user(p, arg1, 0); 8121 if (!is_error(ret)) 8122 ret = host_to_target_stat64(cpu_env, arg2, &st); 8123 break; 8124 #endif 8125 #ifdef TARGET_NR_fstat64 8126 case TARGET_NR_fstat64: 8127 ret = get_errno(fstat(arg1, &st)); 8128 if (!is_error(ret)) 8129 ret = host_to_target_stat64(cpu_env, arg2, &st); 8130 break; 8131 #endif 8132 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 8133 #ifdef TARGET_NR_fstatat64 8134 case TARGET_NR_fstatat64: 8135 #endif 8136 #ifdef TARGET_NR_newfstatat 8137 case TARGET_NR_newfstatat: 8138 #endif 8139 if (!(p = lock_user_string(arg2))) 8140 goto efault; 8141 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 8142 if (!is_error(ret)) 8143 ret = host_to_target_stat64(cpu_env, arg3, &st); 8144 break; 8145 #endif 8146 case TARGET_NR_lchown: 8147 if (!(p = lock_user_string(arg1))) 8148 goto efault; 8149 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 8150 unlock_user(p, arg1, 0); 8151 break; 8152 #ifdef TARGET_NR_getuid 8153 case TARGET_NR_getuid: 8154 ret = get_errno(high2lowuid(getuid())); 8155 break; 8156 #endif 8157 #ifdef TARGET_NR_getgid 8158 case TARGET_NR_getgid: 8159 ret = get_errno(high2lowgid(getgid())); 8160 break; 8161 #endif 8162 #ifdef TARGET_NR_geteuid 8163 case TARGET_NR_geteuid: 8164 ret = get_errno(high2lowuid(geteuid())); 8165 break; 8166 #endif 8167 #ifdef TARGET_NR_getegid 8168 case TARGET_NR_getegid: 8169 ret = get_errno(high2lowgid(getegid())); 8170 break; 8171 #endif 8172 case TARGET_NR_setreuid: 8173 ret = get_errno(setreuid(low2highuid(arg1), 
low2highuid(arg2))); 8174 break; 8175 case TARGET_NR_setregid: 8176 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 8177 break; 8178 case TARGET_NR_getgroups: 8179 { 8180 int gidsetsize = arg1; 8181 target_id *target_grouplist; 8182 gid_t *grouplist; 8183 int i; 8184 8185 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8186 ret = get_errno(getgroups(gidsetsize, grouplist)); 8187 if (gidsetsize == 0) 8188 break; 8189 if (!is_error(ret)) { 8190 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 8191 if (!target_grouplist) 8192 goto efault; 8193 for(i = 0;i < ret; i++) 8194 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 8195 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 8196 } 8197 } 8198 break; 8199 case TARGET_NR_setgroups: 8200 { 8201 int gidsetsize = arg1; 8202 target_id *target_grouplist; 8203 gid_t *grouplist = NULL; 8204 int i; 8205 if (gidsetsize) { 8206 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8207 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 8208 if (!target_grouplist) { 8209 ret = -TARGET_EFAULT; 8210 goto fail; 8211 } 8212 for (i = 0; i < gidsetsize; i++) { 8213 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 8214 } 8215 unlock_user(target_grouplist, arg2, 0); 8216 } 8217 ret = get_errno(setgroups(gidsetsize, grouplist)); 8218 } 8219 break; 8220 case TARGET_NR_fchown: 8221 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 8222 break; 8223 #if defined(TARGET_NR_fchownat) 8224 case TARGET_NR_fchownat: 8225 if (!(p = lock_user_string(arg2))) 8226 goto efault; 8227 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 8228 low2highgid(arg4), arg5)); 8229 unlock_user(p, arg2, 0); 8230 break; 8231 #endif 8232 #ifdef TARGET_NR_setresuid 8233 case TARGET_NR_setresuid: 8234 ret = get_errno(setresuid(low2highuid(arg1), 8235 low2highuid(arg2), 8236 low2highuid(arg3))); 8237 break; 8238 #endif 8239 #ifdef 
TARGET_NR_getresuid 8240 case TARGET_NR_getresuid: 8241 { 8242 uid_t ruid, euid, suid; 8243 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8244 if (!is_error(ret)) { 8245 if (put_user_id(high2lowuid(ruid), arg1) 8246 || put_user_id(high2lowuid(euid), arg2) 8247 || put_user_id(high2lowuid(suid), arg3)) 8248 goto efault; 8249 } 8250 } 8251 break; 8252 #endif 8253 #ifdef TARGET_NR_getresgid 8254 case TARGET_NR_setresgid: 8255 ret = get_errno(setresgid(low2highgid(arg1), 8256 low2highgid(arg2), 8257 low2highgid(arg3))); 8258 break; 8259 #endif 8260 #ifdef TARGET_NR_getresgid 8261 case TARGET_NR_getresgid: 8262 { 8263 gid_t rgid, egid, sgid; 8264 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8265 if (!is_error(ret)) { 8266 if (put_user_id(high2lowgid(rgid), arg1) 8267 || put_user_id(high2lowgid(egid), arg2) 8268 || put_user_id(high2lowgid(sgid), arg3)) 8269 goto efault; 8270 } 8271 } 8272 break; 8273 #endif 8274 case TARGET_NR_chown: 8275 if (!(p = lock_user_string(arg1))) 8276 goto efault; 8277 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 8278 unlock_user(p, arg1, 0); 8279 break; 8280 case TARGET_NR_setuid: 8281 ret = get_errno(setuid(low2highuid(arg1))); 8282 break; 8283 case TARGET_NR_setgid: 8284 ret = get_errno(setgid(low2highgid(arg1))); 8285 break; 8286 case TARGET_NR_setfsuid: 8287 ret = get_errno(setfsuid(arg1)); 8288 break; 8289 case TARGET_NR_setfsgid: 8290 ret = get_errno(setfsgid(arg1)); 8291 break; 8292 8293 #ifdef TARGET_NR_lchown32 8294 case TARGET_NR_lchown32: 8295 if (!(p = lock_user_string(arg1))) 8296 goto efault; 8297 ret = get_errno(lchown(p, arg2, arg3)); 8298 unlock_user(p, arg1, 0); 8299 break; 8300 #endif 8301 #ifdef TARGET_NR_getuid32 8302 case TARGET_NR_getuid32: 8303 ret = get_errno(getuid()); 8304 break; 8305 #endif 8306 8307 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 8308 /* Alpha specific */ 8309 case TARGET_NR_getxuid: 8310 { 8311 uid_t euid; 8312 euid=geteuid(); 8313 ((CPUAlphaState 
*)cpu_env)->ir[IR_A4]=euid; 8314 } 8315 ret = get_errno(getuid()); 8316 break; 8317 #endif 8318 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 8319 /* Alpha specific */ 8320 case TARGET_NR_getxgid: 8321 { 8322 uid_t egid; 8323 egid=getegid(); 8324 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 8325 } 8326 ret = get_errno(getgid()); 8327 break; 8328 #endif 8329 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 8330 /* Alpha specific */ 8331 case TARGET_NR_osf_getsysinfo: 8332 ret = -TARGET_EOPNOTSUPP; 8333 switch (arg1) { 8334 case TARGET_GSI_IEEE_FP_CONTROL: 8335 { 8336 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 8337 8338 /* Copied from linux ieee_fpcr_to_swcr. */ 8339 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 8340 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 8341 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 8342 | SWCR_TRAP_ENABLE_DZE 8343 | SWCR_TRAP_ENABLE_OVF); 8344 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 8345 | SWCR_TRAP_ENABLE_INE); 8346 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 8347 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 8348 8349 if (put_user_u64 (swcr, arg2)) 8350 goto efault; 8351 ret = 0; 8352 } 8353 break; 8354 8355 /* case GSI_IEEE_STATE_AT_SIGNAL: 8356 -- Not implemented in linux kernel. 8357 case GSI_UACPROC: 8358 -- Retrieves current unaligned access state; not much used. 8359 case GSI_PROC_TYPE: 8360 -- Retrieves implver information; surely not used. 8361 case GSI_GET_HWRPB: 8362 -- Grabs a copy of the HWRPB; surely not used. 
8363 */ 8364 } 8365 break; 8366 #endif 8367 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 8368 /* Alpha specific */ 8369 case TARGET_NR_osf_setsysinfo: 8370 ret = -TARGET_EOPNOTSUPP; 8371 switch (arg1) { 8372 case TARGET_SSI_IEEE_FP_CONTROL: 8373 { 8374 uint64_t swcr, fpcr, orig_fpcr; 8375 8376 if (get_user_u64 (swcr, arg2)) { 8377 goto efault; 8378 } 8379 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8380 fpcr = orig_fpcr & FPCR_DYN_MASK; 8381 8382 /* Copied from linux ieee_swcr_to_fpcr. */ 8383 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 8384 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 8385 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 8386 | SWCR_TRAP_ENABLE_DZE 8387 | SWCR_TRAP_ENABLE_OVF)) << 48; 8388 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 8389 | SWCR_TRAP_ENABLE_INE)) << 57; 8390 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 8391 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 8392 8393 cpu_alpha_store_fpcr(cpu_env, fpcr); 8394 ret = 0; 8395 } 8396 break; 8397 8398 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 8399 { 8400 uint64_t exc, fpcr, orig_fpcr; 8401 int si_code; 8402 8403 if (get_user_u64(exc, arg2)) { 8404 goto efault; 8405 } 8406 8407 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8408 8409 /* We only add to the exception status here. */ 8410 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 8411 8412 cpu_alpha_store_fpcr(cpu_env, fpcr); 8413 ret = 0; 8414 8415 /* Old exceptions are not signaled. */ 8416 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 8417 8418 /* If any exceptions set by this call, 8419 and are unmasked, send a signal. 
*/ 8420 si_code = 0; 8421 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 8422 si_code = TARGET_FPE_FLTRES; 8423 } 8424 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 8425 si_code = TARGET_FPE_FLTUND; 8426 } 8427 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 8428 si_code = TARGET_FPE_FLTOVF; 8429 } 8430 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 8431 si_code = TARGET_FPE_FLTDIV; 8432 } 8433 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 8434 si_code = TARGET_FPE_FLTINV; 8435 } 8436 if (si_code != 0) { 8437 target_siginfo_t info; 8438 info.si_signo = SIGFPE; 8439 info.si_errno = 0; 8440 info.si_code = si_code; 8441 info._sifields._sigfault._addr 8442 = ((CPUArchState *)cpu_env)->pc; 8443 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 8444 } 8445 } 8446 break; 8447 8448 /* case SSI_NVPAIRS: 8449 -- Used with SSIN_UACPROC to enable unaligned accesses. 8450 case SSI_IEEE_STATE_AT_SIGNAL: 8451 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8452 -- Not implemented in linux kernel 8453 */ 8454 } 8455 break; 8456 #endif 8457 #ifdef TARGET_NR_osf_sigprocmask 8458 /* Alpha specific. 
*/ 8459 case TARGET_NR_osf_sigprocmask: 8460 { 8461 abi_ulong mask; 8462 int how; 8463 sigset_t set, oldset; 8464 8465 switch(arg1) { 8466 case TARGET_SIG_BLOCK: 8467 how = SIG_BLOCK; 8468 break; 8469 case TARGET_SIG_UNBLOCK: 8470 how = SIG_UNBLOCK; 8471 break; 8472 case TARGET_SIG_SETMASK: 8473 how = SIG_SETMASK; 8474 break; 8475 default: 8476 ret = -TARGET_EINVAL; 8477 goto fail; 8478 } 8479 mask = arg2; 8480 target_to_host_old_sigset(&set, &mask); 8481 do_sigprocmask(how, &set, &oldset); 8482 host_to_target_old_sigset(&mask, &oldset); 8483 ret = mask; 8484 } 8485 break; 8486 #endif 8487 8488 #ifdef TARGET_NR_getgid32 8489 case TARGET_NR_getgid32: 8490 ret = get_errno(getgid()); 8491 break; 8492 #endif 8493 #ifdef TARGET_NR_geteuid32 8494 case TARGET_NR_geteuid32: 8495 ret = get_errno(geteuid()); 8496 break; 8497 #endif 8498 #ifdef TARGET_NR_getegid32 8499 case TARGET_NR_getegid32: 8500 ret = get_errno(getegid()); 8501 break; 8502 #endif 8503 #ifdef TARGET_NR_setreuid32 8504 case TARGET_NR_setreuid32: 8505 ret = get_errno(setreuid(arg1, arg2)); 8506 break; 8507 #endif 8508 #ifdef TARGET_NR_setregid32 8509 case TARGET_NR_setregid32: 8510 ret = get_errno(setregid(arg1, arg2)); 8511 break; 8512 #endif 8513 #ifdef TARGET_NR_getgroups32 8514 case TARGET_NR_getgroups32: 8515 { 8516 int gidsetsize = arg1; 8517 uint32_t *target_grouplist; 8518 gid_t *grouplist; 8519 int i; 8520 8521 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8522 ret = get_errno(getgroups(gidsetsize, grouplist)); 8523 if (gidsetsize == 0) 8524 break; 8525 if (!is_error(ret)) { 8526 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8527 if (!target_grouplist) { 8528 ret = -TARGET_EFAULT; 8529 goto fail; 8530 } 8531 for(i = 0;i < ret; i++) 8532 target_grouplist[i] = tswap32(grouplist[i]); 8533 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8534 } 8535 } 8536 break; 8537 #endif 8538 #ifdef TARGET_NR_setgroups32 8539 case TARGET_NR_setgroups32: 8540 { 8541 int gidsetsize = arg1; 
8542 uint32_t *target_grouplist; 8543 gid_t *grouplist; 8544 int i; 8545 8546 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8547 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8548 if (!target_grouplist) { 8549 ret = -TARGET_EFAULT; 8550 goto fail; 8551 } 8552 for(i = 0;i < gidsetsize; i++) 8553 grouplist[i] = tswap32(target_grouplist[i]); 8554 unlock_user(target_grouplist, arg2, 0); 8555 ret = get_errno(setgroups(gidsetsize, grouplist)); 8556 } 8557 break; 8558 #endif 8559 #ifdef TARGET_NR_fchown32 8560 case TARGET_NR_fchown32: 8561 ret = get_errno(fchown(arg1, arg2, arg3)); 8562 break; 8563 #endif 8564 #ifdef TARGET_NR_setresuid32 8565 case TARGET_NR_setresuid32: 8566 ret = get_errno(setresuid(arg1, arg2, arg3)); 8567 break; 8568 #endif 8569 #ifdef TARGET_NR_getresuid32 8570 case TARGET_NR_getresuid32: 8571 { 8572 uid_t ruid, euid, suid; 8573 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8574 if (!is_error(ret)) { 8575 if (put_user_u32(ruid, arg1) 8576 || put_user_u32(euid, arg2) 8577 || put_user_u32(suid, arg3)) 8578 goto efault; 8579 } 8580 } 8581 break; 8582 #endif 8583 #ifdef TARGET_NR_setresgid32 8584 case TARGET_NR_setresgid32: 8585 ret = get_errno(setresgid(arg1, arg2, arg3)); 8586 break; 8587 #endif 8588 #ifdef TARGET_NR_getresgid32 8589 case TARGET_NR_getresgid32: 8590 { 8591 gid_t rgid, egid, sgid; 8592 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8593 if (!is_error(ret)) { 8594 if (put_user_u32(rgid, arg1) 8595 || put_user_u32(egid, arg2) 8596 || put_user_u32(sgid, arg3)) 8597 goto efault; 8598 } 8599 } 8600 break; 8601 #endif 8602 #ifdef TARGET_NR_chown32 8603 case TARGET_NR_chown32: 8604 if (!(p = lock_user_string(arg1))) 8605 goto efault; 8606 ret = get_errno(chown(p, arg2, arg3)); 8607 unlock_user(p, arg1, 0); 8608 break; 8609 #endif 8610 #ifdef TARGET_NR_setuid32 8611 case TARGET_NR_setuid32: 8612 ret = get_errno(setuid(arg1)); 8613 break; 8614 #endif 8615 #ifdef TARGET_NR_setgid32 8616 case TARGET_NR_setgid32: 8617 
ret = get_errno(setgid(arg1)); 8618 break; 8619 #endif 8620 #ifdef TARGET_NR_setfsuid32 8621 case TARGET_NR_setfsuid32: 8622 ret = get_errno(setfsuid(arg1)); 8623 break; 8624 #endif 8625 #ifdef TARGET_NR_setfsgid32 8626 case TARGET_NR_setfsgid32: 8627 ret = get_errno(setfsgid(arg1)); 8628 break; 8629 #endif 8630 8631 case TARGET_NR_pivot_root: 8632 goto unimplemented; 8633 #ifdef TARGET_NR_mincore 8634 case TARGET_NR_mincore: 8635 { 8636 void *a; 8637 ret = -TARGET_EFAULT; 8638 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 8639 goto efault; 8640 if (!(p = lock_user_string(arg3))) 8641 goto mincore_fail; 8642 ret = get_errno(mincore(a, arg2, p)); 8643 unlock_user(p, arg3, ret); 8644 mincore_fail: 8645 unlock_user(a, arg1, 0); 8646 } 8647 break; 8648 #endif 8649 #ifdef TARGET_NR_arm_fadvise64_64 8650 case TARGET_NR_arm_fadvise64_64: 8651 { 8652 /* 8653 * arm_fadvise64_64 looks like fadvise64_64 but 8654 * with different argument order 8655 */ 8656 abi_long temp; 8657 temp = arg3; 8658 arg3 = arg4; 8659 arg4 = temp; 8660 } 8661 #endif 8662 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 8663 #ifdef TARGET_NR_fadvise64_64 8664 case TARGET_NR_fadvise64_64: 8665 #endif 8666 #ifdef TARGET_NR_fadvise64 8667 case TARGET_NR_fadvise64: 8668 #endif 8669 #ifdef TARGET_S390X 8670 switch (arg4) { 8671 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 8672 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 8673 case 6: arg4 = POSIX_FADV_DONTNEED; break; 8674 case 7: arg4 = POSIX_FADV_NOREUSE; break; 8675 default: break; 8676 } 8677 #endif 8678 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 8679 break; 8680 #endif 8681 #ifdef TARGET_NR_madvise 8682 case TARGET_NR_madvise: 8683 /* A straight passthrough may not be safe because qemu sometimes 8684 turns private file-backed mappings into anonymous mappings. 8685 This will break MADV_DONTNEED. 
8686 This is a hint, so ignoring and returning success is ok. */ 8687 ret = get_errno(0); 8688 break; 8689 #endif 8690 #if TARGET_ABI_BITS == 32 8691 case TARGET_NR_fcntl64: 8692 { 8693 int cmd; 8694 struct flock64 fl; 8695 struct target_flock64 *target_fl; 8696 #ifdef TARGET_ARM 8697 struct target_eabi_flock64 *target_efl; 8698 #endif 8699 8700 cmd = target_to_host_fcntl_cmd(arg2); 8701 if (cmd == -TARGET_EINVAL) { 8702 ret = cmd; 8703 break; 8704 } 8705 8706 switch(arg2) { 8707 case TARGET_F_GETLK64: 8708 #ifdef TARGET_ARM 8709 if (((CPUARMState *)cpu_env)->eabi) { 8710 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8711 goto efault; 8712 fl.l_type = tswap16(target_efl->l_type); 8713 fl.l_whence = tswap16(target_efl->l_whence); 8714 fl.l_start = tswap64(target_efl->l_start); 8715 fl.l_len = tswap64(target_efl->l_len); 8716 fl.l_pid = tswap32(target_efl->l_pid); 8717 unlock_user_struct(target_efl, arg3, 0); 8718 } else 8719 #endif 8720 { 8721 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8722 goto efault; 8723 fl.l_type = tswap16(target_fl->l_type); 8724 fl.l_whence = tswap16(target_fl->l_whence); 8725 fl.l_start = tswap64(target_fl->l_start); 8726 fl.l_len = tswap64(target_fl->l_len); 8727 fl.l_pid = tswap32(target_fl->l_pid); 8728 unlock_user_struct(target_fl, arg3, 0); 8729 } 8730 ret = get_errno(fcntl(arg1, cmd, &fl)); 8731 if (ret == 0) { 8732 #ifdef TARGET_ARM 8733 if (((CPUARMState *)cpu_env)->eabi) { 8734 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8735 goto efault; 8736 target_efl->l_type = tswap16(fl.l_type); 8737 target_efl->l_whence = tswap16(fl.l_whence); 8738 target_efl->l_start = tswap64(fl.l_start); 8739 target_efl->l_len = tswap64(fl.l_len); 8740 target_efl->l_pid = tswap32(fl.l_pid); 8741 unlock_user_struct(target_efl, arg3, 1); 8742 } else 8743 #endif 8744 { 8745 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8746 goto efault; 8747 target_fl->l_type = tswap16(fl.l_type); 8748 target_fl->l_whence = 
tswap16(fl.l_whence); 8749 target_fl->l_start = tswap64(fl.l_start); 8750 target_fl->l_len = tswap64(fl.l_len); 8751 target_fl->l_pid = tswap32(fl.l_pid); 8752 unlock_user_struct(target_fl, arg3, 1); 8753 } 8754 } 8755 break; 8756 8757 case TARGET_F_SETLK64: 8758 case TARGET_F_SETLKW64: 8759 #ifdef TARGET_ARM 8760 if (((CPUARMState *)cpu_env)->eabi) { 8761 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8762 goto efault; 8763 fl.l_type = tswap16(target_efl->l_type); 8764 fl.l_whence = tswap16(target_efl->l_whence); 8765 fl.l_start = tswap64(target_efl->l_start); 8766 fl.l_len = tswap64(target_efl->l_len); 8767 fl.l_pid = tswap32(target_efl->l_pid); 8768 unlock_user_struct(target_efl, arg3, 0); 8769 } else 8770 #endif 8771 { 8772 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8773 goto efault; 8774 fl.l_type = tswap16(target_fl->l_type); 8775 fl.l_whence = tswap16(target_fl->l_whence); 8776 fl.l_start = tswap64(target_fl->l_start); 8777 fl.l_len = tswap64(target_fl->l_len); 8778 fl.l_pid = tswap32(target_fl->l_pid); 8779 unlock_user_struct(target_fl, arg3, 0); 8780 } 8781 ret = get_errno(fcntl(arg1, cmd, &fl)); 8782 break; 8783 default: 8784 ret = do_fcntl(arg1, arg2, arg3); 8785 break; 8786 } 8787 break; 8788 } 8789 #endif 8790 #ifdef TARGET_NR_cacheflush 8791 case TARGET_NR_cacheflush: 8792 /* self-modifying code is handled automatically, so nothing needed */ 8793 ret = 0; 8794 break; 8795 #endif 8796 #ifdef TARGET_NR_security 8797 case TARGET_NR_security: 8798 goto unimplemented; 8799 #endif 8800 #ifdef TARGET_NR_getpagesize 8801 case TARGET_NR_getpagesize: 8802 ret = TARGET_PAGE_SIZE; 8803 break; 8804 #endif 8805 case TARGET_NR_gettid: 8806 ret = get_errno(gettid()); 8807 break; 8808 #ifdef TARGET_NR_readahead 8809 case TARGET_NR_readahead: 8810 #if TARGET_ABI_BITS == 32 8811 if (regpairs_aligned(cpu_env)) { 8812 arg2 = arg3; 8813 arg3 = arg4; 8814 arg4 = arg5; 8815 } 8816 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 
8817 #else 8818 ret = get_errno(readahead(arg1, arg2, arg3)); 8819 #endif 8820 break; 8821 #endif 8822 #ifdef CONFIG_ATTR 8823 #ifdef TARGET_NR_setxattr 8824 case TARGET_NR_listxattr: 8825 case TARGET_NR_llistxattr: 8826 { 8827 void *p, *b = 0; 8828 if (arg2) { 8829 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8830 if (!b) { 8831 ret = -TARGET_EFAULT; 8832 break; 8833 } 8834 } 8835 p = lock_user_string(arg1); 8836 if (p) { 8837 if (num == TARGET_NR_listxattr) { 8838 ret = get_errno(listxattr(p, b, arg3)); 8839 } else { 8840 ret = get_errno(llistxattr(p, b, arg3)); 8841 } 8842 } else { 8843 ret = -TARGET_EFAULT; 8844 } 8845 unlock_user(p, arg1, 0); 8846 unlock_user(b, arg2, arg3); 8847 break; 8848 } 8849 case TARGET_NR_flistxattr: 8850 { 8851 void *b = 0; 8852 if (arg2) { 8853 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8854 if (!b) { 8855 ret = -TARGET_EFAULT; 8856 break; 8857 } 8858 } 8859 ret = get_errno(flistxattr(arg1, b, arg3)); 8860 unlock_user(b, arg2, arg3); 8861 break; 8862 } 8863 case TARGET_NR_setxattr: 8864 case TARGET_NR_lsetxattr: 8865 { 8866 void *p, *n, *v = 0; 8867 if (arg3) { 8868 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8869 if (!v) { 8870 ret = -TARGET_EFAULT; 8871 break; 8872 } 8873 } 8874 p = lock_user_string(arg1); 8875 n = lock_user_string(arg2); 8876 if (p && n) { 8877 if (num == TARGET_NR_setxattr) { 8878 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8879 } else { 8880 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8881 } 8882 } else { 8883 ret = -TARGET_EFAULT; 8884 } 8885 unlock_user(p, arg1, 0); 8886 unlock_user(n, arg2, 0); 8887 unlock_user(v, arg3, 0); 8888 } 8889 break; 8890 case TARGET_NR_fsetxattr: 8891 { 8892 void *n, *v = 0; 8893 if (arg3) { 8894 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8895 if (!v) { 8896 ret = -TARGET_EFAULT; 8897 break; 8898 } 8899 } 8900 n = lock_user_string(arg2); 8901 if (n) { 8902 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8903 } else { 8904 ret = -TARGET_EFAULT; 8905 } 8906 unlock_user(n, 
arg2, 0); 8907 unlock_user(v, arg3, 0); 8908 } 8909 break; 8910 case TARGET_NR_getxattr: 8911 case TARGET_NR_lgetxattr: 8912 { 8913 void *p, *n, *v = 0; 8914 if (arg3) { 8915 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8916 if (!v) { 8917 ret = -TARGET_EFAULT; 8918 break; 8919 } 8920 } 8921 p = lock_user_string(arg1); 8922 n = lock_user_string(arg2); 8923 if (p && n) { 8924 if (num == TARGET_NR_getxattr) { 8925 ret = get_errno(getxattr(p, n, v, arg4)); 8926 } else { 8927 ret = get_errno(lgetxattr(p, n, v, arg4)); 8928 } 8929 } else { 8930 ret = -TARGET_EFAULT; 8931 } 8932 unlock_user(p, arg1, 0); 8933 unlock_user(n, arg2, 0); 8934 unlock_user(v, arg3, arg4); 8935 } 8936 break; 8937 case TARGET_NR_fgetxattr: 8938 { 8939 void *n, *v = 0; 8940 if (arg3) { 8941 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8942 if (!v) { 8943 ret = -TARGET_EFAULT; 8944 break; 8945 } 8946 } 8947 n = lock_user_string(arg2); 8948 if (n) { 8949 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8950 } else { 8951 ret = -TARGET_EFAULT; 8952 } 8953 unlock_user(n, arg2, 0); 8954 unlock_user(v, arg3, arg4); 8955 } 8956 break; 8957 case TARGET_NR_removexattr: 8958 case TARGET_NR_lremovexattr: 8959 { 8960 void *p, *n; 8961 p = lock_user_string(arg1); 8962 n = lock_user_string(arg2); 8963 if (p && n) { 8964 if (num == TARGET_NR_removexattr) { 8965 ret = get_errno(removexattr(p, n)); 8966 } else { 8967 ret = get_errno(lremovexattr(p, n)); 8968 } 8969 } else { 8970 ret = -TARGET_EFAULT; 8971 } 8972 unlock_user(p, arg1, 0); 8973 unlock_user(n, arg2, 0); 8974 } 8975 break; 8976 case TARGET_NR_fremovexattr: 8977 { 8978 void *n; 8979 n = lock_user_string(arg2); 8980 if (n) { 8981 ret = get_errno(fremovexattr(arg1, n)); 8982 } else { 8983 ret = -TARGET_EFAULT; 8984 } 8985 unlock_user(n, arg2, 0); 8986 } 8987 break; 8988 #endif 8989 #endif /* CONFIG_ATTR */ 8990 #ifdef TARGET_NR_set_thread_area 8991 case TARGET_NR_set_thread_area: 8992 #if defined(TARGET_MIPS) 8993 ((CPUMIPSState *) 
cpu_env)->active_tc.CP0_UserLocal = arg1; 8994 ret = 0; 8995 break; 8996 #elif defined(TARGET_CRIS) 8997 if (arg1 & 0xff) 8998 ret = -TARGET_EINVAL; 8999 else { 9000 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 9001 ret = 0; 9002 } 9003 break; 9004 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 9005 ret = do_set_thread_area(cpu_env, arg1); 9006 break; 9007 #elif defined(TARGET_M68K) 9008 { 9009 TaskState *ts = cpu->opaque; 9010 ts->tp_value = arg1; 9011 ret = 0; 9012 break; 9013 } 9014 #else 9015 goto unimplemented_nowarn; 9016 #endif 9017 #endif 9018 #ifdef TARGET_NR_get_thread_area 9019 case TARGET_NR_get_thread_area: 9020 #if defined(TARGET_I386) && defined(TARGET_ABI32) 9021 ret = do_get_thread_area(cpu_env, arg1); 9022 break; 9023 #elif defined(TARGET_M68K) 9024 { 9025 TaskState *ts = cpu->opaque; 9026 ret = ts->tp_value; 9027 break; 9028 } 9029 #else 9030 goto unimplemented_nowarn; 9031 #endif 9032 #endif 9033 #ifdef TARGET_NR_getdomainname 9034 case TARGET_NR_getdomainname: 9035 goto unimplemented_nowarn; 9036 #endif 9037 9038 #ifdef TARGET_NR_clock_gettime 9039 case TARGET_NR_clock_gettime: 9040 { 9041 struct timespec ts; 9042 ret = get_errno(clock_gettime(arg1, &ts)); 9043 if (!is_error(ret)) { 9044 host_to_target_timespec(arg2, &ts); 9045 } 9046 break; 9047 } 9048 #endif 9049 #ifdef TARGET_NR_clock_getres 9050 case TARGET_NR_clock_getres: 9051 { 9052 struct timespec ts; 9053 ret = get_errno(clock_getres(arg1, &ts)); 9054 if (!is_error(ret)) { 9055 host_to_target_timespec(arg2, &ts); 9056 } 9057 break; 9058 } 9059 #endif 9060 #ifdef TARGET_NR_clock_nanosleep 9061 case TARGET_NR_clock_nanosleep: 9062 { 9063 struct timespec ts; 9064 target_to_host_timespec(&ts, arg3); 9065 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL)); 9066 if (arg4) 9067 host_to_target_timespec(arg4, &ts); 9068 9069 #if defined(TARGET_PPC) 9070 /* clock_nanosleep is odd in that it returns positive errno values. 
9071 * On PPC, CR0 bit 3 should be set in such a situation. */ 9072 if (ret) { 9073 ((CPUPPCState *)cpu_env)->crf[0] |= 1; 9074 } 9075 #endif 9076 break; 9077 } 9078 #endif 9079 9080 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 9081 case TARGET_NR_set_tid_address: 9082 ret = get_errno(set_tid_address((int *)g2h(arg1))); 9083 break; 9084 #endif 9085 9086 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 9087 case TARGET_NR_tkill: 9088 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 9089 break; 9090 #endif 9091 9092 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 9093 case TARGET_NR_tgkill: 9094 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 9095 target_to_host_signal(arg3))); 9096 break; 9097 #endif 9098 9099 #ifdef TARGET_NR_set_robust_list 9100 case TARGET_NR_set_robust_list: 9101 case TARGET_NR_get_robust_list: 9102 /* The ABI for supporting robust futexes has userspace pass 9103 * the kernel a pointer to a linked list which is updated by 9104 * userspace after the syscall; the list is walked by the kernel 9105 * when the thread exits. Since the linked list in QEMU guest 9106 * memory isn't a valid linked list for the host and we have 9107 * no way to reliably intercept the thread-death event, we can't 9108 * support these. Silently return ENOSYS so that guest userspace 9109 * falls back to a non-robust futex implementation (which should 9110 * be OK except in the corner case of the guest crashing while 9111 * holding a mutex that is shared with another process via 9112 * shared memory). 
9113 */ 9114 goto unimplemented_nowarn; 9115 #endif 9116 9117 #if defined(TARGET_NR_utimensat) 9118 case TARGET_NR_utimensat: 9119 { 9120 struct timespec *tsp, ts[2]; 9121 if (!arg3) { 9122 tsp = NULL; 9123 } else { 9124 target_to_host_timespec(ts, arg3); 9125 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 9126 tsp = ts; 9127 } 9128 if (!arg2) 9129 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 9130 else { 9131 if (!(p = lock_user_string(arg2))) { 9132 ret = -TARGET_EFAULT; 9133 goto fail; 9134 } 9135 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 9136 unlock_user(p, arg2, 0); 9137 } 9138 } 9139 break; 9140 #endif 9141 case TARGET_NR_futex: 9142 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 9143 break; 9144 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 9145 case TARGET_NR_inotify_init: 9146 ret = get_errno(sys_inotify_init()); 9147 break; 9148 #endif 9149 #ifdef CONFIG_INOTIFY1 9150 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 9151 case TARGET_NR_inotify_init1: 9152 ret = get_errno(sys_inotify_init1(arg1)); 9153 break; 9154 #endif 9155 #endif 9156 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 9157 case TARGET_NR_inotify_add_watch: 9158 p = lock_user_string(arg2); 9159 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 9160 unlock_user(p, arg2, 0); 9161 break; 9162 #endif 9163 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 9164 case TARGET_NR_inotify_rm_watch: 9165 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 9166 break; 9167 #endif 9168 9169 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 9170 case TARGET_NR_mq_open: 9171 { 9172 struct mq_attr posix_mq_attr, *attrp; 9173 9174 p = lock_user_string(arg1 - 1); 9175 if (arg4 != 0) { 9176 copy_from_user_mq_attr (&posix_mq_attr, arg4); 9177 attrp = &posix_mq_attr; 9178 } else { 9179 attrp = 0; 9180 } 9181 ret = get_errno(mq_open(p, arg2, arg3, attrp)); 
9182 unlock_user (p, arg1, 0); 9183 } 9184 break; 9185 9186 case TARGET_NR_mq_unlink: 9187 p = lock_user_string(arg1 - 1); 9188 ret = get_errno(mq_unlink(p)); 9189 unlock_user (p, arg1, 0); 9190 break; 9191 9192 case TARGET_NR_mq_timedsend: 9193 { 9194 struct timespec ts; 9195 9196 p = lock_user (VERIFY_READ, arg2, arg3, 1); 9197 if (arg5 != 0) { 9198 target_to_host_timespec(&ts, arg5); 9199 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 9200 host_to_target_timespec(arg5, &ts); 9201 } 9202 else 9203 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 9204 unlock_user (p, arg2, arg3); 9205 } 9206 break; 9207 9208 case TARGET_NR_mq_timedreceive: 9209 { 9210 struct timespec ts; 9211 unsigned int prio; 9212 9213 p = lock_user (VERIFY_READ, arg2, arg3, 1); 9214 if (arg5 != 0) { 9215 target_to_host_timespec(&ts, arg5); 9216 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); 9217 host_to_target_timespec(arg5, &ts); 9218 } 9219 else 9220 ret = get_errno(mq_receive(arg1, p, arg3, &prio)); 9221 unlock_user (p, arg2, arg3); 9222 if (arg4 != 0) 9223 put_user_u32(prio, arg4); 9224 } 9225 break; 9226 9227 /* Not implemented for now... 
*/ 9228 /* case TARGET_NR_mq_notify: */ 9229 /* break; */ 9230 9231 case TARGET_NR_mq_getsetattr: 9232 { 9233 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 9234 ret = 0; 9235 if (arg3 != 0) { 9236 ret = mq_getattr(arg1, &posix_mq_attr_out); 9237 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 9238 } 9239 if (arg2 != 0) { 9240 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 9241 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 9242 } 9243 9244 } 9245 break; 9246 #endif 9247 9248 #ifdef CONFIG_SPLICE 9249 #ifdef TARGET_NR_tee 9250 case TARGET_NR_tee: 9251 { 9252 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 9253 } 9254 break; 9255 #endif 9256 #ifdef TARGET_NR_splice 9257 case TARGET_NR_splice: 9258 { 9259 loff_t loff_in, loff_out; 9260 loff_t *ploff_in = NULL, *ploff_out = NULL; 9261 if(arg2) { 9262 get_user_u64(loff_in, arg2); 9263 ploff_in = &loff_in; 9264 } 9265 if(arg4) { 9266 get_user_u64(loff_out, arg2); 9267 ploff_out = &loff_out; 9268 } 9269 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 9270 } 9271 break; 9272 #endif 9273 #ifdef TARGET_NR_vmsplice 9274 case TARGET_NR_vmsplice: 9275 { 9276 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 9277 if (vec != NULL) { 9278 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 9279 unlock_iovec(vec, arg2, arg3, 0); 9280 } else { 9281 ret = -host_to_target_errno(errno); 9282 } 9283 } 9284 break; 9285 #endif 9286 #endif /* CONFIG_SPLICE */ 9287 #ifdef CONFIG_EVENTFD 9288 #if defined(TARGET_NR_eventfd) 9289 case TARGET_NR_eventfd: 9290 ret = get_errno(eventfd(arg1, 0)); 9291 break; 9292 #endif 9293 #if defined(TARGET_NR_eventfd2) 9294 case TARGET_NR_eventfd2: 9295 { 9296 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 9297 if (arg2 & TARGET_O_NONBLOCK) { 9298 host_flags |= O_NONBLOCK; 9299 } 9300 if (arg2 & TARGET_O_CLOEXEC) { 9301 host_flags |= O_CLOEXEC; 9302 } 9303 ret = get_errno(eventfd(arg1, host_flags)); 9304 break; 9305 } 9306 #endif 9307 #endif 
/* CONFIG_EVENTFD */ 9308 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 9309 case TARGET_NR_fallocate: 9310 #if TARGET_ABI_BITS == 32 9311 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 9312 target_offset64(arg5, arg6))); 9313 #else 9314 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 9315 #endif 9316 break; 9317 #endif 9318 #if defined(CONFIG_SYNC_FILE_RANGE) 9319 #if defined(TARGET_NR_sync_file_range) 9320 case TARGET_NR_sync_file_range: 9321 #if TARGET_ABI_BITS == 32 9322 #if defined(TARGET_MIPS) 9323 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9324 target_offset64(arg5, arg6), arg7)); 9325 #else 9326 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 9327 target_offset64(arg4, arg5), arg6)); 9328 #endif /* !TARGET_MIPS */ 9329 #else 9330 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 9331 #endif 9332 break; 9333 #endif 9334 #if defined(TARGET_NR_sync_file_range2) 9335 case TARGET_NR_sync_file_range2: 9336 /* This is like sync_file_range but the arguments are reordered */ 9337 #if TARGET_ABI_BITS == 32 9338 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9339 target_offset64(arg5, arg6), arg2)); 9340 #else 9341 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 9342 #endif 9343 break; 9344 #endif 9345 #endif 9346 #if defined(CONFIG_EPOLL) 9347 #if defined(TARGET_NR_epoll_create) 9348 case TARGET_NR_epoll_create: 9349 ret = get_errno(epoll_create(arg1)); 9350 break; 9351 #endif 9352 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 9353 case TARGET_NR_epoll_create1: 9354 ret = get_errno(epoll_create1(arg1)); 9355 break; 9356 #endif 9357 #if defined(TARGET_NR_epoll_ctl) 9358 case TARGET_NR_epoll_ctl: 9359 { 9360 struct epoll_event ep; 9361 struct epoll_event *epp = 0; 9362 if (arg4) { 9363 struct target_epoll_event *target_ep; 9364 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 9365 goto efault; 9366 } 9367 ep.events = 
tswap32(target_ep->events); 9368 /* The epoll_data_t union is just opaque data to the kernel, 9369 * so we transfer all 64 bits across and need not worry what 9370 * actual data type it is. 9371 */ 9372 ep.data.u64 = tswap64(target_ep->data.u64); 9373 unlock_user_struct(target_ep, arg4, 0); 9374 epp = &ep; 9375 } 9376 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 9377 break; 9378 } 9379 #endif 9380 9381 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 9382 #define IMPLEMENT_EPOLL_PWAIT 9383 #endif 9384 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 9385 #if defined(TARGET_NR_epoll_wait) 9386 case TARGET_NR_epoll_wait: 9387 #endif 9388 #if defined(IMPLEMENT_EPOLL_PWAIT) 9389 case TARGET_NR_epoll_pwait: 9390 #endif 9391 { 9392 struct target_epoll_event *target_ep; 9393 struct epoll_event *ep; 9394 int epfd = arg1; 9395 int maxevents = arg3; 9396 int timeout = arg4; 9397 9398 target_ep = lock_user(VERIFY_WRITE, arg2, 9399 maxevents * sizeof(struct target_epoll_event), 1); 9400 if (!target_ep) { 9401 goto efault; 9402 } 9403 9404 ep = alloca(maxevents * sizeof(struct epoll_event)); 9405 9406 switch (num) { 9407 #if defined(IMPLEMENT_EPOLL_PWAIT) 9408 case TARGET_NR_epoll_pwait: 9409 { 9410 target_sigset_t *target_set; 9411 sigset_t _set, *set = &_set; 9412 9413 if (arg5) { 9414 target_set = lock_user(VERIFY_READ, arg5, 9415 sizeof(target_sigset_t), 1); 9416 if (!target_set) { 9417 unlock_user(target_ep, arg2, 0); 9418 goto efault; 9419 } 9420 target_to_host_sigset(set, target_set); 9421 unlock_user(target_set, arg5, 0); 9422 } else { 9423 set = NULL; 9424 } 9425 9426 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 9427 break; 9428 } 9429 #endif 9430 #if defined(TARGET_NR_epoll_wait) 9431 case TARGET_NR_epoll_wait: 9432 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 9433 break; 9434 #endif 9435 default: 9436 ret = -TARGET_ENOSYS; 9437 } 9438 if (!is_error(ret)) { 9439 int i; 9440 for (i = 0; i < 
ret; i++) { 9441 target_ep[i].events = tswap32(ep[i].events); 9442 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 9443 } 9444 } 9445 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 9446 break; 9447 } 9448 #endif 9449 #endif 9450 #ifdef TARGET_NR_prlimit64 9451 case TARGET_NR_prlimit64: 9452 { 9453 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 9454 struct target_rlimit64 *target_rnew, *target_rold; 9455 struct host_rlimit64 rnew, rold, *rnewp = 0; 9456 if (arg3) { 9457 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 9458 goto efault; 9459 } 9460 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 9461 rnew.rlim_max = tswap64(target_rnew->rlim_max); 9462 unlock_user_struct(target_rnew, arg3, 0); 9463 rnewp = &rnew; 9464 } 9465 9466 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0)); 9467 if (!is_error(ret) && arg4) { 9468 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 9469 goto efault; 9470 } 9471 target_rold->rlim_cur = tswap64(rold.rlim_cur); 9472 target_rold->rlim_max = tswap64(rold.rlim_max); 9473 unlock_user_struct(target_rold, arg4, 1); 9474 } 9475 break; 9476 } 9477 #endif 9478 #ifdef TARGET_NR_gethostname 9479 case TARGET_NR_gethostname: 9480 { 9481 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9482 if (name) { 9483 ret = get_errno(gethostname(name, arg2)); 9484 unlock_user(name, arg1, arg2); 9485 } else { 9486 ret = -TARGET_EFAULT; 9487 } 9488 break; 9489 } 9490 #endif 9491 #ifdef TARGET_NR_atomic_cmpxchg_32 9492 case TARGET_NR_atomic_cmpxchg_32: 9493 { 9494 /* should use start_exclusive from main.c */ 9495 abi_ulong mem_value; 9496 if (get_user_u32(mem_value, arg6)) { 9497 target_siginfo_t info; 9498 info.si_signo = SIGSEGV; 9499 info.si_errno = 0; 9500 info.si_code = TARGET_SEGV_MAPERR; 9501 info._sifields._sigfault._addr = arg6; 9502 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 9503 ret = 0xdeadbeef; 9504 9505 } 9506 if (mem_value == arg2) 9507 
put_user_u32(arg1, arg6); 9508 ret = mem_value; 9509 break; 9510 } 9511 #endif 9512 #ifdef TARGET_NR_atomic_barrier 9513 case TARGET_NR_atomic_barrier: 9514 { 9515 /* Like the kernel implementation and the qemu arm barrier, no-op this? */ 9516 ret = 0; 9517 break; 9518 } 9519 #endif 9520 9521 #ifdef TARGET_NR_timer_create 9522 case TARGET_NR_timer_create: 9523 { 9524 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 9525 9526 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 9527 struct target_timer_t *ptarget_timer; 9528 9529 int clkid = arg1; 9530 int timer_index = next_free_host_timer(); 9531 9532 if (timer_index < 0) { 9533 ret = -TARGET_EAGAIN; 9534 } else { 9535 timer_t *phtimer = g_posix_timers + timer_index; 9536 9537 if (arg2) { 9538 phost_sevp = &host_sevp; 9539 ret = target_to_host_sigevent(phost_sevp, arg2); 9540 if (ret != 0) { 9541 break; 9542 } 9543 } 9544 9545 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 9546 if (ret) { 9547 phtimer = NULL; 9548 } else { 9549 if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) { 9550 goto efault; 9551 } 9552 ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index); 9553 unlock_user_struct(ptarget_timer, arg3, 1); 9554 } 9555 } 9556 break; 9557 } 9558 #endif 9559 9560 #ifdef TARGET_NR_timer_settime 9561 case TARGET_NR_timer_settime: 9562 { 9563 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 9564 * struct itimerspec * old_value */ 9565 arg1 &= 0xffff; 9566 if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9567 ret = -TARGET_EINVAL; 9568 } else { 9569 timer_t htimer = g_posix_timers[arg1]; 9570 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 9571 9572 target_to_host_itimerspec(&hspec_new, arg3); 9573 ret = get_errno( 9574 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 9575 host_to_target_itimerspec(arg2, &hspec_old); 9576 } 9577 break; 9578 } 9579 #endif 9580 9581 #ifdef TARGET_NR_timer_gettime 9582 case 
TARGET_NR_timer_gettime: 9583 { 9584 /* args: timer_t timerid, struct itimerspec *curr_value */ 9585 arg1 &= 0xffff; 9586 if (!arg2) { 9587 return -TARGET_EFAULT; 9588 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9589 ret = -TARGET_EINVAL; 9590 } else { 9591 timer_t htimer = g_posix_timers[arg1]; 9592 struct itimerspec hspec; 9593 ret = get_errno(timer_gettime(htimer, &hspec)); 9594 9595 if (host_to_target_itimerspec(arg2, &hspec)) { 9596 ret = -TARGET_EFAULT; 9597 } 9598 } 9599 break; 9600 } 9601 #endif 9602 9603 #ifdef TARGET_NR_timer_getoverrun 9604 case TARGET_NR_timer_getoverrun: 9605 { 9606 /* args: timer_t timerid */ 9607 arg1 &= 0xffff; 9608 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9609 ret = -TARGET_EINVAL; 9610 } else { 9611 timer_t htimer = g_posix_timers[arg1]; 9612 ret = get_errno(timer_getoverrun(htimer)); 9613 } 9614 break; 9615 } 9616 #endif 9617 9618 #ifdef TARGET_NR_timer_delete 9619 case TARGET_NR_timer_delete: 9620 { 9621 /* args: timer_t timerid */ 9622 arg1 &= 0xffff; 9623 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9624 ret = -TARGET_EINVAL; 9625 } else { 9626 timer_t htimer = g_posix_timers[arg1]; 9627 ret = get_errno(timer_delete(htimer)); 9628 g_posix_timers[arg1] = 0; 9629 } 9630 break; 9631 } 9632 #endif 9633 9634 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 9635 case TARGET_NR_timerfd_create: 9636 ret = get_errno(timerfd_create(arg1, 9637 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 9638 break; 9639 #endif 9640 9641 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 9642 case TARGET_NR_timerfd_gettime: 9643 { 9644 struct itimerspec its_curr; 9645 9646 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 9647 9648 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 9649 goto efault; 9650 } 9651 } 9652 break; 9653 #endif 9654 9655 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 9656 case TARGET_NR_timerfd_settime: 9657 { 9658 struct 
itimerspec its_new, its_old, *p_new; 9659 9660 if (arg3) { 9661 if (target_to_host_itimerspec(&its_new, arg3)) { 9662 goto efault; 9663 } 9664 p_new = &its_new; 9665 } else { 9666 p_new = NULL; 9667 } 9668 9669 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 9670 9671 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 9672 goto efault; 9673 } 9674 } 9675 break; 9676 #endif 9677 9678 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 9679 case TARGET_NR_ioprio_get: 9680 ret = get_errno(ioprio_get(arg1, arg2)); 9681 break; 9682 #endif 9683 9684 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 9685 case TARGET_NR_ioprio_set: 9686 ret = get_errno(ioprio_set(arg1, arg2, arg3)); 9687 break; 9688 #endif 9689 9690 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 9691 case TARGET_NR_setns: 9692 ret = get_errno(setns(arg1, arg2)); 9693 break; 9694 #endif 9695 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 9696 case TARGET_NR_unshare: 9697 ret = get_errno(unshare(arg1)); 9698 break; 9699 #endif 9700 9701 default: 9702 unimplemented: 9703 gemu_log("qemu: Unsupported syscall: %d\n", num); 9704 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 9705 unimplemented_nowarn: 9706 #endif 9707 ret = -TARGET_ENOSYS; 9708 break; 9709 } 9710 fail: 9711 #ifdef DEBUG 9712 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 9713 #endif 9714 if(do_strace) 9715 print_syscall_ret(num, ret); 9716 return ret; 9717 efault: 9718 ret = -TARGET_EFAULT; 9719 goto fail; 9720 } 9721