1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <stdarg.h> 23 #include <string.h> 24 #include <elf.h> 25 #include <endian.h> 26 #include <errno.h> 27 #include <unistd.h> 28 #include <fcntl.h> 29 #include <time.h> 30 #include <limits.h> 31 #include <grp.h> 32 #include <sys/types.h> 33 #include <sys/ipc.h> 34 #include <sys/msg.h> 35 #include <sys/wait.h> 36 #include <sys/time.h> 37 #include <sys/stat.h> 38 #include <sys/mount.h> 39 #include <sys/file.h> 40 #include <sys/fsuid.h> 41 #include <sys/personality.h> 42 #include <sys/prctl.h> 43 #include <sys/resource.h> 44 #include <sys/mman.h> 45 #include <sys/swap.h> 46 #include <signal.h> 47 #include <sched.h> 48 #ifdef __ia64__ 49 int __clone2(int (*fn)(void *), void *child_stack_base, 50 size_t stack_size, int flags, void *arg, ...); 51 #endif 52 #include <sys/socket.h> 53 #include <sys/un.h> 54 #include <sys/uio.h> 55 #include <sys/poll.h> 56 #include <sys/times.h> 57 #include <sys/shm.h> 58 #include <sys/sem.h> 59 #include <sys/statfs.h> 60 #include <utime.h> 61 #include <sys/sysinfo.h> 62 #include <sys/utsname.h> 63 //#include <sys/user.h> 64 #include <netinet/ip.h> 65 #include <netinet/tcp.h> 66 #include <linux/wireless.h> 67 #include <linux/icmp.h> 68 #include "qemu-common.h" 69 #ifdef TARGET_GPROF 70 #include <sys/gmon.h> 71 #endif 72 #ifdef CONFIG_EVENTFD 73 #include <sys/eventfd.h> 74 #endif 75 #ifdef CONFIG_EPOLL 76 #include <sys/epoll.h> 77 #endif 78 #ifdef CONFIG_ATTR 79 #include "qemu/xattr.h" 80 #endif 81 #ifdef CONFIG_SENDFILE 82 #include <sys/sendfile.h> 83 #endif 84 85 #define termios host_termios 86 #define winsize host_winsize 87 #define termio host_termio 88 #define sgttyb host_sgttyb /* same as target */ 89 #define tchars host_tchars /* same as target */ 90 #define ltchars host_ltchars /* same as target */ 91 92 #include <linux/termios.h> 93 #include <linux/unistd.h> 94 #include <linux/utsname.h> 95 #include <linux/cdrom.h> 96 #include <linux/hdreg.h> 97 #include <linux/soundcard.h> 98 #include <linux/kd.h> 99 #include <linux/mtio.h> 100 #include <linux/fs.h> 101 #if defined(CONFIG_FIEMAP) 102 #include <linux/fiemap.h> 103 #endif 104 #include <linux/fb.h> 105 #include <linux/vt.h> 106 #include <linux/dm-ioctl.h> 107 #include <linux/reboot.h> 108 #include <linux/route.h> 109 #include <linux/filter.h> 110 #include <linux/blkpg.h> 111 #include "linux_loop.h" 112 #include "cpu-uname.h" 113 114 #include "qemu.h" 115 116 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ 117 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) 118 119 //#define DEBUG 120 121 //#include <linux/msdos_fs.h> 122 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 123 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 124 125 126 #undef _syscall0 
127 #undef _syscall1 128 #undef _syscall2 129 #undef _syscall3 130 #undef _syscall4 131 #undef _syscall5 132 #undef _syscall6 133 134 #define _syscall0(type,name) \ 135 static type name (void) \ 136 { \ 137 return syscall(__NR_##name); \ 138 } 139 140 #define _syscall1(type,name,type1,arg1) \ 141 static type name (type1 arg1) \ 142 { \ 143 return syscall(__NR_##name, arg1); \ 144 } 145 146 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 147 static type name (type1 arg1,type2 arg2) \ 148 { \ 149 return syscall(__NR_##name, arg1, arg2); \ 150 } 151 152 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 153 static type name (type1 arg1,type2 arg2,type3 arg3) \ 154 { \ 155 return syscall(__NR_##name, arg1, arg2, arg3); \ 156 } 157 158 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 160 { \ 161 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 162 } 163 164 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 165 type5,arg5) \ 166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 167 { \ 168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 169 } 170 171 172 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 173 type5,arg5,type6,arg6) \ 174 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 175 type6 arg6) \ 176 { \ 177 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 178 } 179 180 181 #define __NR_sys_uname __NR_uname 182 #define __NR_sys_getcwd1 __NR_getcwd 183 #define __NR_sys_getdents __NR_getdents 184 #define __NR_sys_getdents64 __NR_getdents64 185 #define __NR_sys_getpriority __NR_getpriority 186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 187 #define __NR_sys_syslog __NR_syslog 188 #define __NR_sys_tgkill __NR_tgkill 189 #define __NR_sys_tkill __NR_tkill 190 #define __NR_sys_futex __NR_futex 191 #define __NR_sys_inotify_init __NR_inotify_init 192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 194 195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 196 defined(__s390x__) 197 #define __NR__llseek __NR_lseek 198 #endif 199 200 #ifdef __NR_gettid 201 _syscall0(int, gettid) 202 #else 203 /* This is a replacement for the host gettid() and must return a host 204 errno. 
*/ 205 static int gettid(void) { 206 return -ENOSYS; 207 } 208 #endif 209 #ifdef __NR_getdents 210 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 211 #endif 212 #if !defined(__NR_getdents) || \ 213 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 214 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 215 #endif 216 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 217 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 218 loff_t *, res, uint, wh); 219 #endif 220 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo) 221 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 222 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 223 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig) 224 #endif 225 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 226 _syscall2(int,sys_tkill,int,tid,int,sig) 227 #endif 228 #ifdef __NR_exit_group 229 _syscall1(int,exit_group,int,error_code) 230 #endif 231 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 232 _syscall1(int,set_tid_address,int *,tidptr) 233 #endif 234 #if defined(TARGET_NR_futex) && defined(__NR_futex) 235 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 236 const struct timespec *,timeout,int *,uaddr2,int,val3) 237 #endif 238 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 239 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 240 unsigned long *, user_mask_ptr); 241 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 242 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 243 unsigned long *, user_mask_ptr); 244 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 245 void *, arg); 246 247 static bitmask_transtbl fcntl_flags_tbl[] = { 248 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 249 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 250 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 251 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 252 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 253 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 254 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 255 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 256 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 257 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 258 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 259 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 260 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 261 #if defined(O_DIRECT) 262 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 263 #endif 264 #if defined(O_NOATIME) 265 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 266 #endif 267 #if defined(O_CLOEXEC) 268 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 269 #endif 270 #if defined(O_PATH) 271 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 272 #endif 273 /* Don't terminate the list prematurely on 64-bit host+guest. 
*/ 274 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 275 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 276 #endif 277 { 0, 0, 0, 0 } 278 }; 279 280 #define COPY_UTSNAME_FIELD(dest, src) \ 281 do { \ 282 /* __NEW_UTS_LEN doesn't include terminating null */ \ 283 (void) strncpy((dest), (src), __NEW_UTS_LEN); \ 284 (dest)[__NEW_UTS_LEN] = '\0'; \ 285 } while (0) 286 287 static int sys_uname(struct new_utsname *buf) 288 { 289 struct utsname uts_buf; 290 291 if (uname(&uts_buf) < 0) 292 return (-1); 293 294 /* 295 * Just in case these have some differences, we 296 * translate utsname to new_utsname (which is the 297 * struct linux kernel uses). 298 */ 299 300 memset(buf, 0, sizeof(*buf)); 301 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname); 302 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename); 303 COPY_UTSNAME_FIELD(buf->release, uts_buf.release); 304 COPY_UTSNAME_FIELD(buf->version, uts_buf.version); 305 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine); 306 #ifdef _GNU_SOURCE 307 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname); 308 #endif 309 return (0); 310 311 #undef COPY_UTSNAME_FIELD 312 } 313 314 static int sys_getcwd1(char *buf, size_t size) 315 { 316 if (getcwd(buf, size) == NULL) { 317 /* getcwd() sets errno */ 318 return (-1); 319 } 320 return strlen(buf)+1; 321 } 322 323 #ifdef TARGET_NR_openat 324 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode) 325 { 326 /* 327 * open(2) has extra parameter 'mode' when called with 328 * flag O_CREAT. 329 */ 330 if ((flags & O_CREAT) != 0) { 331 return (openat(dirfd, pathname, flags, mode)); 332 } 333 return (openat(dirfd, pathname, flags)); 334 } 335 #endif 336 337 #ifdef TARGET_NR_utimensat 338 #ifdef CONFIG_UTIMENSAT 339 static int sys_utimensat(int dirfd, const char *pathname, 340 const struct timespec times[2], int flags) 341 { 342 if (pathname == NULL) 343 return futimens(dirfd, times); 344 else 345 return utimensat(dirfd, pathname, times, flags); 346 } 347 #elif defined(__NR_utimensat) 348 #define __NR_sys_utimensat __NR_utimensat 349 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 350 const struct timespec *,tsp,int,flags) 351 #else 352 static int sys_utimensat(int dirfd, const char *pathname, 353 const struct timespec times[2], int flags) 354 { 355 errno = ENOSYS; 356 return -1; 357 } 358 #endif 359 #endif /* TARGET_NR_utimensat */ 360 361 #ifdef CONFIG_INOTIFY 362 #include <sys/inotify.h> 363 364 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 365 static int sys_inotify_init(void) 366 { 367 return (inotify_init()); 368 } 369 #endif 370 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 371 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 372 { 373 return (inotify_add_watch(fd, pathname, mask)); 374 } 375 #endif 376 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 377 static int sys_inotify_rm_watch(int fd, int32_t wd) 378 { 379 return (inotify_rm_watch(fd, wd)); 380 } 381 #endif 382 #ifdef CONFIG_INOTIFY1 383 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 384 static int sys_inotify_init1(int flags) 385 { 386 return (inotify_init1(flags)); 387 } 388 #endif 389 #endif 390 #else 391 /* Userspace can usually survive runtime without inotify */ 392 #undef TARGET_NR_inotify_init 393 #undef TARGET_NR_inotify_init1 394 #undef TARGET_NR_inotify_add_watch 395 #undef TARGET_NR_inotify_rm_watch 396 #endif /* CONFIG_INOTIFY */ 397 398 #if 
defined(TARGET_NR_ppoll) 399 #ifndef __NR_ppoll 400 # define __NR_ppoll -1 401 #endif 402 #define __NR_sys_ppoll __NR_ppoll 403 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds, 404 struct timespec *, timeout, const __sigset_t *, sigmask, 405 size_t, sigsetsize) 406 #endif 407 408 #if defined(TARGET_NR_pselect6) 409 #ifndef __NR_pselect6 410 # define __NR_pselect6 -1 411 #endif 412 #define __NR_sys_pselect6 __NR_pselect6 413 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, 414 fd_set *, exceptfds, struct timespec *, timeout, void *, sig); 415 #endif 416 417 #if defined(TARGET_NR_prlimit64) 418 #ifndef __NR_prlimit64 419 # define __NR_prlimit64 -1 420 #endif 421 #define __NR_sys_prlimit64 __NR_prlimit64 422 /* The glibc rlimit structure may not be that used by the underlying syscall */ 423 struct host_rlimit64 { 424 uint64_t rlim_cur; 425 uint64_t rlim_max; 426 }; 427 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 428 const struct host_rlimit64 *, new_limit, 429 struct host_rlimit64 *, old_limit) 430 #endif 431 432 433 #if defined(TARGET_NR_timer_create) 434 /* Maxiumum of 32 active POSIX timers allowed at any one time. */ 435 static timer_t g_posix_timers[32] = { 0, } ; 436 437 static inline int next_free_host_timer(void) 438 { 439 int k ; 440 /* FIXME: Does finding the next free slot require a lock? */ 441 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) { 442 if (g_posix_timers[k] == 0) { 443 g_posix_timers[k] = (timer_t) 1; 444 return k; 445 } 446 } 447 return -1; 448 } 449 #endif 450 451 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ 452 #ifdef TARGET_ARM 453 static inline int regpairs_aligned(void *cpu_env) { 454 return ((((CPUARMState *)cpu_env)->eabi) == 1) ; 455 } 456 #elif defined(TARGET_MIPS) 457 static inline int regpairs_aligned(void *cpu_env) { return 1; } 458 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64) 459 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs 460 * of registers which translates to the same as ARM/MIPS, because we start with 461 * r3 as arg1 */ 462 static inline int regpairs_aligned(void *cpu_env) { return 1; } 463 #else 464 static inline int regpairs_aligned(void *cpu_env) { return 0; } 465 #endif 466 467 #define ERRNO_TABLE_SIZE 1200 468 469 /* target_to_host_errno_table[] is initialized from 470 * host_to_target_errno_table[] in syscall_init(). */ 471 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 472 }; 473 474 /* 475 * This list is the union of errno values overridden in asm-<arch>/errno.h 476 * minus the errnos that are not actually generic to all archs. 
477 */ 478 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 479 [EIDRM] = TARGET_EIDRM, 480 [ECHRNG] = TARGET_ECHRNG, 481 [EL2NSYNC] = TARGET_EL2NSYNC, 482 [EL3HLT] = TARGET_EL3HLT, 483 [EL3RST] = TARGET_EL3RST, 484 [ELNRNG] = TARGET_ELNRNG, 485 [EUNATCH] = TARGET_EUNATCH, 486 [ENOCSI] = TARGET_ENOCSI, 487 [EL2HLT] = TARGET_EL2HLT, 488 [EDEADLK] = TARGET_EDEADLK, 489 [ENOLCK] = TARGET_ENOLCK, 490 [EBADE] = TARGET_EBADE, 491 [EBADR] = TARGET_EBADR, 492 [EXFULL] = TARGET_EXFULL, 493 [ENOANO] = TARGET_ENOANO, 494 [EBADRQC] = TARGET_EBADRQC, 495 [EBADSLT] = TARGET_EBADSLT, 496 [EBFONT] = TARGET_EBFONT, 497 [ENOSTR] = TARGET_ENOSTR, 498 [ENODATA] = TARGET_ENODATA, 499 [ETIME] = TARGET_ETIME, 500 [ENOSR] = TARGET_ENOSR, 501 [ENONET] = TARGET_ENONET, 502 [ENOPKG] = TARGET_ENOPKG, 503 [EREMOTE] = TARGET_EREMOTE, 504 [ENOLINK] = TARGET_ENOLINK, 505 [EADV] = TARGET_EADV, 506 [ESRMNT] = TARGET_ESRMNT, 507 [ECOMM] = TARGET_ECOMM, 508 [EPROTO] = TARGET_EPROTO, 509 [EDOTDOT] = TARGET_EDOTDOT, 510 [EMULTIHOP] = TARGET_EMULTIHOP, 511 [EBADMSG] = TARGET_EBADMSG, 512 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 513 [EOVERFLOW] = TARGET_EOVERFLOW, 514 [ENOTUNIQ] = TARGET_ENOTUNIQ, 515 [EBADFD] = TARGET_EBADFD, 516 [EREMCHG] = TARGET_EREMCHG, 517 [ELIBACC] = TARGET_ELIBACC, 518 [ELIBBAD] = TARGET_ELIBBAD, 519 [ELIBSCN] = TARGET_ELIBSCN, 520 [ELIBMAX] = TARGET_ELIBMAX, 521 [ELIBEXEC] = TARGET_ELIBEXEC, 522 [EILSEQ] = TARGET_EILSEQ, 523 [ENOSYS] = TARGET_ENOSYS, 524 [ELOOP] = TARGET_ELOOP, 525 [ERESTART] = TARGET_ERESTART, 526 [ESTRPIPE] = TARGET_ESTRPIPE, 527 [ENOTEMPTY] = TARGET_ENOTEMPTY, 528 [EUSERS] = TARGET_EUSERS, 529 [ENOTSOCK] = TARGET_ENOTSOCK, 530 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 531 [EMSGSIZE] = TARGET_EMSGSIZE, 532 [EPROTOTYPE] = TARGET_EPROTOTYPE, 533 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 534 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 535 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 536 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 537 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 538 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 539 [EADDRINUSE] = TARGET_EADDRINUSE, 540 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 541 [ENETDOWN] = TARGET_ENETDOWN, 542 [ENETUNREACH] = TARGET_ENETUNREACH, 543 [ENETRESET] = TARGET_ENETRESET, 544 [ECONNABORTED] = TARGET_ECONNABORTED, 545 [ECONNRESET] = TARGET_ECONNRESET, 546 [ENOBUFS] = TARGET_ENOBUFS, 547 [EISCONN] = TARGET_EISCONN, 548 [ENOTCONN] = TARGET_ENOTCONN, 549 [EUCLEAN] = TARGET_EUCLEAN, 550 [ENOTNAM] = TARGET_ENOTNAM, 551 [ENAVAIL] = TARGET_ENAVAIL, 552 [EISNAM] = TARGET_EISNAM, 553 [EREMOTEIO] = TARGET_EREMOTEIO, 554 [ESHUTDOWN] = TARGET_ESHUTDOWN, 555 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 556 [ETIMEDOUT] = TARGET_ETIMEDOUT, 557 [ECONNREFUSED] = TARGET_ECONNREFUSED, 558 [EHOSTDOWN] = TARGET_EHOSTDOWN, 559 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 560 [EALREADY] = TARGET_EALREADY, 561 [EINPROGRESS] = TARGET_EINPROGRESS, 562 [ESTALE] = TARGET_ESTALE, 563 [ECANCELED] = TARGET_ECANCELED, 564 [ENOMEDIUM] = TARGET_ENOMEDIUM, 565 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 566 #ifdef ENOKEY 567 [ENOKEY] = TARGET_ENOKEY, 568 #endif 569 #ifdef EKEYEXPIRED 570 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 571 #endif 572 #ifdef EKEYREVOKED 573 [EKEYREVOKED] = TARGET_EKEYREVOKED, 574 #endif 575 #ifdef EKEYREJECTED 576 [EKEYREJECTED] = TARGET_EKEYREJECTED, 577 #endif 578 #ifdef EOWNERDEAD 579 [EOWNERDEAD] = TARGET_EOWNERDEAD, 580 #endif 581 #ifdef ENOTRECOVERABLE 582 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 583 #endif 584 }; 585 586 static inline int host_to_target_errno(int err) 587 { 588 
if(host_to_target_errno_table[err]) 589 return host_to_target_errno_table[err]; 590 return err; 591 } 592 593 static inline int target_to_host_errno(int err) 594 { 595 if (target_to_host_errno_table[err]) 596 return target_to_host_errno_table[err]; 597 return err; 598 } 599 600 static inline abi_long get_errno(abi_long ret) 601 { 602 if (ret == -1) 603 return -host_to_target_errno(errno); 604 else 605 return ret; 606 } 607 608 static inline int is_error(abi_long ret) 609 { 610 return (abi_ulong)ret >= (abi_ulong)(-4096); 611 } 612 613 char *target_strerror(int err) 614 { 615 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) { 616 return NULL; 617 } 618 return strerror(target_to_host_errno(err)); 619 } 620 621 static abi_ulong target_brk; 622 static abi_ulong target_original_brk; 623 static abi_ulong brk_page; 624 625 void target_set_brk(abi_ulong new_brk) 626 { 627 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); 628 brk_page = HOST_PAGE_ALIGN(target_brk); 629 } 630 631 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) 632 #define DEBUGF_BRK(message, args...) 633 634 /* do_brk() must return target values and target errnos. */ 635 abi_long do_brk(abi_ulong new_brk) 636 { 637 abi_long mapped_addr; 638 int new_alloc_size; 639 640 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk); 641 642 if (!new_brk) { 643 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk); 644 return target_brk; 645 } 646 if (new_brk < target_original_brk) { 647 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n", 648 target_brk); 649 return target_brk; 650 } 651 652 /* If the new brk is less than the highest page reserved to the 653 * target heap allocation, set it and we're almost done... */ 654 if (new_brk <= brk_page) { 655 /* Heap contents are initialized to zero, as for anonymous 656 * mapped pages. */ 657 if (new_brk > target_brk) { 658 memset(g2h(target_brk), 0, new_brk - target_brk); 659 } 660 target_brk = new_brk; 661 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk); 662 return target_brk; 663 } 664 665 /* We need to allocate more memory after the brk... Note that 666 * we don't use MAP_FIXED because that will map over the top of 667 * any existing mapping (like the one with the host libc or qemu 668 * itself); instead we treat "mapped but at wrong address" as 669 * a failure and unmap again. 670 */ 671 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page); 672 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, 673 PROT_READ|PROT_WRITE, 674 MAP_ANON|MAP_PRIVATE, 0, 0)); 675 676 if (mapped_addr == brk_page) { 677 /* Heap contents are initialized to zero, as for anonymous 678 * mapped pages. Technically the new pages are already 679 * initialized to zero since they *are* anonymous mapped 680 * pages, however we have to take care with the contents that 681 * come from the remaining part of the previous page: it may 682 * contains garbage data due to a previous heap usage (grown 683 * then shrunken). */ 684 memset(g2h(target_brk), 0, brk_page - target_brk); 685 686 target_brk = new_brk; 687 brk_page = HOST_PAGE_ALIGN(target_brk); 688 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n", 689 target_brk); 690 return target_brk; 691 } else if (mapped_addr != -1) { 692 /* Mapped but at wrong address, meaning there wasn't actually 693 * enough space for this brk. 
694 */ 695 target_munmap(mapped_addr, new_alloc_size); 696 mapped_addr = -1; 697 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk); 698 } 699 else { 700 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk); 701 } 702 703 #if defined(TARGET_ALPHA) 704 /* We (partially) emulate OSF/1 on Alpha, which requires we 705 return a proper errno, not an unchanged brk value. */ 706 return -TARGET_ENOMEM; 707 #endif 708 /* For everything else, return the previous break. */ 709 return target_brk; 710 } 711 712 static inline abi_long copy_from_user_fdset(fd_set *fds, 713 abi_ulong target_fds_addr, 714 int n) 715 { 716 int i, nw, j, k; 717 abi_ulong b, *target_fds; 718 719 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 720 if (!(target_fds = lock_user(VERIFY_READ, 721 target_fds_addr, 722 sizeof(abi_ulong) * nw, 723 1))) 724 return -TARGET_EFAULT; 725 726 FD_ZERO(fds); 727 k = 0; 728 for (i = 0; i < nw; i++) { 729 /* grab the abi_ulong */ 730 __get_user(b, &target_fds[i]); 731 for (j = 0; j < TARGET_ABI_BITS; j++) { 732 /* check the bit inside the abi_ulong */ 733 if ((b >> j) & 1) 734 FD_SET(k, fds); 735 k++; 736 } 737 } 738 739 unlock_user(target_fds, target_fds_addr, 0); 740 741 return 0; 742 } 743 744 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr, 745 abi_ulong target_fds_addr, 746 int n) 747 { 748 if (target_fds_addr) { 749 if (copy_from_user_fdset(fds, target_fds_addr, n)) 750 return -TARGET_EFAULT; 751 *fds_ptr = fds; 752 } else { 753 *fds_ptr = NULL; 754 } 755 return 0; 756 } 757 758 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr, 759 const fd_set *fds, 760 int n) 761 { 762 int i, nw, j, k; 763 abi_long v; 764 abi_ulong *target_fds; 765 766 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS; 767 if (!(target_fds = lock_user(VERIFY_WRITE, 768 target_fds_addr, 769 sizeof(abi_ulong) * nw, 770 0))) 771 return -TARGET_EFAULT; 772 773 k = 0; 774 for (i = 0; i < nw; i++) { 775 v = 0; 776 for (j = 0; j < TARGET_ABI_BITS; j++) { 777 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j); 778 k++; 779 } 780 __put_user(v, &target_fds[i]); 781 } 782 783 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw); 784 785 return 0; 786 } 787 788 #if defined(__alpha__) 789 #define HOST_HZ 1024 790 #else 791 #define HOST_HZ 100 792 #endif 793 794 static inline abi_long host_to_target_clock_t(long ticks) 795 { 796 #if HOST_HZ == TARGET_HZ 797 return ticks; 798 #else 799 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ; 800 #endif 801 } 802 803 static inline abi_long host_to_target_rusage(abi_ulong target_addr, 804 const struct rusage *rusage) 805 { 806 struct target_rusage *target_rusage; 807 808 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) 809 return -TARGET_EFAULT; 810 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec); 811 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec); 812 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec); 813 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec); 814 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss); 815 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss); 816 target_rusage->ru_idrss = tswapal(rusage->ru_idrss); 817 target_rusage->ru_isrss = tswapal(rusage->ru_isrss); 818 target_rusage->ru_minflt = tswapal(rusage->ru_minflt); 819 target_rusage->ru_majflt = tswapal(rusage->ru_majflt); 820 target_rusage->ru_nswap = tswapal(rusage->ru_nswap); 821 target_rusage->ru_inblock = tswapal(rusage->ru_inblock); 822 
target_rusage->ru_oublock = tswapal(rusage->ru_oublock); 823 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd); 824 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv); 825 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals); 826 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw); 827 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw); 828 unlock_user_struct(target_rusage, target_addr, 1); 829 830 return 0; 831 } 832 833 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim) 834 { 835 abi_ulong target_rlim_swap; 836 rlim_t result; 837 838 target_rlim_swap = tswapal(target_rlim); 839 if (target_rlim_swap == TARGET_RLIM_INFINITY) 840 return RLIM_INFINITY; 841 842 result = target_rlim_swap; 843 if (target_rlim_swap != (rlim_t)result) 844 return RLIM_INFINITY; 845 846 return result; 847 } 848 849 static inline abi_ulong host_to_target_rlim(rlim_t rlim) 850 { 851 abi_ulong target_rlim_swap; 852 abi_ulong result; 853 854 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) 855 target_rlim_swap = TARGET_RLIM_INFINITY; 856 else 857 target_rlim_swap = rlim; 858 result = tswapal(target_rlim_swap); 859 860 return result; 861 } 862 863 static inline int target_to_host_resource(int code) 864 { 865 switch (code) { 866 case TARGET_RLIMIT_AS: 867 return RLIMIT_AS; 868 case TARGET_RLIMIT_CORE: 869 return RLIMIT_CORE; 870 case TARGET_RLIMIT_CPU: 871 return RLIMIT_CPU; 872 case TARGET_RLIMIT_DATA: 873 return RLIMIT_DATA; 874 case TARGET_RLIMIT_FSIZE: 875 return RLIMIT_FSIZE; 876 case TARGET_RLIMIT_LOCKS: 877 return RLIMIT_LOCKS; 878 case TARGET_RLIMIT_MEMLOCK: 879 return RLIMIT_MEMLOCK; 880 case TARGET_RLIMIT_MSGQUEUE: 881 return RLIMIT_MSGQUEUE; 882 case TARGET_RLIMIT_NICE: 883 return RLIMIT_NICE; 884 case TARGET_RLIMIT_NOFILE: 885 return RLIMIT_NOFILE; 886 case TARGET_RLIMIT_NPROC: 887 return RLIMIT_NPROC; 888 case TARGET_RLIMIT_RSS: 889 return RLIMIT_RSS; 890 case TARGET_RLIMIT_RTPRIO: 891 return RLIMIT_RTPRIO; 892 case TARGET_RLIMIT_SIGPENDING: 893 return RLIMIT_SIGPENDING; 894 case TARGET_RLIMIT_STACK: 895 return RLIMIT_STACK; 896 default: 897 return code; 898 } 899 } 900 901 static inline abi_long copy_from_user_timeval(struct timeval *tv, 902 abi_ulong target_tv_addr) 903 { 904 struct target_timeval *target_tv; 905 906 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) 907 return -TARGET_EFAULT; 908 909 __get_user(tv->tv_sec, &target_tv->tv_sec); 910 __get_user(tv->tv_usec, &target_tv->tv_usec); 911 912 unlock_user_struct(target_tv, target_tv_addr, 0); 913 914 return 0; 915 } 916 917 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 918 const struct timeval *tv) 919 { 920 struct target_timeval *target_tv; 921 922 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) 923 return -TARGET_EFAULT; 924 925 __put_user(tv->tv_sec, &target_tv->tv_sec); 926 __put_user(tv->tv_usec, &target_tv->tv_usec); 927 928 unlock_user_struct(target_tv, target_tv_addr, 1); 929 930 return 0; 931 } 932 933 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 934 #include <mqueue.h> 935 936 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr, 937 abi_ulong target_mq_attr_addr) 938 { 939 struct target_mq_attr *target_mq_attr; 940 941 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 942 target_mq_attr_addr, 1)) 943 return -TARGET_EFAULT; 944 945 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 946 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 947 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 948 
__get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 949 950 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 951 952 return 0; 953 } 954 955 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 956 const struct mq_attr *attr) 957 { 958 struct target_mq_attr *target_mq_attr; 959 960 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 961 target_mq_attr_addr, 0)) 962 return -TARGET_EFAULT; 963 964 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 965 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 966 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 967 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 968 969 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 970 971 return 0; 972 } 973 #endif 974 975 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 976 /* do_select() must return target values and target errnos. */ 977 static abi_long do_select(int n, 978 abi_ulong rfd_addr, abi_ulong wfd_addr, 979 abi_ulong efd_addr, abi_ulong target_tv_addr) 980 { 981 fd_set rfds, wfds, efds; 982 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 983 struct timeval tv, *tv_ptr; 984 abi_long ret; 985 986 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 987 if (ret) { 988 return ret; 989 } 990 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 991 if (ret) { 992 return ret; 993 } 994 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 995 if (ret) { 996 return ret; 997 } 998 999 if (target_tv_addr) { 1000 if (copy_from_user_timeval(&tv, target_tv_addr)) 1001 return -TARGET_EFAULT; 1002 tv_ptr = &tv; 1003 } else { 1004 tv_ptr = NULL; 1005 } 1006 1007 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr)); 1008 1009 if (!is_error(ret)) { 1010 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1011 return -TARGET_EFAULT; 1012 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1013 return -TARGET_EFAULT; 1014 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1015 return -TARGET_EFAULT; 1016 1017 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv)) 1018 return -TARGET_EFAULT; 1019 } 1020 1021 return ret; 1022 } 1023 #endif 1024 1025 static abi_long do_pipe2(int host_pipe[], int flags) 1026 { 1027 #ifdef CONFIG_PIPE2 1028 return pipe2(host_pipe, flags); 1029 #else 1030 return -ENOSYS; 1031 #endif 1032 } 1033 1034 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, 1035 int flags, int is_pipe2) 1036 { 1037 int host_pipe[2]; 1038 abi_long ret; 1039 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); 1040 1041 if (is_error(ret)) 1042 return get_errno(ret); 1043 1044 /* Several targets have special calling conventions for the original 1045 pipe syscall, but didn't replicate this into the pipe2 syscall. 
*/ 1046 if (!is_pipe2) { 1047 #if defined(TARGET_ALPHA) 1048 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1]; 1049 return host_pipe[0]; 1050 #elif defined(TARGET_MIPS) 1051 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; 1052 return host_pipe[0]; 1053 #elif defined(TARGET_SH4) 1054 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; 1055 return host_pipe[0]; 1056 #elif defined(TARGET_SPARC) 1057 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1]; 1058 return host_pipe[0]; 1059 #endif 1060 } 1061 1062 if (put_user_s32(host_pipe[0], pipedes) 1063 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0]))) 1064 return -TARGET_EFAULT; 1065 return get_errno(ret); 1066 } 1067 1068 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn, 1069 abi_ulong target_addr, 1070 socklen_t len) 1071 { 1072 struct target_ip_mreqn *target_smreqn; 1073 1074 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1); 1075 if (!target_smreqn) 1076 return -TARGET_EFAULT; 1077 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr; 1078 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr; 1079 if (len == sizeof(struct target_ip_mreqn)) 1080 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex); 1081 unlock_user(target_smreqn, target_addr, 0); 1082 1083 return 0; 1084 } 1085 1086 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr, 1087 abi_ulong target_addr, 1088 socklen_t len) 1089 { 1090 const socklen_t unix_maxlen = sizeof (struct sockaddr_un); 1091 sa_family_t sa_family; 1092 struct target_sockaddr *target_saddr; 1093 1094 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 1095 if (!target_saddr) 1096 return -TARGET_EFAULT; 1097 1098 sa_family = tswap16(target_saddr->sa_family); 1099 1100 /* Oops. The caller might send a incomplete sun_path; sun_path 1101 * must be terminated by \0 (see the manual page), but 1102 * unfortunately it is quite common to specify sockaddr_un 1103 * length as "strlen(x->sun_path)" while it should be 1104 * "strlen(...) + 1". We'll fix that here if needed. 1105 * Linux kernel has a similar feature. 
1106 */ 1107 1108 if (sa_family == AF_UNIX) { 1109 if (len < unix_maxlen && len > 0) { 1110 char *cp = (char*)target_saddr; 1111 1112 if ( cp[len-1] && !cp[len] ) 1113 len++; 1114 } 1115 if (len > unix_maxlen) 1116 len = unix_maxlen; 1117 } 1118 1119 memcpy(addr, target_saddr, len); 1120 addr->sa_family = sa_family; 1121 unlock_user(target_saddr, target_addr, 0); 1122 1123 return 0; 1124 } 1125 1126 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1127 struct sockaddr *addr, 1128 socklen_t len) 1129 { 1130 struct target_sockaddr *target_saddr; 1131 1132 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1133 if (!target_saddr) 1134 return -TARGET_EFAULT; 1135 memcpy(target_saddr, addr, len); 1136 target_saddr->sa_family = tswap16(addr->sa_family); 1137 unlock_user(target_saddr, target_addr, len); 1138 1139 return 0; 1140 } 1141 1142 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1143 struct target_msghdr *target_msgh) 1144 { 1145 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1146 abi_long msg_controllen; 1147 abi_ulong target_cmsg_addr; 1148 struct target_cmsghdr *target_cmsg; 1149 socklen_t space = 0; 1150 1151 msg_controllen = tswapal(target_msgh->msg_controllen); 1152 if (msg_controllen < sizeof (struct target_cmsghdr)) 1153 goto the_end; 1154 target_cmsg_addr = tswapal(target_msgh->msg_control); 1155 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1156 if (!target_cmsg) 1157 return -TARGET_EFAULT; 1158 1159 while (cmsg && target_cmsg) { 1160 void *data = CMSG_DATA(cmsg); 1161 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1162 1163 int len = tswapal(target_cmsg->cmsg_len) 1164 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr)); 1165 1166 space += CMSG_SPACE(len); 1167 if (space > msgh->msg_controllen) { 1168 space -= CMSG_SPACE(len); 1169 gemu_log("Host cmsg overflow\n"); 1170 break; 1171 } 1172 1173 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1174 cmsg->cmsg_level = SOL_SOCKET; 1175 } else { 1176 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1177 } 1178 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1179 cmsg->cmsg_len = CMSG_LEN(len); 1180 1181 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1182 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1183 memcpy(data, target_data, len); 1184 } else { 1185 int *fd = (int *)data; 1186 int *target_fd = (int *)target_data; 1187 int i, numfds = len / sizeof(int); 1188 1189 for (i = 0; i < numfds; i++) 1190 fd[i] = tswap32(target_fd[i]); 1191 } 1192 1193 cmsg = CMSG_NXTHDR(msgh, cmsg); 1194 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1195 } 1196 unlock_user(target_cmsg, target_cmsg_addr, 0); 1197 the_end: 1198 msgh->msg_controllen = space; 1199 return 0; 1200 } 1201 1202 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1203 struct msghdr *msgh) 1204 { 1205 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1206 abi_long msg_controllen; 1207 abi_ulong target_cmsg_addr; 1208 struct target_cmsghdr *target_cmsg; 1209 socklen_t space = 0; 1210 1211 msg_controllen = tswapal(target_msgh->msg_controllen); 1212 if (msg_controllen < sizeof (struct target_cmsghdr)) 1213 goto the_end; 1214 target_cmsg_addr = tswapal(target_msgh->msg_control); 1215 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1216 if (!target_cmsg) 1217 return -TARGET_EFAULT; 1218 1219 while (cmsg && target_cmsg) { 1220 void *data = CMSG_DATA(cmsg); 1221 void 
*target_data = TARGET_CMSG_DATA(target_cmsg); 1222 1223 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr)); 1224 1225 space += TARGET_CMSG_SPACE(len); 1226 if (space > msg_controllen) { 1227 space -= TARGET_CMSG_SPACE(len); 1228 gemu_log("Target cmsg overflow\n"); 1229 break; 1230 } 1231 1232 if (cmsg->cmsg_level == SOL_SOCKET) { 1233 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1234 } else { 1235 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1236 } 1237 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1238 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len)); 1239 1240 if ((cmsg->cmsg_level == SOL_SOCKET) && 1241 (cmsg->cmsg_type == SCM_RIGHTS)) { 1242 int *fd = (int *)data; 1243 int *target_fd = (int *)target_data; 1244 int i, numfds = len / sizeof(int); 1245 1246 for (i = 0; i < numfds; i++) 1247 target_fd[i] = tswap32(fd[i]); 1248 } else if ((cmsg->cmsg_level == SOL_SOCKET) && 1249 (cmsg->cmsg_type == SO_TIMESTAMP) && 1250 (len == sizeof(struct timeval))) { 1251 /* copy struct timeval to target */ 1252 struct timeval *tv = (struct timeval *)data; 1253 struct target_timeval *target_tv = 1254 (struct target_timeval *)target_data; 1255 1256 target_tv->tv_sec = tswapal(tv->tv_sec); 1257 target_tv->tv_usec = tswapal(tv->tv_usec); 1258 } else { 1259 gemu_log("Unsupported ancillary data: %d/%d\n", 1260 cmsg->cmsg_level, cmsg->cmsg_type); 1261 memcpy(target_data, data, len); 1262 } 1263 1264 cmsg = CMSG_NXTHDR(msgh, cmsg); 1265 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1266 } 1267 unlock_user(target_cmsg, target_cmsg_addr, space); 1268 the_end: 1269 target_msgh->msg_controllen = tswapal(space); 1270 return 0; 1271 } 1272 1273 /* do_setsockopt() Must return target values and target errnos. */ 1274 static abi_long do_setsockopt(int sockfd, int level, int optname, 1275 abi_ulong optval_addr, socklen_t optlen) 1276 { 1277 abi_long ret; 1278 int val; 1279 struct ip_mreqn *ip_mreq; 1280 struct ip_mreq_source *ip_mreq_source; 1281 1282 switch(level) { 1283 case SOL_TCP: 1284 /* TCP options all take an 'int' value. 
*/ 1285 if (optlen < sizeof(uint32_t)) 1286 return -TARGET_EINVAL; 1287 1288 if (get_user_u32(val, optval_addr)) 1289 return -TARGET_EFAULT; 1290 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1291 break; 1292 case SOL_IP: 1293 switch(optname) { 1294 case IP_TOS: 1295 case IP_TTL: 1296 case IP_HDRINCL: 1297 case IP_ROUTER_ALERT: 1298 case IP_RECVOPTS: 1299 case IP_RETOPTS: 1300 case IP_PKTINFO: 1301 case IP_MTU_DISCOVER: 1302 case IP_RECVERR: 1303 case IP_RECVTOS: 1304 #ifdef IP_FREEBIND 1305 case IP_FREEBIND: 1306 #endif 1307 case IP_MULTICAST_TTL: 1308 case IP_MULTICAST_LOOP: 1309 val = 0; 1310 if (optlen >= sizeof(uint32_t)) { 1311 if (get_user_u32(val, optval_addr)) 1312 return -TARGET_EFAULT; 1313 } else if (optlen >= 1) { 1314 if (get_user_u8(val, optval_addr)) 1315 return -TARGET_EFAULT; 1316 } 1317 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1318 break; 1319 case IP_ADD_MEMBERSHIP: 1320 case IP_DROP_MEMBERSHIP: 1321 if (optlen < sizeof (struct target_ip_mreq) || 1322 optlen > sizeof (struct target_ip_mreqn)) 1323 return -TARGET_EINVAL; 1324 1325 ip_mreq = (struct ip_mreqn *) alloca(optlen); 1326 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1327 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1328 break; 1329 1330 case IP_BLOCK_SOURCE: 1331 case IP_UNBLOCK_SOURCE: 1332 case IP_ADD_SOURCE_MEMBERSHIP: 1333 case IP_DROP_SOURCE_MEMBERSHIP: 1334 if (optlen != sizeof (struct target_ip_mreq_source)) 1335 return -TARGET_EINVAL; 1336 1337 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1338 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1339 unlock_user (ip_mreq_source, optval_addr, 0); 1340 break; 1341 1342 default: 1343 goto unimplemented; 1344 } 1345 break; 1346 case SOL_IPV6: 1347 switch (optname) { 1348 case IPV6_MTU_DISCOVER: 1349 case IPV6_MTU: 1350 case IPV6_V6ONLY: 1351 case IPV6_RECVPKTINFO: 1352 val = 0; 1353 if (optlen < sizeof(uint32_t)) { 1354 return -TARGET_EINVAL; 1355 } 1356 if (get_user_u32(val, optval_addr)) { 1357 return -TARGET_EFAULT; 1358 } 1359 ret = get_errno(setsockopt(sockfd, level, optname, 1360 &val, sizeof(val))); 1361 break; 1362 default: 1363 goto unimplemented; 1364 } 1365 break; 1366 case SOL_RAW: 1367 switch (optname) { 1368 case ICMP_FILTER: 1369 /* struct icmp_filter takes an u32 value */ 1370 if (optlen < sizeof(uint32_t)) { 1371 return -TARGET_EINVAL; 1372 } 1373 1374 if (get_user_u32(val, optval_addr)) { 1375 return -TARGET_EFAULT; 1376 } 1377 ret = get_errno(setsockopt(sockfd, level, optname, 1378 &val, sizeof(val))); 1379 break; 1380 1381 default: 1382 goto unimplemented; 1383 } 1384 break; 1385 case TARGET_SOL_SOCKET: 1386 switch (optname) { 1387 case TARGET_SO_RCVTIMEO: 1388 { 1389 struct timeval tv; 1390 1391 optname = SO_RCVTIMEO; 1392 1393 set_timeout: 1394 if (optlen != sizeof(struct target_timeval)) { 1395 return -TARGET_EINVAL; 1396 } 1397 1398 if (copy_from_user_timeval(&tv, optval_addr)) { 1399 return -TARGET_EFAULT; 1400 } 1401 1402 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 1403 &tv, sizeof(tv))); 1404 return ret; 1405 } 1406 case TARGET_SO_SNDTIMEO: 1407 optname = SO_SNDTIMEO; 1408 goto set_timeout; 1409 case TARGET_SO_ATTACH_FILTER: 1410 { 1411 struct target_sock_fprog *tfprog; 1412 struct target_sock_filter *tfilter; 1413 struct sock_fprog fprog; 1414 struct sock_filter *filter; 1415 int i; 1416 1417 if (optlen != sizeof(*tfprog)) { 1418 return -TARGET_EINVAL; 1419 } 1420 if 
(!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 1421 return -TARGET_EFAULT; 1422 } 1423 if (!lock_user_struct(VERIFY_READ, tfilter, 1424 tswapal(tfprog->filter), 0)) { 1425 unlock_user_struct(tfprog, optval_addr, 1); 1426 return -TARGET_EFAULT; 1427 } 1428 1429 fprog.len = tswap16(tfprog->len); 1430 filter = malloc(fprog.len * sizeof(*filter)); 1431 if (filter == NULL) { 1432 unlock_user_struct(tfilter, tfprog->filter, 1); 1433 unlock_user_struct(tfprog, optval_addr, 1); 1434 return -TARGET_ENOMEM; 1435 } 1436 for (i = 0; i < fprog.len; i++) { 1437 filter[i].code = tswap16(tfilter[i].code); 1438 filter[i].jt = tfilter[i].jt; 1439 filter[i].jf = tfilter[i].jf; 1440 filter[i].k = tswap32(tfilter[i].k); 1441 } 1442 fprog.filter = filter; 1443 1444 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 1445 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 1446 free(filter); 1447 1448 unlock_user_struct(tfilter, tfprog->filter, 1); 1449 unlock_user_struct(tfprog, optval_addr, 1); 1450 return ret; 1451 } 1452 /* Options with 'int' argument. */ 1453 case TARGET_SO_DEBUG: 1454 optname = SO_DEBUG; 1455 break; 1456 case TARGET_SO_REUSEADDR: 1457 optname = SO_REUSEADDR; 1458 break; 1459 case TARGET_SO_TYPE: 1460 optname = SO_TYPE; 1461 break; 1462 case TARGET_SO_ERROR: 1463 optname = SO_ERROR; 1464 break; 1465 case TARGET_SO_DONTROUTE: 1466 optname = SO_DONTROUTE; 1467 break; 1468 case TARGET_SO_BROADCAST: 1469 optname = SO_BROADCAST; 1470 break; 1471 case TARGET_SO_SNDBUF: 1472 optname = SO_SNDBUF; 1473 break; 1474 case TARGET_SO_RCVBUF: 1475 optname = SO_RCVBUF; 1476 break; 1477 case TARGET_SO_KEEPALIVE: 1478 optname = SO_KEEPALIVE; 1479 break; 1480 case TARGET_SO_OOBINLINE: 1481 optname = SO_OOBINLINE; 1482 break; 1483 case TARGET_SO_NO_CHECK: 1484 optname = SO_NO_CHECK; 1485 break; 1486 case TARGET_SO_PRIORITY: 1487 optname = SO_PRIORITY; 1488 break; 1489 #ifdef SO_BSDCOMPAT 1490 case TARGET_SO_BSDCOMPAT: 1491 optname = SO_BSDCOMPAT; 1492 break; 1493 #endif 1494 case TARGET_SO_PASSCRED: 1495 optname = SO_PASSCRED; 1496 break; 1497 case TARGET_SO_TIMESTAMP: 1498 optname = SO_TIMESTAMP; 1499 break; 1500 case TARGET_SO_RCVLOWAT: 1501 optname = SO_RCVLOWAT; 1502 break; 1503 break; 1504 default: 1505 goto unimplemented; 1506 } 1507 if (optlen < sizeof(uint32_t)) 1508 return -TARGET_EINVAL; 1509 1510 if (get_user_u32(val, optval_addr)) 1511 return -TARGET_EFAULT; 1512 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 1513 break; 1514 default: 1515 unimplemented: 1516 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname); 1517 ret = -TARGET_ENOPROTOOPT; 1518 } 1519 return ret; 1520 } 1521 1522 /* do_getsockopt() Must return target values and target errnos. 
*/ 1523 static abi_long do_getsockopt(int sockfd, int level, int optname, 1524 abi_ulong optval_addr, abi_ulong optlen) 1525 { 1526 abi_long ret; 1527 int len, val; 1528 socklen_t lv; 1529 1530 switch(level) { 1531 case TARGET_SOL_SOCKET: 1532 level = SOL_SOCKET; 1533 switch (optname) { 1534 /* These don't just return a single integer */ 1535 case TARGET_SO_LINGER: 1536 case TARGET_SO_RCVTIMEO: 1537 case TARGET_SO_SNDTIMEO: 1538 case TARGET_SO_PEERNAME: 1539 goto unimplemented; 1540 case TARGET_SO_PEERCRED: { 1541 struct ucred cr; 1542 socklen_t crlen; 1543 struct target_ucred *tcr; 1544 1545 if (get_user_u32(len, optlen)) { 1546 return -TARGET_EFAULT; 1547 } 1548 if (len < 0) { 1549 return -TARGET_EINVAL; 1550 } 1551 1552 crlen = sizeof(cr); 1553 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 1554 &cr, &crlen)); 1555 if (ret < 0) { 1556 return ret; 1557 } 1558 if (len > crlen) { 1559 len = crlen; 1560 } 1561 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 1562 return -TARGET_EFAULT; 1563 } 1564 __put_user(cr.pid, &tcr->pid); 1565 __put_user(cr.uid, &tcr->uid); 1566 __put_user(cr.gid, &tcr->gid); 1567 unlock_user_struct(tcr, optval_addr, 1); 1568 if (put_user_u32(len, optlen)) { 1569 return -TARGET_EFAULT; 1570 } 1571 break; 1572 } 1573 /* Options with 'int' argument. */ 1574 case TARGET_SO_DEBUG: 1575 optname = SO_DEBUG; 1576 goto int_case; 1577 case TARGET_SO_REUSEADDR: 1578 optname = SO_REUSEADDR; 1579 goto int_case; 1580 case TARGET_SO_TYPE: 1581 optname = SO_TYPE; 1582 goto int_case; 1583 case TARGET_SO_ERROR: 1584 optname = SO_ERROR; 1585 goto int_case; 1586 case TARGET_SO_DONTROUTE: 1587 optname = SO_DONTROUTE; 1588 goto int_case; 1589 case TARGET_SO_BROADCAST: 1590 optname = SO_BROADCAST; 1591 goto int_case; 1592 case TARGET_SO_SNDBUF: 1593 optname = SO_SNDBUF; 1594 goto int_case; 1595 case TARGET_SO_RCVBUF: 1596 optname = SO_RCVBUF; 1597 goto int_case; 1598 case TARGET_SO_KEEPALIVE: 1599 optname = SO_KEEPALIVE; 1600 goto int_case; 1601 case TARGET_SO_OOBINLINE: 1602 optname = SO_OOBINLINE; 1603 goto int_case; 1604 case TARGET_SO_NO_CHECK: 1605 optname = SO_NO_CHECK; 1606 goto int_case; 1607 case TARGET_SO_PRIORITY: 1608 optname = SO_PRIORITY; 1609 goto int_case; 1610 #ifdef SO_BSDCOMPAT 1611 case TARGET_SO_BSDCOMPAT: 1612 optname = SO_BSDCOMPAT; 1613 goto int_case; 1614 #endif 1615 case TARGET_SO_PASSCRED: 1616 optname = SO_PASSCRED; 1617 goto int_case; 1618 case TARGET_SO_TIMESTAMP: 1619 optname = SO_TIMESTAMP; 1620 goto int_case; 1621 case TARGET_SO_RCVLOWAT: 1622 optname = SO_RCVLOWAT; 1623 goto int_case; 1624 default: 1625 goto int_case; 1626 } 1627 break; 1628 case SOL_TCP: 1629 /* TCP options all take an 'int' value. 
*/ 1630 int_case: 1631 if (get_user_u32(len, optlen)) 1632 return -TARGET_EFAULT; 1633 if (len < 0) 1634 return -TARGET_EINVAL; 1635 lv = sizeof(lv); 1636 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1637 if (ret < 0) 1638 return ret; 1639 if (len > lv) 1640 len = lv; 1641 if (len == 4) { 1642 if (put_user_u32(val, optval_addr)) 1643 return -TARGET_EFAULT; 1644 } else { 1645 if (put_user_u8(val, optval_addr)) 1646 return -TARGET_EFAULT; 1647 } 1648 if (put_user_u32(len, optlen)) 1649 return -TARGET_EFAULT; 1650 break; 1651 case SOL_IP: 1652 switch(optname) { 1653 case IP_TOS: 1654 case IP_TTL: 1655 case IP_HDRINCL: 1656 case IP_ROUTER_ALERT: 1657 case IP_RECVOPTS: 1658 case IP_RETOPTS: 1659 case IP_PKTINFO: 1660 case IP_MTU_DISCOVER: 1661 case IP_RECVERR: 1662 case IP_RECVTOS: 1663 #ifdef IP_FREEBIND 1664 case IP_FREEBIND: 1665 #endif 1666 case IP_MULTICAST_TTL: 1667 case IP_MULTICAST_LOOP: 1668 if (get_user_u32(len, optlen)) 1669 return -TARGET_EFAULT; 1670 if (len < 0) 1671 return -TARGET_EINVAL; 1672 lv = sizeof(lv); 1673 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1674 if (ret < 0) 1675 return ret; 1676 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 1677 len = 1; 1678 if (put_user_u32(len, optlen) 1679 || put_user_u8(val, optval_addr)) 1680 return -TARGET_EFAULT; 1681 } else { 1682 if (len > sizeof(int)) 1683 len = sizeof(int); 1684 if (put_user_u32(len, optlen) 1685 || put_user_u32(val, optval_addr)) 1686 return -TARGET_EFAULT; 1687 } 1688 break; 1689 default: 1690 ret = -TARGET_ENOPROTOOPT; 1691 break; 1692 } 1693 break; 1694 default: 1695 unimplemented: 1696 gemu_log("getsockopt level=%d optname=%d not yet supported\n", 1697 level, optname); 1698 ret = -TARGET_EOPNOTSUPP; 1699 break; 1700 } 1701 return ret; 1702 } 1703 1704 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 1705 int count, int copy) 1706 { 1707 struct target_iovec *target_vec; 1708 struct iovec *vec; 1709 abi_ulong total_len, max_len; 1710 int i; 1711 int err = 0; 1712 1713 if (count == 0) { 1714 errno = 0; 1715 return NULL; 1716 } 1717 if (count < 0 || count > IOV_MAX) { 1718 errno = EINVAL; 1719 return NULL; 1720 } 1721 1722 vec = calloc(count, sizeof(struct iovec)); 1723 if (vec == NULL) { 1724 errno = ENOMEM; 1725 return NULL; 1726 } 1727 1728 target_vec = lock_user(VERIFY_READ, target_addr, 1729 count * sizeof(struct target_iovec), 1); 1730 if (target_vec == NULL) { 1731 err = EFAULT; 1732 goto fail2; 1733 } 1734 1735 /* ??? If host page size > target page size, this will result in a 1736 value larger than what we can actually support. */ 1737 max_len = 0x7fffffff & TARGET_PAGE_MASK; 1738 total_len = 0; 1739 1740 for (i = 0; i < count; i++) { 1741 abi_ulong base = tswapal(target_vec[i].iov_base); 1742 abi_long len = tswapal(target_vec[i].iov_len); 1743 1744 if (len < 0) { 1745 err = EINVAL; 1746 goto fail; 1747 } else if (len == 0) { 1748 /* Zero length pointer is ignored. 
*/ 1749 vec[i].iov_base = 0; 1750 } else { 1751 vec[i].iov_base = lock_user(type, base, len, copy); 1752 if (!vec[i].iov_base) { 1753 err = EFAULT; 1754 goto fail; 1755 } 1756 if (len > max_len - total_len) { 1757 len = max_len - total_len; 1758 } 1759 } 1760 vec[i].iov_len = len; 1761 total_len += len; 1762 } 1763 1764 unlock_user(target_vec, target_addr, 0); 1765 return vec; 1766 1767 fail: 1768 unlock_user(target_vec, target_addr, 0); 1769 fail2: 1770 free(vec); 1771 errno = err; 1772 return NULL; 1773 } 1774 1775 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 1776 int count, int copy) 1777 { 1778 struct target_iovec *target_vec; 1779 int i; 1780 1781 target_vec = lock_user(VERIFY_READ, target_addr, 1782 count * sizeof(struct target_iovec), 1); 1783 if (target_vec) { 1784 for (i = 0; i < count; i++) { 1785 abi_ulong base = tswapal(target_vec[i].iov_base); 1786 abi_long len = tswapal(target_vec[i].iov_base); 1787 if (len < 0) { 1788 break; 1789 } 1790 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); 1791 } 1792 unlock_user(target_vec, target_addr, 0); 1793 } 1794 1795 free(vec); 1796 } 1797 1798 static inline int target_to_host_sock_type(int *type) 1799 { 1800 int host_type = 0; 1801 int target_type = *type; 1802 1803 switch (target_type & TARGET_SOCK_TYPE_MASK) { 1804 case TARGET_SOCK_DGRAM: 1805 host_type = SOCK_DGRAM; 1806 break; 1807 case TARGET_SOCK_STREAM: 1808 host_type = SOCK_STREAM; 1809 break; 1810 default: 1811 host_type = target_type & TARGET_SOCK_TYPE_MASK; 1812 break; 1813 } 1814 if (target_type & TARGET_SOCK_CLOEXEC) { 1815 #if defined(SOCK_CLOEXEC) 1816 host_type |= SOCK_CLOEXEC; 1817 #else 1818 return -TARGET_EINVAL; 1819 #endif 1820 } 1821 if (target_type & TARGET_SOCK_NONBLOCK) { 1822 #if defined(SOCK_NONBLOCK) 1823 host_type |= SOCK_NONBLOCK; 1824 #elif !defined(O_NONBLOCK) 1825 return -TARGET_EINVAL; 1826 #endif 1827 } 1828 *type = host_type; 1829 return 0; 1830 } 1831 1832 /* Try to emulate socket type flags after socket creation. */ 1833 static int sock_flags_fixup(int fd, int target_type) 1834 { 1835 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 1836 if (target_type & TARGET_SOCK_NONBLOCK) { 1837 int flags = fcntl(fd, F_GETFL); 1838 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 1839 close(fd); 1840 return -TARGET_EINVAL; 1841 } 1842 } 1843 #endif 1844 return fd; 1845 } 1846 1847 /* do_socket() Must return target values and target errnos. */ 1848 static abi_long do_socket(int domain, int type, int protocol) 1849 { 1850 int target_type = type; 1851 int ret; 1852 1853 ret = target_to_host_sock_type(&type); 1854 if (ret) { 1855 return ret; 1856 } 1857 1858 if (domain == PF_NETLINK) 1859 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */ 1860 ret = get_errno(socket(domain, type, protocol)); 1861 if (ret >= 0) { 1862 ret = sock_flags_fixup(ret, target_type); 1863 } 1864 return ret; 1865 } 1866 1867 /* do_bind() Must return target values and target errnos. */ 1868 static abi_long do_bind(int sockfd, abi_ulong target_addr, 1869 socklen_t addrlen) 1870 { 1871 void *addr; 1872 abi_long ret; 1873 1874 if ((int)addrlen < 0) { 1875 return -TARGET_EINVAL; 1876 } 1877 1878 addr = alloca(addrlen+1); 1879 1880 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1881 if (ret) 1882 return ret; 1883 1884 return get_errno(bind(sockfd, addr, addrlen)); 1885 } 1886 1887 /* do_connect() Must return target values and target errnos. 
*/ 1888 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1889 socklen_t addrlen) 1890 { 1891 void *addr; 1892 abi_long ret; 1893 1894 if ((int)addrlen < 0) { 1895 return -TARGET_EINVAL; 1896 } 1897 1898 addr = alloca(addrlen); 1899 1900 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1901 if (ret) 1902 return ret; 1903 1904 return get_errno(connect(sockfd, addr, addrlen)); 1905 } 1906 1907 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 1908 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 1909 int flags, int send) 1910 { 1911 abi_long ret, len; 1912 struct msghdr msg; 1913 int count; 1914 struct iovec *vec; 1915 abi_ulong target_vec; 1916 1917 if (msgp->msg_name) { 1918 msg.msg_namelen = tswap32(msgp->msg_namelen); 1919 msg.msg_name = alloca(msg.msg_namelen); 1920 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1921 msg.msg_namelen); 1922 if (ret) { 1923 goto out2; 1924 } 1925 } else { 1926 msg.msg_name = NULL; 1927 msg.msg_namelen = 0; 1928 } 1929 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1930 msg.msg_control = alloca(msg.msg_controllen); 1931 msg.msg_flags = tswap32(msgp->msg_flags); 1932 1933 count = tswapal(msgp->msg_iovlen); 1934 target_vec = tswapal(msgp->msg_iov); 1935 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 1936 target_vec, count, send); 1937 if (vec == NULL) { 1938 ret = -host_to_target_errno(errno); 1939 goto out2; 1940 } 1941 msg.msg_iovlen = count; 1942 msg.msg_iov = vec; 1943 1944 if (send) { 1945 ret = target_to_host_cmsg(&msg, msgp); 1946 if (ret == 0) 1947 ret = get_errno(sendmsg(fd, &msg, flags)); 1948 } else { 1949 ret = get_errno(recvmsg(fd, &msg, flags)); 1950 if (!is_error(ret)) { 1951 len = ret; 1952 ret = host_to_target_cmsg(msgp, &msg); 1953 if (!is_error(ret)) { 1954 msgp->msg_namelen = tswap32(msg.msg_namelen); 1955 if (msg.msg_name != NULL) { 1956 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 1957 msg.msg_name, msg.msg_namelen); 1958 if (ret) { 1959 goto out; 1960 } 1961 } 1962 1963 ret = len; 1964 } 1965 } 1966 } 1967 1968 out: 1969 unlock_iovec(vec, target_vec, count, !send); 1970 out2: 1971 return ret; 1972 } 1973 1974 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1975 int flags, int send) 1976 { 1977 abi_long ret; 1978 struct target_msghdr *msgp; 1979 1980 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1981 msgp, 1982 target_msg, 1983 send ? 1 : 0)) { 1984 return -TARGET_EFAULT; 1985 } 1986 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 1987 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 1988 return ret; 1989 } 1990 1991 #ifdef TARGET_NR_sendmmsg 1992 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 1993 * so it might not have this *mmsg-specific flag either. 
1994 */ 1995 #ifndef MSG_WAITFORONE 1996 #define MSG_WAITFORONE 0x10000 1997 #endif 1998 1999 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec, 2000 unsigned int vlen, unsigned int flags, 2001 int send) 2002 { 2003 struct target_mmsghdr *mmsgp; 2004 abi_long ret = 0; 2005 int i; 2006 2007 if (vlen > UIO_MAXIOV) { 2008 vlen = UIO_MAXIOV; 2009 } 2010 2011 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1); 2012 if (!mmsgp) { 2013 return -TARGET_EFAULT; 2014 } 2015 2016 for (i = 0; i < vlen; i++) { 2017 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send); 2018 if (is_error(ret)) { 2019 break; 2020 } 2021 mmsgp[i].msg_len = tswap32(ret); 2022 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 2023 if (flags & MSG_WAITFORONE) { 2024 flags |= MSG_DONTWAIT; 2025 } 2026 } 2027 2028 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i); 2029 2030 /* Return number of datagrams sent if we sent any at all; 2031 * otherwise return the error. 2032 */ 2033 if (i) { 2034 return i; 2035 } 2036 return ret; 2037 } 2038 #endif 2039 2040 /* If we don't have a system accept4() then just call accept. 2041 * The callsites to do_accept4() will ensure that they don't 2042 * pass a non-zero flags argument in this config. 2043 */ 2044 #ifndef CONFIG_ACCEPT4 2045 static inline int accept4(int sockfd, struct sockaddr *addr, 2046 socklen_t *addrlen, int flags) 2047 { 2048 assert(flags == 0); 2049 return accept(sockfd, addr, addrlen); 2050 } 2051 #endif 2052 2053 /* do_accept4() Must return target values and target errnos. */ 2054 static abi_long do_accept4(int fd, abi_ulong target_addr, 2055 abi_ulong target_addrlen_addr, int flags) 2056 { 2057 socklen_t addrlen; 2058 void *addr; 2059 abi_long ret; 2060 2061 if (target_addr == 0) { 2062 return get_errno(accept4(fd, NULL, NULL, flags)); 2063 } 2064 2065 /* linux returns EINVAL if addrlen pointer is invalid */ 2066 if (get_user_u32(addrlen, target_addrlen_addr)) 2067 return -TARGET_EINVAL; 2068 2069 if ((int)addrlen < 0) { 2070 return -TARGET_EINVAL; 2071 } 2072 2073 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2074 return -TARGET_EINVAL; 2075 2076 addr = alloca(addrlen); 2077 2078 ret = get_errno(accept4(fd, addr, &addrlen, flags)); 2079 if (!is_error(ret)) { 2080 host_to_target_sockaddr(target_addr, addr, addrlen); 2081 if (put_user_u32(addrlen, target_addrlen_addr)) 2082 ret = -TARGET_EFAULT; 2083 } 2084 return ret; 2085 } 2086 2087 /* do_getpeername() Must return target values and target errnos. */ 2088 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2089 abi_ulong target_addrlen_addr) 2090 { 2091 socklen_t addrlen; 2092 void *addr; 2093 abi_long ret; 2094 2095 if (get_user_u32(addrlen, target_addrlen_addr)) 2096 return -TARGET_EFAULT; 2097 2098 if ((int)addrlen < 0) { 2099 return -TARGET_EINVAL; 2100 } 2101 2102 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2103 return -TARGET_EFAULT; 2104 2105 addr = alloca(addrlen); 2106 2107 ret = get_errno(getpeername(fd, addr, &addrlen)); 2108 if (!is_error(ret)) { 2109 host_to_target_sockaddr(target_addr, addr, addrlen); 2110 if (put_user_u32(addrlen, target_addrlen_addr)) 2111 ret = -TARGET_EFAULT; 2112 } 2113 return ret; 2114 } 2115 2116 /* do_getsockname() Must return target values and target errnos. 
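 * Mirrors do_getpeername() above: addrlen is fetched from guest memory,
 * sanity-checked, and the host sockaddr plus the updated length are copied
 * back to the guest on success.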
*/ 2117 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2118 abi_ulong target_addrlen_addr) 2119 { 2120 socklen_t addrlen; 2121 void *addr; 2122 abi_long ret; 2123 2124 if (get_user_u32(addrlen, target_addrlen_addr)) 2125 return -TARGET_EFAULT; 2126 2127 if ((int)addrlen < 0) { 2128 return -TARGET_EINVAL; 2129 } 2130 2131 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2132 return -TARGET_EFAULT; 2133 2134 addr = alloca(addrlen); 2135 2136 ret = get_errno(getsockname(fd, addr, &addrlen)); 2137 if (!is_error(ret)) { 2138 host_to_target_sockaddr(target_addr, addr, addrlen); 2139 if (put_user_u32(addrlen, target_addrlen_addr)) 2140 ret = -TARGET_EFAULT; 2141 } 2142 return ret; 2143 } 2144 2145 /* do_socketpair() Must return target values and target errnos. */ 2146 static abi_long do_socketpair(int domain, int type, int protocol, 2147 abi_ulong target_tab_addr) 2148 { 2149 int tab[2]; 2150 abi_long ret; 2151 2152 target_to_host_sock_type(&type); 2153 2154 ret = get_errno(socketpair(domain, type, protocol, tab)); 2155 if (!is_error(ret)) { 2156 if (put_user_s32(tab[0], target_tab_addr) 2157 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2158 ret = -TARGET_EFAULT; 2159 } 2160 return ret; 2161 } 2162 2163 /* do_sendto() Must return target values and target errnos. */ 2164 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2165 abi_ulong target_addr, socklen_t addrlen) 2166 { 2167 void *addr; 2168 void *host_msg; 2169 abi_long ret; 2170 2171 if ((int)addrlen < 0) { 2172 return -TARGET_EINVAL; 2173 } 2174 2175 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2176 if (!host_msg) 2177 return -TARGET_EFAULT; 2178 if (target_addr) { 2179 addr = alloca(addrlen); 2180 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2181 if (ret) { 2182 unlock_user(host_msg, msg, 0); 2183 return ret; 2184 } 2185 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2186 } else { 2187 ret = get_errno(send(fd, host_msg, len, flags)); 2188 } 2189 unlock_user(host_msg, msg, 0); 2190 return ret; 2191 } 2192 2193 /* do_recvfrom() Must return target values and target errnos. */ 2194 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2195 abi_ulong target_addr, 2196 abi_ulong target_addrlen) 2197 { 2198 socklen_t addrlen; 2199 void *addr; 2200 void *host_msg; 2201 abi_long ret; 2202 2203 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2204 if (!host_msg) 2205 return -TARGET_EFAULT; 2206 if (target_addr) { 2207 if (get_user_u32(addrlen, target_addrlen)) { 2208 ret = -TARGET_EFAULT; 2209 goto fail; 2210 } 2211 if ((int)addrlen < 0) { 2212 ret = -TARGET_EINVAL; 2213 goto fail; 2214 } 2215 addr = alloca(addrlen); 2216 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2217 } else { 2218 addr = NULL; /* To keep compiler quiet. */ 2219 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2220 } 2221 if (!is_error(ret)) { 2222 if (target_addr) { 2223 host_to_target_sockaddr(target_addr, addr, addrlen); 2224 if (put_user_u32(addrlen, target_addrlen)) { 2225 ret = -TARGET_EFAULT; 2226 goto fail; 2227 } 2228 } 2229 unlock_user(host_msg, msg, len); 2230 } else { 2231 fail: 2232 unlock_user(host_msg, msg, 0); 2233 } 2234 return ret; 2235 } 2236 2237 #ifdef TARGET_NR_socketcall 2238 /* do_socketcall() Must return target values and target errnos. 
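 * socketcall(2) multiplexes every socket operation through a single syscall.
 * The ac[] table below gives the number of abi_long arguments to read from
 * the guest argument block for each SOCKOP_* value; for example a guest
 * send(fd, buf, len, flags) arrives as SOCKOP_send with four arguments and
 * is forwarded to do_sendto() with a null destination address.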
*/ 2239 static abi_long do_socketcall(int num, abi_ulong vptr) 2240 { 2241 static const unsigned ac[] = { /* number of arguments per call */ 2242 [SOCKOP_socket] = 3, /* domain, type, protocol */ 2243 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */ 2244 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */ 2245 [SOCKOP_listen] = 2, /* sockfd, backlog */ 2246 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */ 2247 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */ 2248 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */ 2249 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */ 2250 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */ 2251 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */ 2252 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */ 2253 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2254 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2255 [SOCKOP_shutdown] = 2, /* sockfd, how */ 2256 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */ 2257 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */ 2258 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2259 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2260 }; 2261 abi_long a[6]; /* max 6 args */ 2262 2263 /* first, collect the arguments in a[] according to ac[] */ 2264 if (num >= 0 && num < ARRAY_SIZE(ac)) { 2265 unsigned i; 2266 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */ 2267 for (i = 0; i < ac[num]; ++i) { 2268 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 2269 return -TARGET_EFAULT; 2270 } 2271 } 2272 } 2273 2274 /* now when we have the args, actually handle the call */ 2275 switch (num) { 2276 case SOCKOP_socket: /* domain, type, protocol */ 2277 return do_socket(a[0], a[1], a[2]); 2278 case SOCKOP_bind: /* sockfd, addr, addrlen */ 2279 return do_bind(a[0], a[1], a[2]); 2280 case SOCKOP_connect: /* sockfd, addr, addrlen */ 2281 return do_connect(a[0], a[1], a[2]); 2282 case SOCKOP_listen: /* sockfd, backlog */ 2283 return get_errno(listen(a[0], a[1])); 2284 case SOCKOP_accept: /* sockfd, addr, addrlen */ 2285 return do_accept4(a[0], a[1], a[2], 0); 2286 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */ 2287 return do_accept4(a[0], a[1], a[2], a[3]); 2288 case SOCKOP_getsockname: /* sockfd, addr, addrlen */ 2289 return do_getsockname(a[0], a[1], a[2]); 2290 case SOCKOP_getpeername: /* sockfd, addr, addrlen */ 2291 return do_getpeername(a[0], a[1], a[2]); 2292 case SOCKOP_socketpair: /* domain, type, protocol, tab */ 2293 return do_socketpair(a[0], a[1], a[2], a[3]); 2294 case SOCKOP_send: /* sockfd, msg, len, flags */ 2295 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 2296 case SOCKOP_recv: /* sockfd, msg, len, flags */ 2297 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 2298 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */ 2299 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 2300 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */ 2301 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 2302 case SOCKOP_shutdown: /* sockfd, how */ 2303 return get_errno(shutdown(a[0], a[1])); 2304 case SOCKOP_sendmsg: /* sockfd, msg, flags */ 2305 return do_sendrecvmsg(a[0], a[1], a[2], 1); 2306 case SOCKOP_recvmsg: /* sockfd, msg, flags */ 2307 return do_sendrecvmsg(a[0], a[1], a[2], 0); 2308 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */ 2309 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 2310 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, 
optlen */ 2311 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 2312 default: 2313 gemu_log("Unsupported socketcall: %d\n", num); 2314 return -TARGET_ENOSYS; 2315 } 2316 } 2317 #endif 2318 2319 #define N_SHM_REGIONS 32 2320 2321 static struct shm_region { 2322 abi_ulong start; 2323 abi_ulong size; 2324 } shm_regions[N_SHM_REGIONS]; 2325 2326 struct target_semid_ds 2327 { 2328 struct target_ipc_perm sem_perm; 2329 abi_ulong sem_otime; 2330 abi_ulong __unused1; 2331 abi_ulong sem_ctime; 2332 abi_ulong __unused2; 2333 abi_ulong sem_nsems; 2334 abi_ulong __unused3; 2335 abi_ulong __unused4; 2336 }; 2337 2338 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2339 abi_ulong target_addr) 2340 { 2341 struct target_ipc_perm *target_ip; 2342 struct target_semid_ds *target_sd; 2343 2344 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2345 return -TARGET_EFAULT; 2346 target_ip = &(target_sd->sem_perm); 2347 host_ip->__key = tswap32(target_ip->__key); 2348 host_ip->uid = tswap32(target_ip->uid); 2349 host_ip->gid = tswap32(target_ip->gid); 2350 host_ip->cuid = tswap32(target_ip->cuid); 2351 host_ip->cgid = tswap32(target_ip->cgid); 2352 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2353 host_ip->mode = tswap32(target_ip->mode); 2354 #else 2355 host_ip->mode = tswap16(target_ip->mode); 2356 #endif 2357 #if defined(TARGET_PPC) 2358 host_ip->__seq = tswap32(target_ip->__seq); 2359 #else 2360 host_ip->__seq = tswap16(target_ip->__seq); 2361 #endif 2362 unlock_user_struct(target_sd, target_addr, 0); 2363 return 0; 2364 } 2365 2366 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2367 struct ipc_perm *host_ip) 2368 { 2369 struct target_ipc_perm *target_ip; 2370 struct target_semid_ds *target_sd; 2371 2372 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2373 return -TARGET_EFAULT; 2374 target_ip = &(target_sd->sem_perm); 2375 target_ip->__key = tswap32(host_ip->__key); 2376 target_ip->uid = tswap32(host_ip->uid); 2377 target_ip->gid = tswap32(host_ip->gid); 2378 target_ip->cuid = tswap32(host_ip->cuid); 2379 target_ip->cgid = tswap32(host_ip->cgid); 2380 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2381 target_ip->mode = tswap32(host_ip->mode); 2382 #else 2383 target_ip->mode = tswap16(host_ip->mode); 2384 #endif 2385 #if defined(TARGET_PPC) 2386 target_ip->__seq = tswap32(host_ip->__seq); 2387 #else 2388 target_ip->__seq = tswap16(host_ip->__seq); 2389 #endif 2390 unlock_user_struct(target_sd, target_addr, 1); 2391 return 0; 2392 } 2393 2394 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2395 abi_ulong target_addr) 2396 { 2397 struct target_semid_ds *target_sd; 2398 2399 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2400 return -TARGET_EFAULT; 2401 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2402 return -TARGET_EFAULT; 2403 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2404 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2405 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2406 unlock_user_struct(target_sd, target_addr, 0); 2407 return 0; 2408 } 2409 2410 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2411 struct semid_ds *host_sd) 2412 { 2413 struct target_semid_ds *target_sd; 2414 2415 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2416 return -TARGET_EFAULT; 2417 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2418 return -TARGET_EFAULT; 2419 
target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2420 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2421 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2422 unlock_user_struct(target_sd, target_addr, 1); 2423 return 0; 2424 } 2425 2426 struct target_seminfo { 2427 int semmap; 2428 int semmni; 2429 int semmns; 2430 int semmnu; 2431 int semmsl; 2432 int semopm; 2433 int semume; 2434 int semusz; 2435 int semvmx; 2436 int semaem; 2437 }; 2438 2439 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2440 struct seminfo *host_seminfo) 2441 { 2442 struct target_seminfo *target_seminfo; 2443 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2444 return -TARGET_EFAULT; 2445 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2446 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2447 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2448 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2449 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2450 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2451 __put_user(host_seminfo->semume, &target_seminfo->semume); 2452 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2453 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2454 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2455 unlock_user_struct(target_seminfo, target_addr, 1); 2456 return 0; 2457 } 2458 2459 union semun { 2460 int val; 2461 struct semid_ds *buf; 2462 unsigned short *array; 2463 struct seminfo *__buf; 2464 }; 2465 2466 union target_semun { 2467 int val; 2468 abi_ulong buf; 2469 abi_ulong array; 2470 abi_ulong __buf; 2471 }; 2472 2473 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2474 abi_ulong target_addr) 2475 { 2476 int nsems; 2477 unsigned short *array; 2478 union semun semun; 2479 struct semid_ds semid_ds; 2480 int i, ret; 2481 2482 semun.buf = &semid_ds; 2483 2484 ret = semctl(semid, 0, IPC_STAT, semun); 2485 if (ret == -1) 2486 return get_errno(ret); 2487 2488 nsems = semid_ds.sem_nsems; 2489 2490 *host_array = malloc(nsems*sizeof(unsigned short)); 2491 if (!*host_array) { 2492 return -TARGET_ENOMEM; 2493 } 2494 array = lock_user(VERIFY_READ, target_addr, 2495 nsems*sizeof(unsigned short), 1); 2496 if (!array) { 2497 free(*host_array); 2498 return -TARGET_EFAULT; 2499 } 2500 2501 for(i=0; i<nsems; i++) { 2502 __get_user((*host_array)[i], &array[i]); 2503 } 2504 unlock_user(array, target_addr, 0); 2505 2506 return 0; 2507 } 2508 2509 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2510 unsigned short **host_array) 2511 { 2512 int nsems; 2513 unsigned short *array; 2514 union semun semun; 2515 struct semid_ds semid_ds; 2516 int i, ret; 2517 2518 semun.buf = &semid_ds; 2519 2520 ret = semctl(semid, 0, IPC_STAT, semun); 2521 if (ret == -1) 2522 return get_errno(ret); 2523 2524 nsems = semid_ds.sem_nsems; 2525 2526 array = lock_user(VERIFY_WRITE, target_addr, 2527 nsems*sizeof(unsigned short), 0); 2528 if (!array) 2529 return -TARGET_EFAULT; 2530 2531 for(i=0; i<nsems; i++) { 2532 __put_user((*host_array)[i], &array[i]); 2533 } 2534 free(*host_array); 2535 unlock_user(array, target_addr, 1); 2536 2537 return 0; 2538 } 2539 2540 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2541 union target_semun target_su) 2542 { 2543 union semun arg; 2544 struct semid_ds dsarg; 2545 unsigned short *array = NULL; 2546 struct seminfo seminfo; 2547 abi_long ret = -TARGET_EINVAL; 2548 
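/* Guest libcs may OR the IPC_64 flag (0x100) into the command word; only the
   low byte selects the actual GETVAL/SETALL/IPC_STAT/... operation, so it is
   masked off below before dispatching. */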
abi_long err; 2549 cmd &= 0xff; 2550 2551 switch( cmd ) { 2552 case GETVAL: 2553 case SETVAL: 2554 arg.val = tswap32(target_su.val); 2555 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2556 target_su.val = tswap32(arg.val); 2557 break; 2558 case GETALL: 2559 case SETALL: 2560 err = target_to_host_semarray(semid, &array, target_su.array); 2561 if (err) 2562 return err; 2563 arg.array = array; 2564 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2565 err = host_to_target_semarray(semid, target_su.array, &array); 2566 if (err) 2567 return err; 2568 break; 2569 case IPC_STAT: 2570 case IPC_SET: 2571 case SEM_STAT: 2572 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2573 if (err) 2574 return err; 2575 arg.buf = &dsarg; 2576 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2577 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2578 if (err) 2579 return err; 2580 break; 2581 case IPC_INFO: 2582 case SEM_INFO: 2583 arg.__buf = &seminfo; 2584 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2585 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2586 if (err) 2587 return err; 2588 break; 2589 case IPC_RMID: 2590 case GETPID: 2591 case GETNCNT: 2592 case GETZCNT: 2593 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2594 break; 2595 } 2596 2597 return ret; 2598 } 2599 2600 struct target_sembuf { 2601 unsigned short sem_num; 2602 short sem_op; 2603 short sem_flg; 2604 }; 2605 2606 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2607 abi_ulong target_addr, 2608 unsigned nsops) 2609 { 2610 struct target_sembuf *target_sembuf; 2611 int i; 2612 2613 target_sembuf = lock_user(VERIFY_READ, target_addr, 2614 nsops*sizeof(struct target_sembuf), 1); 2615 if (!target_sembuf) 2616 return -TARGET_EFAULT; 2617 2618 for(i=0; i<nsops; i++) { 2619 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2620 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2621 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2622 } 2623 2624 unlock_user(target_sembuf, target_addr, 0); 2625 2626 return 0; 2627 } 2628 2629 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2630 { 2631 struct sembuf sops[nsops]; 2632 2633 if (target_to_host_sembuf(sops, ptr, nsops)) 2634 return -TARGET_EFAULT; 2635 2636 return get_errno(semop(semid, sops, nsops)); 2637 } 2638 2639 struct target_msqid_ds 2640 { 2641 struct target_ipc_perm msg_perm; 2642 abi_ulong msg_stime; 2643 #if TARGET_ABI_BITS == 32 2644 abi_ulong __unused1; 2645 #endif 2646 abi_ulong msg_rtime; 2647 #if TARGET_ABI_BITS == 32 2648 abi_ulong __unused2; 2649 #endif 2650 abi_ulong msg_ctime; 2651 #if TARGET_ABI_BITS == 32 2652 abi_ulong __unused3; 2653 #endif 2654 abi_ulong __msg_cbytes; 2655 abi_ulong msg_qnum; 2656 abi_ulong msg_qbytes; 2657 abi_ulong msg_lspid; 2658 abi_ulong msg_lrpid; 2659 abi_ulong __unused4; 2660 abi_ulong __unused5; 2661 }; 2662 2663 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2664 abi_ulong target_addr) 2665 { 2666 struct target_msqid_ds *target_md; 2667 2668 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2669 return -TARGET_EFAULT; 2670 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2671 return -TARGET_EFAULT; 2672 host_md->msg_stime = tswapal(target_md->msg_stime); 2673 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2674 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2675 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2676 host_md->msg_qnum = tswapal(target_md->msg_qnum); 
2677 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2678 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2679 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2680 unlock_user_struct(target_md, target_addr, 0); 2681 return 0; 2682 } 2683 2684 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2685 struct msqid_ds *host_md) 2686 { 2687 struct target_msqid_ds *target_md; 2688 2689 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2690 return -TARGET_EFAULT; 2691 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2692 return -TARGET_EFAULT; 2693 target_md->msg_stime = tswapal(host_md->msg_stime); 2694 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2695 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2696 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2697 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2698 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2699 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2700 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2701 unlock_user_struct(target_md, target_addr, 1); 2702 return 0; 2703 } 2704 2705 struct target_msginfo { 2706 int msgpool; 2707 int msgmap; 2708 int msgmax; 2709 int msgmnb; 2710 int msgmni; 2711 int msgssz; 2712 int msgtql; 2713 unsigned short int msgseg; 2714 }; 2715 2716 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2717 struct msginfo *host_msginfo) 2718 { 2719 struct target_msginfo *target_msginfo; 2720 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2721 return -TARGET_EFAULT; 2722 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2723 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2724 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2725 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2726 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2727 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2728 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2729 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2730 unlock_user_struct(target_msginfo, target_addr, 1); 2731 return 0; 2732 } 2733 2734 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2735 { 2736 struct msqid_ds dsarg; 2737 struct msginfo msginfo; 2738 abi_long ret = -TARGET_EINVAL; 2739 2740 cmd &= 0xff; 2741 2742 switch (cmd) { 2743 case IPC_STAT: 2744 case IPC_SET: 2745 case MSG_STAT: 2746 if (target_to_host_msqid_ds(&dsarg,ptr)) 2747 return -TARGET_EFAULT; 2748 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2749 if (host_to_target_msqid_ds(ptr,&dsarg)) 2750 return -TARGET_EFAULT; 2751 break; 2752 case IPC_RMID: 2753 ret = get_errno(msgctl(msgid, cmd, NULL)); 2754 break; 2755 case IPC_INFO: 2756 case MSG_INFO: 2757 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2758 if (host_to_target_msginfo(ptr, &msginfo)) 2759 return -TARGET_EFAULT; 2760 break; 2761 } 2762 2763 return ret; 2764 } 2765 2766 struct target_msgbuf { 2767 abi_long mtype; 2768 char mtext[1]; 2769 }; 2770 2771 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2772 unsigned int msgsz, int msgflg) 2773 { 2774 struct target_msgbuf *target_mb; 2775 struct msgbuf *host_mb; 2776 abi_long ret = 0; 2777 2778 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2779 return -TARGET_EFAULT; 2780 host_mb = malloc(msgsz+sizeof(long)); 2781 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2782 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2783 ret = 
get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2784 free(host_mb); 2785 unlock_user_struct(target_mb, msgp, 0); 2786 2787 return ret; 2788 } 2789 2790 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2791 unsigned int msgsz, abi_long msgtyp, 2792 int msgflg) 2793 { 2794 struct target_msgbuf *target_mb; 2795 char *target_mtext; 2796 struct msgbuf *host_mb; 2797 abi_long ret = 0; 2798 2799 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2800 return -TARGET_EFAULT; 2801 2802 host_mb = g_malloc(msgsz+sizeof(long)); 2803 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2804 2805 if (ret > 0) { 2806 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2807 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2808 if (!target_mtext) { 2809 ret = -TARGET_EFAULT; 2810 goto end; 2811 } 2812 memcpy(target_mb->mtext, host_mb->mtext, ret); 2813 unlock_user(target_mtext, target_mtext_addr, ret); 2814 } 2815 2816 target_mb->mtype = tswapal(host_mb->mtype); 2817 2818 end: 2819 if (target_mb) 2820 unlock_user_struct(target_mb, msgp, 1); 2821 g_free(host_mb); 2822 return ret; 2823 } 2824 2825 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2826 abi_ulong target_addr) 2827 { 2828 struct target_shmid_ds *target_sd; 2829 2830 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2831 return -TARGET_EFAULT; 2832 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2833 return -TARGET_EFAULT; 2834 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2835 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2836 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2837 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2838 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2839 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2840 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2841 unlock_user_struct(target_sd, target_addr, 0); 2842 return 0; 2843 } 2844 2845 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2846 struct shmid_ds *host_sd) 2847 { 2848 struct target_shmid_ds *target_sd; 2849 2850 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2851 return -TARGET_EFAULT; 2852 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2853 return -TARGET_EFAULT; 2854 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2855 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2856 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2857 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2858 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2859 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2860 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2861 unlock_user_struct(target_sd, target_addr, 1); 2862 return 0; 2863 } 2864 2865 struct target_shminfo { 2866 abi_ulong shmmax; 2867 abi_ulong shmmin; 2868 abi_ulong shmmni; 2869 abi_ulong shmseg; 2870 abi_ulong shmall; 2871 }; 2872 2873 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 2874 struct shminfo *host_shminfo) 2875 { 2876 struct target_shminfo *target_shminfo; 2877 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 2878 return -TARGET_EFAULT; 2879 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 2880 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 2881 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 2882 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 2883 __put_user(host_shminfo->shmall, 
&target_shminfo->shmall); 2884 unlock_user_struct(target_shminfo, target_addr, 1); 2885 return 0; 2886 } 2887 2888 struct target_shm_info { 2889 int used_ids; 2890 abi_ulong shm_tot; 2891 abi_ulong shm_rss; 2892 abi_ulong shm_swp; 2893 abi_ulong swap_attempts; 2894 abi_ulong swap_successes; 2895 }; 2896 2897 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 2898 struct shm_info *host_shm_info) 2899 { 2900 struct target_shm_info *target_shm_info; 2901 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 2902 return -TARGET_EFAULT; 2903 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 2904 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 2905 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 2906 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 2907 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 2908 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 2909 unlock_user_struct(target_shm_info, target_addr, 1); 2910 return 0; 2911 } 2912 2913 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 2914 { 2915 struct shmid_ds dsarg; 2916 struct shminfo shminfo; 2917 struct shm_info shm_info; 2918 abi_long ret = -TARGET_EINVAL; 2919 2920 cmd &= 0xff; 2921 2922 switch(cmd) { 2923 case IPC_STAT: 2924 case IPC_SET: 2925 case SHM_STAT: 2926 if (target_to_host_shmid_ds(&dsarg, buf)) 2927 return -TARGET_EFAULT; 2928 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 2929 if (host_to_target_shmid_ds(buf, &dsarg)) 2930 return -TARGET_EFAULT; 2931 break; 2932 case IPC_INFO: 2933 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 2934 if (host_to_target_shminfo(buf, &shminfo)) 2935 return -TARGET_EFAULT; 2936 break; 2937 case SHM_INFO: 2938 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 2939 if (host_to_target_shm_info(buf, &shm_info)) 2940 return -TARGET_EFAULT; 2941 break; 2942 case IPC_RMID: 2943 case SHM_LOCK: 2944 case SHM_UNLOCK: 2945 ret = get_errno(shmctl(shmid, cmd, NULL)); 2946 break; 2947 } 2948 2949 return ret; 2950 } 2951 2952 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 2953 { 2954 abi_long raddr; 2955 void *host_raddr; 2956 struct shmid_ds shm_info; 2957 int i,ret; 2958 2959 /* find out the length of the shared memory segment */ 2960 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 2961 if (is_error(ret)) { 2962 /* can't get length, bail out */ 2963 return ret; 2964 } 2965 2966 mmap_lock(); 2967 2968 if (shmaddr) 2969 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 2970 else { 2971 abi_ulong mmap_start; 2972 2973 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 2974 2975 if (mmap_start == -1) { 2976 errno = ENOMEM; 2977 host_raddr = (void *)-1; 2978 } else 2979 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 2980 } 2981 2982 if (host_raddr == (void *)-1) { 2983 mmap_unlock(); 2984 return get_errno((long)host_raddr); 2985 } 2986 raddr=h2g((unsigned long)host_raddr); 2987 2988 page_set_flags(raddr, raddr + shm_info.shm_segsz, 2989 PAGE_VALID | PAGE_READ | 2990 ((shmflg & SHM_RDONLY)? 
0 : PAGE_WRITE)); 2991 2992 for (i = 0; i < N_SHM_REGIONS; i++) { 2993 if (shm_regions[i].start == 0) { 2994 shm_regions[i].start = raddr; 2995 shm_regions[i].size = shm_info.shm_segsz; 2996 break; 2997 } 2998 } 2999 3000 mmap_unlock(); 3001 return raddr; 3002 3003 } 3004 3005 static inline abi_long do_shmdt(abi_ulong shmaddr) 3006 { 3007 int i; 3008 3009 for (i = 0; i < N_SHM_REGIONS; ++i) { 3010 if (shm_regions[i].start == shmaddr) { 3011 shm_regions[i].start = 0; 3012 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3013 break; 3014 } 3015 } 3016 3017 return get_errno(shmdt(g2h(shmaddr))); 3018 } 3019 3020 #ifdef TARGET_NR_ipc 3021 /* ??? This only works with linear mappings. */ 3022 /* do_ipc() must return target values and target errnos. */ 3023 static abi_long do_ipc(unsigned int call, int first, 3024 int second, int third, 3025 abi_long ptr, abi_long fifth) 3026 { 3027 int version; 3028 abi_long ret = 0; 3029 3030 version = call >> 16; 3031 call &= 0xffff; 3032 3033 switch (call) { 3034 case IPCOP_semop: 3035 ret = do_semop(first, ptr, second); 3036 break; 3037 3038 case IPCOP_semget: 3039 ret = get_errno(semget(first, second, third)); 3040 break; 3041 3042 case IPCOP_semctl: 3043 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr); 3044 break; 3045 3046 case IPCOP_msgget: 3047 ret = get_errno(msgget(first, second)); 3048 break; 3049 3050 case IPCOP_msgsnd: 3051 ret = do_msgsnd(first, ptr, second, third); 3052 break; 3053 3054 case IPCOP_msgctl: 3055 ret = do_msgctl(first, second, ptr); 3056 break; 3057 3058 case IPCOP_msgrcv: 3059 switch (version) { 3060 case 0: 3061 { 3062 struct target_ipc_kludge { 3063 abi_long msgp; 3064 abi_long msgtyp; 3065 } *tmp; 3066 3067 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3068 ret = -TARGET_EFAULT; 3069 break; 3070 } 3071 3072 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3073 3074 unlock_user_struct(tmp, ptr, 0); 3075 break; 3076 } 3077 default: 3078 ret = do_msgrcv(first, ptr, second, fifth, third); 3079 } 3080 break; 3081 3082 case IPCOP_shmat: 3083 switch (version) { 3084 default: 3085 { 3086 abi_ulong raddr; 3087 raddr = do_shmat(first, ptr, second); 3088 if (is_error(raddr)) 3089 return get_errno(raddr); 3090 if (put_user_ual(raddr, third)) 3091 return -TARGET_EFAULT; 3092 break; 3093 } 3094 case 1: 3095 ret = -TARGET_EINVAL; 3096 break; 3097 } 3098 break; 3099 case IPCOP_shmdt: 3100 ret = do_shmdt(ptr); 3101 break; 3102 3103 case IPCOP_shmget: 3104 /* IPC_* flag values are the same on all linux platforms */ 3105 ret = get_errno(shmget(first, second, third)); 3106 break; 3107 3108 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3109 case IPCOP_shmctl: 3110 ret = do_shmctl(first, second, ptr); 3111 break; 3112 default: 3113 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3114 ret = -TARGET_ENOSYS; 3115 break; 3116 } 3117 return ret; 3118 } 3119 #endif 3120 3121 /* kernel structure types definitions */ 3122 3123 #define STRUCT(name, ...) STRUCT_ ## name, 3124 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3125 enum { 3126 #include "syscall_types.h" 3127 }; 3128 #undef STRUCT 3129 #undef STRUCT_SPECIAL 3130 3131 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3132 #define STRUCT_SPECIAL(name) 3133 #include "syscall_types.h" 3134 #undef STRUCT 3135 #undef STRUCT_SPECIAL 3136 3137 typedef struct IOCTLEntry IOCTLEntry; 3138 3139 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3140 int fd, abi_long cmd, abi_long arg); 3141 3142 struct IOCTLEntry { 3143 unsigned int target_cmd; 3144 unsigned int host_cmd; 3145 const char *name; 3146 int access; 3147 do_ioctl_fn *do_ioctl; 3148 const argtype arg_type[5]; 3149 }; 3150 3151 #define IOC_R 0x0001 3152 #define IOC_W 0x0002 3153 #define IOC_RW (IOC_R | IOC_W) 3154 3155 #define MAX_STRUCT_SIZE 4096 3156 3157 #ifdef CONFIG_FIEMAP 3158 /* So fiemap access checks don't overflow on 32 bit systems. 3159 * This is very slightly smaller than the limit imposed by 3160 * the underlying kernel. 3161 */ 3162 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3163 / sizeof(struct fiemap_extent)) 3164 3165 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3166 int fd, abi_long cmd, abi_long arg) 3167 { 3168 /* The parameter for this ioctl is a struct fiemap followed 3169 * by an array of struct fiemap_extent whose size is set 3170 * in fiemap->fm_extent_count. The array is filled in by the 3171 * ioctl. 3172 */ 3173 int target_size_in, target_size_out; 3174 struct fiemap *fm; 3175 const argtype *arg_type = ie->arg_type; 3176 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3177 void *argptr, *p; 3178 abi_long ret; 3179 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3180 uint32_t outbufsz; 3181 int free_fm = 0; 3182 3183 assert(arg_type[0] == TYPE_PTR); 3184 assert(ie->access == IOC_RW); 3185 arg_type++; 3186 target_size_in = thunk_type_size(arg_type, 0); 3187 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3188 if (!argptr) { 3189 return -TARGET_EFAULT; 3190 } 3191 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3192 unlock_user(argptr, arg, 0); 3193 fm = (struct fiemap *)buf_temp; 3194 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3195 return -TARGET_EINVAL; 3196 } 3197 3198 outbufsz = sizeof (*fm) + 3199 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3200 3201 if (outbufsz > MAX_STRUCT_SIZE) { 3202 /* We can't fit all the extents into the fixed size buffer. 3203 * Allocate one that is large enough and use it instead. 
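 * The oversized buffer is released again (see free_fm) once the converted
 * extents have been copied back to guest memory.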
3204 */ 3205 fm = malloc(outbufsz); 3206 if (!fm) { 3207 return -TARGET_ENOMEM; 3208 } 3209 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3210 free_fm = 1; 3211 } 3212 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3213 if (!is_error(ret)) { 3214 target_size_out = target_size_in; 3215 /* An extent_count of 0 means we were only counting the extents 3216 * so there are no structs to copy 3217 */ 3218 if (fm->fm_extent_count != 0) { 3219 target_size_out += fm->fm_mapped_extents * extent_size; 3220 } 3221 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3222 if (!argptr) { 3223 ret = -TARGET_EFAULT; 3224 } else { 3225 /* Convert the struct fiemap */ 3226 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3227 if (fm->fm_extent_count != 0) { 3228 p = argptr + target_size_in; 3229 /* ...and then all the struct fiemap_extents */ 3230 for (i = 0; i < fm->fm_mapped_extents; i++) { 3231 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3232 THUNK_TARGET); 3233 p += extent_size; 3234 } 3235 } 3236 unlock_user(argptr, arg, target_size_out); 3237 } 3238 } 3239 if (free_fm) { 3240 free(fm); 3241 } 3242 return ret; 3243 } 3244 #endif 3245 3246 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3247 int fd, abi_long cmd, abi_long arg) 3248 { 3249 const argtype *arg_type = ie->arg_type; 3250 int target_size; 3251 void *argptr; 3252 int ret; 3253 struct ifconf *host_ifconf; 3254 uint32_t outbufsz; 3255 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3256 int target_ifreq_size; 3257 int nb_ifreq; 3258 int free_buf = 0; 3259 int i; 3260 int target_ifc_len; 3261 abi_long target_ifc_buf; 3262 int host_ifc_len; 3263 char *host_ifc_buf; 3264 3265 assert(arg_type[0] == TYPE_PTR); 3266 assert(ie->access == IOC_RW); 3267 3268 arg_type++; 3269 target_size = thunk_type_size(arg_type, 0); 3270 3271 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3272 if (!argptr) 3273 return -TARGET_EFAULT; 3274 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3275 unlock_user(argptr, arg, 0); 3276 3277 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3278 target_ifc_len = host_ifconf->ifc_len; 3279 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3280 3281 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3282 nb_ifreq = target_ifc_len / target_ifreq_size; 3283 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3284 3285 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 3286 if (outbufsz > MAX_STRUCT_SIZE) { 3287 /* We can't fit all the extents into the fixed size buffer. 3288 * Allocate one that is large enough and use it instead. 
3289 */ 3290 host_ifconf = malloc(outbufsz); 3291 if (!host_ifconf) { 3292 return -TARGET_ENOMEM; 3293 } 3294 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3295 free_buf = 1; 3296 } 3297 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3298 3299 host_ifconf->ifc_len = host_ifc_len; 3300 host_ifconf->ifc_buf = host_ifc_buf; 3301 3302 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3303 if (!is_error(ret)) { 3304 /* convert host ifc_len to target ifc_len */ 3305 3306 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3307 target_ifc_len = nb_ifreq * target_ifreq_size; 3308 host_ifconf->ifc_len = target_ifc_len; 3309 3310 /* restore target ifc_buf */ 3311 3312 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3313 3314 /* copy struct ifconf to target user */ 3315 3316 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3317 if (!argptr) 3318 return -TARGET_EFAULT; 3319 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3320 unlock_user(argptr, arg, target_size); 3321 3322 /* copy ifreq[] to target user */ 3323 3324 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3325 for (i = 0; i < nb_ifreq ; i++) { 3326 thunk_convert(argptr + i * target_ifreq_size, 3327 host_ifc_buf + i * sizeof(struct ifreq), 3328 ifreq_arg_type, THUNK_TARGET); 3329 } 3330 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3331 } 3332 3333 if (free_buf) { 3334 free(host_ifconf); 3335 } 3336 3337 return ret; 3338 } 3339 3340 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3341 abi_long cmd, abi_long arg) 3342 { 3343 void *argptr; 3344 struct dm_ioctl *host_dm; 3345 abi_long guest_data; 3346 uint32_t guest_data_size; 3347 int target_size; 3348 const argtype *arg_type = ie->arg_type; 3349 abi_long ret; 3350 void *big_buf = NULL; 3351 char *host_data; 3352 3353 arg_type++; 3354 target_size = thunk_type_size(arg_type, 0); 3355 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3356 if (!argptr) { 3357 ret = -TARGET_EFAULT; 3358 goto out; 3359 } 3360 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3361 unlock_user(argptr, arg, 0); 3362 3363 /* buf_temp is too small, so fetch things into a bigger buffer */ 3364 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3365 memcpy(big_buf, buf_temp, target_size); 3366 buf_temp = big_buf; 3367 host_dm = big_buf; 3368 3369 guest_data = arg + host_dm->data_start; 3370 if ((guest_data - arg) < 0) { 3371 ret = -EINVAL; 3372 goto out; 3373 } 3374 guest_data_size = host_dm->data_size - host_dm->data_start; 3375 host_data = (char*)host_dm + host_dm->data_start; 3376 3377 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3378 switch (ie->host_cmd) { 3379 case DM_REMOVE_ALL: 3380 case DM_LIST_DEVICES: 3381 case DM_DEV_CREATE: 3382 case DM_DEV_REMOVE: 3383 case DM_DEV_SUSPEND: 3384 case DM_DEV_STATUS: 3385 case DM_DEV_WAIT: 3386 case DM_TABLE_STATUS: 3387 case DM_TABLE_CLEAR: 3388 case DM_TABLE_DEPS: 3389 case DM_LIST_VERSIONS: 3390 /* no input data */ 3391 break; 3392 case DM_DEV_RENAME: 3393 case DM_DEV_SET_GEOMETRY: 3394 /* data contains only strings */ 3395 memcpy(host_data, argptr, guest_data_size); 3396 break; 3397 case DM_TARGET_MSG: 3398 memcpy(host_data, argptr, guest_data_size); 3399 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3400 break; 3401 case DM_TABLE_LOAD: 3402 { 3403 void *gspec = argptr; 3404 void *cur_data = host_data; 3405 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3406 int spec_size = 
thunk_type_size(arg_type, 0); 3407 int i; 3408 3409 for (i = 0; i < host_dm->target_count; i++) { 3410 struct dm_target_spec *spec = cur_data; 3411 uint32_t next; 3412 int slen; 3413 3414 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3415 slen = strlen((char*)gspec + spec_size) + 1; 3416 next = spec->next; 3417 spec->next = sizeof(*spec) + slen; 3418 strcpy((char*)&spec[1], gspec + spec_size); 3419 gspec += next; 3420 cur_data += spec->next; 3421 } 3422 break; 3423 } 3424 default: 3425 ret = -TARGET_EINVAL; 3426 goto out; 3427 } 3428 unlock_user(argptr, guest_data, 0); 3429 3430 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3431 if (!is_error(ret)) { 3432 guest_data = arg + host_dm->data_start; 3433 guest_data_size = host_dm->data_size - host_dm->data_start; 3434 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3435 switch (ie->host_cmd) { 3436 case DM_REMOVE_ALL: 3437 case DM_DEV_CREATE: 3438 case DM_DEV_REMOVE: 3439 case DM_DEV_RENAME: 3440 case DM_DEV_SUSPEND: 3441 case DM_DEV_STATUS: 3442 case DM_TABLE_LOAD: 3443 case DM_TABLE_CLEAR: 3444 case DM_TARGET_MSG: 3445 case DM_DEV_SET_GEOMETRY: 3446 /* no return data */ 3447 break; 3448 case DM_LIST_DEVICES: 3449 { 3450 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3451 uint32_t remaining_data = guest_data_size; 3452 void *cur_data = argptr; 3453 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3454 int nl_size = 12; /* can't use thunk_size due to alignment */ 3455 3456 while (1) { 3457 uint32_t next = nl->next; 3458 if (next) { 3459 nl->next = nl_size + (strlen(nl->name) + 1); 3460 } 3461 if (remaining_data < nl->next) { 3462 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3463 break; 3464 } 3465 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3466 strcpy(cur_data + nl_size, nl->name); 3467 cur_data += nl->next; 3468 remaining_data -= nl->next; 3469 if (!next) { 3470 break; 3471 } 3472 nl = (void*)nl + next; 3473 } 3474 break; 3475 } 3476 case DM_DEV_WAIT: 3477 case DM_TABLE_STATUS: 3478 { 3479 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3480 void *cur_data = argptr; 3481 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3482 int spec_size = thunk_type_size(arg_type, 0); 3483 int i; 3484 3485 for (i = 0; i < host_dm->target_count; i++) { 3486 uint32_t next = spec->next; 3487 int slen = strlen((char*)&spec[1]) + 1; 3488 spec->next = (cur_data - argptr) + spec_size + slen; 3489 if (guest_data_size < spec->next) { 3490 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3491 break; 3492 } 3493 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3494 strcpy(cur_data + spec_size, (char*)&spec[1]); 3495 cur_data = argptr + spec->next; 3496 spec = (void*)host_dm + host_dm->data_start + next; 3497 } 3498 break; 3499 } 3500 case DM_TABLE_DEPS: 3501 { 3502 void *hdata = (void*)host_dm + host_dm->data_start; 3503 int count = *(uint32_t*)hdata; 3504 uint64_t *hdev = hdata + 8; 3505 uint64_t *gdev = argptr + 8; 3506 int i; 3507 3508 *(uint32_t*)argptr = tswap32(count); 3509 for (i = 0; i < count; i++) { 3510 *gdev = tswap64(*hdev); 3511 gdev++; 3512 hdev++; 3513 } 3514 break; 3515 } 3516 case DM_LIST_VERSIONS: 3517 { 3518 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3519 uint32_t remaining_data = guest_data_size; 3520 void *cur_data = argptr; 3521 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3522 int vers_size = thunk_type_size(arg_type, 0); 3523 3524 while (1) { 3525 uint32_t next = vers->next; 3526 if (next) { 3527 
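/* Rewrite the offset to the following record using the target-side struct
   size, since the host and target dm_target_versions layouts can differ. */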
vers->next = vers_size + (strlen(vers->name) + 1); 3528 } 3529 if (remaining_data < vers->next) { 3530 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3531 break; 3532 } 3533 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3534 strcpy(cur_data + vers_size, vers->name); 3535 cur_data += vers->next; 3536 remaining_data -= vers->next; 3537 if (!next) { 3538 break; 3539 } 3540 vers = (void*)vers + next; 3541 } 3542 break; 3543 } 3544 default: 3545 ret = -TARGET_EINVAL; 3546 goto out; 3547 } 3548 unlock_user(argptr, guest_data, guest_data_size); 3549 3550 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3551 if (!argptr) { 3552 ret = -TARGET_EFAULT; 3553 goto out; 3554 } 3555 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3556 unlock_user(argptr, arg, target_size); 3557 } 3558 out: 3559 g_free(big_buf); 3560 return ret; 3561 } 3562 3563 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3564 int fd, abi_long cmd, abi_long arg) 3565 { 3566 const argtype *arg_type = ie->arg_type; 3567 const StructEntry *se; 3568 const argtype *field_types; 3569 const int *dst_offsets, *src_offsets; 3570 int target_size; 3571 void *argptr; 3572 abi_ulong *target_rt_dev_ptr; 3573 unsigned long *host_rt_dev_ptr; 3574 abi_long ret; 3575 int i; 3576 3577 assert(ie->access == IOC_W); 3578 assert(*arg_type == TYPE_PTR); 3579 arg_type++; 3580 assert(*arg_type == TYPE_STRUCT); 3581 target_size = thunk_type_size(arg_type, 0); 3582 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3583 if (!argptr) { 3584 return -TARGET_EFAULT; 3585 } 3586 arg_type++; 3587 assert(*arg_type == (int)STRUCT_rtentry); 3588 se = struct_entries + *arg_type++; 3589 assert(se->convert[0] == NULL); 3590 /* convert struct here to be able to catch rt_dev string */ 3591 field_types = se->field_types; 3592 dst_offsets = se->field_offsets[THUNK_HOST]; 3593 src_offsets = se->field_offsets[THUNK_TARGET]; 3594 for (i = 0; i < se->nb_fields; i++) { 3595 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3596 assert(*field_types == TYPE_PTRVOID); 3597 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3598 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3599 if (*target_rt_dev_ptr != 0) { 3600 *host_rt_dev_ptr = (unsigned long)lock_user_string( 3601 tswapal(*target_rt_dev_ptr)); 3602 if (!*host_rt_dev_ptr) { 3603 unlock_user(argptr, arg, 0); 3604 return -TARGET_EFAULT; 3605 } 3606 } else { 3607 *host_rt_dev_ptr = 0; 3608 } 3609 field_types++; 3610 continue; 3611 } 3612 field_types = thunk_convert(buf_temp + dst_offsets[i], 3613 argptr + src_offsets[i], 3614 field_types, THUNK_HOST); 3615 } 3616 unlock_user(argptr, arg, 0); 3617 3618 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3619 if (*host_rt_dev_ptr != 0) { 3620 unlock_user((void *)*host_rt_dev_ptr, 3621 *target_rt_dev_ptr, 0); 3622 } 3623 return ret; 3624 } 3625 3626 static IOCTLEntry ioctl_entries[] = { 3627 #define IOCTL(cmd, access, ...) \ 3628 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3629 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3630 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3631 #include "ioctls.h" 3632 { 0, 0, }, 3633 }; 3634 3635 /* ??? Implement proper locking for ioctls. */ 3636 /* do_ioctl() Must return target values and target errnos. 
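 * The target command number is looked up in ioctl_entries[]; entries that
 * provide a do_ioctl callback perform their own argument conversion, while
 * all other requests are converted generically through the thunk machinery
 * according to their arg_type description.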
*/ 3637 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3638 { 3639 const IOCTLEntry *ie; 3640 const argtype *arg_type; 3641 abi_long ret; 3642 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3643 int target_size; 3644 void *argptr; 3645 3646 ie = ioctl_entries; 3647 for(;;) { 3648 if (ie->target_cmd == 0) { 3649 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3650 return -TARGET_ENOSYS; 3651 } 3652 if (ie->target_cmd == cmd) 3653 break; 3654 ie++; 3655 } 3656 arg_type = ie->arg_type; 3657 #if defined(DEBUG) 3658 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3659 #endif 3660 if (ie->do_ioctl) { 3661 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3662 } 3663 3664 switch(arg_type[0]) { 3665 case TYPE_NULL: 3666 /* no argument */ 3667 ret = get_errno(ioctl(fd, ie->host_cmd)); 3668 break; 3669 case TYPE_PTRVOID: 3670 case TYPE_INT: 3671 /* int argment */ 3672 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3673 break; 3674 case TYPE_PTR: 3675 arg_type++; 3676 target_size = thunk_type_size(arg_type, 0); 3677 switch(ie->access) { 3678 case IOC_R: 3679 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3680 if (!is_error(ret)) { 3681 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3682 if (!argptr) 3683 return -TARGET_EFAULT; 3684 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3685 unlock_user(argptr, arg, target_size); 3686 } 3687 break; 3688 case IOC_W: 3689 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3690 if (!argptr) 3691 return -TARGET_EFAULT; 3692 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3693 unlock_user(argptr, arg, 0); 3694 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3695 break; 3696 default: 3697 case IOC_RW: 3698 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3699 if (!argptr) 3700 return -TARGET_EFAULT; 3701 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3702 unlock_user(argptr, arg, 0); 3703 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3704 if (!is_error(ret)) { 3705 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3706 if (!argptr) 3707 return -TARGET_EFAULT; 3708 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3709 unlock_user(argptr, arg, target_size); 3710 } 3711 break; 3712 } 3713 break; 3714 default: 3715 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3716 (long)cmd, arg_type[0]); 3717 ret = -TARGET_ENOSYS; 3718 break; 3719 } 3720 return ret; 3721 } 3722 3723 static const bitmask_transtbl iflag_tbl[] = { 3724 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3725 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3726 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3727 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3728 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3729 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3730 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3731 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3732 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3733 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3734 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3735 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3736 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 3737 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3738 { 0, 0, 0, 0 } 3739 }; 3740 3741 static const bitmask_transtbl oflag_tbl[] = { 3742 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3743 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3744 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3745 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3746 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR 
}, 3747 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3748 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3749 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3750 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3751 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3752 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3753 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3754 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3755 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3756 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3757 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3758 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3759 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3760 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3761 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3762 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3763 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3764 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3765 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3766 { 0, 0, 0, 0 } 3767 }; 3768 3769 static const bitmask_transtbl cflag_tbl[] = { 3770 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3771 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3772 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3773 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3774 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3775 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3776 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3777 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3778 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3779 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3780 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3781 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 3782 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3783 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3784 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3785 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3786 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3787 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3788 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3789 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3790 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3791 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3792 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3793 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3794 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3795 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3796 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3797 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3798 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3799 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3800 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3801 { 0, 0, 0, 0 } 3802 }; 3803 3804 static const bitmask_transtbl lflag_tbl[] = { 3805 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 3806 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 3807 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 3808 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 3809 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 3810 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 3811 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 3812 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 3813 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 3814 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 3815 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 3816 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 3817 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 3818 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 3819 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 3820 { 0, 0, 0, 0 } 3821 }; 3822 3823 static void target_to_host_termios 
(void *dst, const void *src) 3824 { 3825 struct host_termios *host = dst; 3826 const struct target_termios *target = src; 3827 3828 host->c_iflag = 3829 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 3830 host->c_oflag = 3831 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 3832 host->c_cflag = 3833 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 3834 host->c_lflag = 3835 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 3836 host->c_line = target->c_line; 3837 3838 memset(host->c_cc, 0, sizeof(host->c_cc)); 3839 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 3840 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 3841 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 3842 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 3843 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 3844 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 3845 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 3846 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 3847 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 3848 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 3849 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 3850 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 3851 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 3852 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 3853 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 3854 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 3855 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 3856 } 3857 3858 static void host_to_target_termios (void *dst, const void *src) 3859 { 3860 struct target_termios *target = dst; 3861 const struct host_termios *host = src; 3862 3863 target->c_iflag = 3864 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 3865 target->c_oflag = 3866 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 3867 target->c_cflag = 3868 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 3869 target->c_lflag = 3870 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 3871 target->c_line = host->c_line; 3872 3873 memset(target->c_cc, 0, sizeof(target->c_cc)); 3874 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 3875 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 3876 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 3877 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 3878 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 3879 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 3880 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 3881 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 3882 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 3883 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 3884 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 3885 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 3886 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 3887 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 3888 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 3889 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 3890 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 3891 } 3892 3893 static const StructEntry struct_termios_def = { 3894 .convert = { host_to_target_termios, target_to_host_termios }, 3895 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 3896 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 3897 }; 3898 3899 static bitmask_transtbl mmap_flags_tbl[] = { 3900 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 3901 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 3902 { TARGET_MAP_FIXED, 
TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 3903 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 3904 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 3905 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 3906 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 3907 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 3908 { 0, 0, 0, 0 } 3909 }; 3910 3911 #if defined(TARGET_I386) 3912 3913 /* NOTE: there is really one LDT for all the threads */ 3914 static uint8_t *ldt_table; 3915 3916 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 3917 { 3918 int size; 3919 void *p; 3920 3921 if (!ldt_table) 3922 return 0; 3923 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 3924 if (size > bytecount) 3925 size = bytecount; 3926 p = lock_user(VERIFY_WRITE, ptr, size, 0); 3927 if (!p) 3928 return -TARGET_EFAULT; 3929 /* ??? Should this be byteswapped? */ 3930 memcpy(p, ldt_table, size); 3931 unlock_user(p, ptr, size); 3932 return size; 3933 } 3934 3935 /* XXX: add locking support */ 3936 static abi_long write_ldt(CPUX86State *env, 3937 abi_ulong ptr, unsigned long bytecount, int oldmode) 3938 { 3939 struct target_modify_ldt_ldt_s ldt_info; 3940 struct target_modify_ldt_ldt_s *target_ldt_info; 3941 int seg_32bit, contents, read_exec_only, limit_in_pages; 3942 int seg_not_present, useable, lm; 3943 uint32_t *lp, entry_1, entry_2; 3944 3945 if (bytecount != sizeof(ldt_info)) 3946 return -TARGET_EINVAL; 3947 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 3948 return -TARGET_EFAULT; 3949 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 3950 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 3951 ldt_info.limit = tswap32(target_ldt_info->limit); 3952 ldt_info.flags = tswap32(target_ldt_info->flags); 3953 unlock_user_struct(target_ldt_info, ptr, 0); 3954 3955 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 3956 return -TARGET_EINVAL; 3957 seg_32bit = ldt_info.flags & 1; 3958 contents = (ldt_info.flags >> 1) & 3; 3959 read_exec_only = (ldt_info.flags >> 3) & 1; 3960 limit_in_pages = (ldt_info.flags >> 4) & 1; 3961 seg_not_present = (ldt_info.flags >> 5) & 1; 3962 useable = (ldt_info.flags >> 6) & 1; 3963 #ifdef TARGET_ABI32 3964 lm = 0; 3965 #else 3966 lm = (ldt_info.flags >> 7) & 1; 3967 #endif 3968 if (contents == 3) { 3969 if (oldmode) 3970 return -TARGET_EINVAL; 3971 if (seg_not_present == 0) 3972 return -TARGET_EINVAL; 3973 } 3974 /* allocate the LDT */ 3975 if (!ldt_table) { 3976 env->ldt.base = target_mmap(0, 3977 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 3978 PROT_READ|PROT_WRITE, 3979 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 3980 if (env->ldt.base == -1) 3981 return -TARGET_ENOMEM; 3982 memset(g2h(env->ldt.base), 0, 3983 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 3984 env->ldt.limit = 0xffff; 3985 ldt_table = g2h(env->ldt.base); 3986 } 3987 3988 /* NOTE: same code as Linux kernel */ 3989 /* Allow LDTs to be cleared by the user.
*/ 3990 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 3991 if (oldmode || 3992 (contents == 0 && 3993 read_exec_only == 1 && 3994 seg_32bit == 0 && 3995 limit_in_pages == 0 && 3996 seg_not_present == 1 && 3997 useable == 0 )) { 3998 entry_1 = 0; 3999 entry_2 = 0; 4000 goto install; 4001 } 4002 } 4003 4004 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4005 (ldt_info.limit & 0x0ffff); 4006 entry_2 = (ldt_info.base_addr & 0xff000000) | 4007 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4008 (ldt_info.limit & 0xf0000) | 4009 ((read_exec_only ^ 1) << 9) | 4010 (contents << 10) | 4011 ((seg_not_present ^ 1) << 15) | 4012 (seg_32bit << 22) | 4013 (limit_in_pages << 23) | 4014 (lm << 21) | 4015 0x7000; 4016 if (!oldmode) 4017 entry_2 |= (useable << 20); 4018 4019 /* Install the new entry ... */ 4020 install: 4021 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 4022 lp[0] = tswap32(entry_1); 4023 lp[1] = tswap32(entry_2); 4024 return 0; 4025 } 4026 4027 /* specific and weird i386 syscalls */ 4028 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 4029 unsigned long bytecount) 4030 { 4031 abi_long ret; 4032 4033 switch (func) { 4034 case 0: 4035 ret = read_ldt(ptr, bytecount); 4036 break; 4037 case 1: 4038 ret = write_ldt(env, ptr, bytecount, 1); 4039 break; 4040 case 0x11: 4041 ret = write_ldt(env, ptr, bytecount, 0); 4042 break; 4043 default: 4044 ret = -TARGET_ENOSYS; 4045 break; 4046 } 4047 return ret; 4048 } 4049 4050 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4051 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4052 { 4053 uint64_t *gdt_table = g2h(env->gdt.base); 4054 struct target_modify_ldt_ldt_s ldt_info; 4055 struct target_modify_ldt_ldt_s *target_ldt_info; 4056 int seg_32bit, contents, read_exec_only, limit_in_pages; 4057 int seg_not_present, useable, lm; 4058 uint32_t *lp, entry_1, entry_2; 4059 int i; 4060 4061 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4062 if (!target_ldt_info) 4063 return -TARGET_EFAULT; 4064 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4065 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4066 ldt_info.limit = tswap32(target_ldt_info->limit); 4067 ldt_info.flags = tswap32(target_ldt_info->flags); 4068 if (ldt_info.entry_number == -1) { 4069 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4070 if (gdt_table[i] == 0) { 4071 ldt_info.entry_number = i; 4072 target_ldt_info->entry_number = tswap32(i); 4073 break; 4074 } 4075 } 4076 } 4077 unlock_user_struct(target_ldt_info, ptr, 1); 4078 4079 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4080 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4081 return -TARGET_EINVAL; 4082 seg_32bit = ldt_info.flags & 1; 4083 contents = (ldt_info.flags >> 1) & 3; 4084 read_exec_only = (ldt_info.flags >> 3) & 1; 4085 limit_in_pages = (ldt_info.flags >> 4) & 1; 4086 seg_not_present = (ldt_info.flags >> 5) & 1; 4087 useable = (ldt_info.flags >> 6) & 1; 4088 #ifdef TARGET_ABI32 4089 lm = 0; 4090 #else 4091 lm = (ldt_info.flags >> 7) & 1; 4092 #endif 4093 4094 if (contents == 3) { 4095 if (seg_not_present == 0) 4096 return -TARGET_EINVAL; 4097 } 4098 4099 /* NOTE: same code as Linux kernel */ 4100 /* Allow LDTs to be cleared by the user. 
*/ 4101 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4102 if ((contents == 0 && 4103 read_exec_only == 1 && 4104 seg_32bit == 0 && 4105 limit_in_pages == 0 && 4106 seg_not_present == 1 && 4107 useable == 0 )) { 4108 entry_1 = 0; 4109 entry_2 = 0; 4110 goto install; 4111 } 4112 } 4113 4114 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4115 (ldt_info.limit & 0x0ffff); 4116 entry_2 = (ldt_info.base_addr & 0xff000000) | 4117 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4118 (ldt_info.limit & 0xf0000) | 4119 ((read_exec_only ^ 1) << 9) | 4120 (contents << 10) | 4121 ((seg_not_present ^ 1) << 15) | 4122 (seg_32bit << 22) | 4123 (limit_in_pages << 23) | 4124 (useable << 20) | 4125 (lm << 21) | 4126 0x7000; 4127 4128 /* Install the new entry ... */ 4129 install: 4130 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4131 lp[0] = tswap32(entry_1); 4132 lp[1] = tswap32(entry_2); 4133 return 0; 4134 } 4135 4136 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4137 { 4138 struct target_modify_ldt_ldt_s *target_ldt_info; 4139 uint64_t *gdt_table = g2h(env->gdt.base); 4140 uint32_t base_addr, limit, flags; 4141 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4142 int seg_not_present, useable, lm; 4143 uint32_t *lp, entry_1, entry_2; 4144 4145 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4146 if (!target_ldt_info) 4147 return -TARGET_EFAULT; 4148 idx = tswap32(target_ldt_info->entry_number); 4149 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4150 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4151 unlock_user_struct(target_ldt_info, ptr, 1); 4152 return -TARGET_EINVAL; 4153 } 4154 lp = (uint32_t *)(gdt_table + idx); 4155 entry_1 = tswap32(lp[0]); 4156 entry_2 = tswap32(lp[1]); 4157 4158 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4159 contents = (entry_2 >> 10) & 3; 4160 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4161 seg_32bit = (entry_2 >> 22) & 1; 4162 limit_in_pages = (entry_2 >> 23) & 1; 4163 useable = (entry_2 >> 20) & 1; 4164 #ifdef TARGET_ABI32 4165 lm = 0; 4166 #else 4167 lm = (entry_2 >> 21) & 1; 4168 #endif 4169 flags = (seg_32bit << 0) | (contents << 1) | 4170 (read_exec_only << 3) | (limit_in_pages << 4) | 4171 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4172 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4173 base_addr = (entry_1 >> 16) | 4174 (entry_2 & 0xff000000) | 4175 ((entry_2 & 0xff) << 16); 4176 target_ldt_info->base_addr = tswapal(base_addr); 4177 target_ldt_info->limit = tswap32(limit); 4178 target_ldt_info->flags = tswap32(flags); 4179 unlock_user_struct(target_ldt_info, ptr, 1); 4180 return 0; 4181 } 4182 #endif /* TARGET_I386 && TARGET_ABI32 */ 4183 4184 #ifndef TARGET_ABI32 4185 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4186 { 4187 abi_long ret = 0; 4188 abi_ulong val; 4189 int idx; 4190 4191 switch(code) { 4192 case TARGET_ARCH_SET_GS: 4193 case TARGET_ARCH_SET_FS: 4194 if (code == TARGET_ARCH_SET_GS) 4195 idx = R_GS; 4196 else 4197 idx = R_FS; 4198 cpu_x86_load_seg(env, idx, 0); 4199 env->segs[idx].base = addr; 4200 break; 4201 case TARGET_ARCH_GET_GS: 4202 case TARGET_ARCH_GET_FS: 4203 if (code == TARGET_ARCH_GET_GS) 4204 idx = R_GS; 4205 else 4206 idx = R_FS; 4207 val = env->segs[idx].base; 4208 if (put_user(val, addr, abi_ulong)) 4209 ret = -TARGET_EFAULT; 4210 break; 4211 default: 4212 ret = -TARGET_EINVAL; 4213 break; 4214 } 4215 return ret; 4216 } 4217 #endif 4218 4219 #endif /* defined(TARGET_I386) */ 4220 4221 #define NEW_STACK_SIZE 0x40000 4222 4223 4224 static pthread_mutex_t 
clone_lock = PTHREAD_MUTEX_INITIALIZER; 4225 typedef struct { 4226 CPUArchState *env; 4227 pthread_mutex_t mutex; 4228 pthread_cond_t cond; 4229 pthread_t thread; 4230 uint32_t tid; 4231 abi_ulong child_tidptr; 4232 abi_ulong parent_tidptr; 4233 sigset_t sigmask; 4234 } new_thread_info; 4235 4236 static void *clone_func(void *arg) 4237 { 4238 new_thread_info *info = arg; 4239 CPUArchState *env; 4240 CPUState *cpu; 4241 TaskState *ts; 4242 4243 env = info->env; 4244 cpu = ENV_GET_CPU(env); 4245 thread_cpu = cpu; 4246 ts = (TaskState *)cpu->opaque; 4247 info->tid = gettid(); 4248 cpu->host_tid = info->tid; 4249 task_settid(ts); 4250 if (info->child_tidptr) 4251 put_user_u32(info->tid, info->child_tidptr); 4252 if (info->parent_tidptr) 4253 put_user_u32(info->tid, info->parent_tidptr); 4254 /* Enable signals. */ 4255 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 4256 /* Signal to the parent that we're ready. */ 4257 pthread_mutex_lock(&info->mutex); 4258 pthread_cond_broadcast(&info->cond); 4259 pthread_mutex_unlock(&info->mutex); 4260 /* Wait until the parent has finished initializing the tls state. */ 4261 pthread_mutex_lock(&clone_lock); 4262 pthread_mutex_unlock(&clone_lock); 4263 cpu_loop(env); 4264 /* never exits */ 4265 return NULL; 4266 } 4267 4268 /* do_fork() must return host values and target errnos (unlike most 4269 do_*() functions). */ 4270 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4271 abi_ulong parent_tidptr, target_ulong newtls, 4272 abi_ulong child_tidptr) 4273 { 4274 CPUState *cpu = ENV_GET_CPU(env); 4275 int ret; 4276 TaskState *ts; 4277 CPUState *new_cpu; 4278 CPUArchState *new_env; 4279 unsigned int nptl_flags; 4280 sigset_t sigmask; 4281 4282 /* Emulate vfork() with fork() */ 4283 if (flags & CLONE_VFORK) 4284 flags &= ~(CLONE_VFORK | CLONE_VM); 4285 4286 if (flags & CLONE_VM) { 4287 TaskState *parent_ts = (TaskState *)cpu->opaque; 4288 new_thread_info info; 4289 pthread_attr_t attr; 4290 4291 ts = g_malloc0(sizeof(TaskState)); 4292 init_task_state(ts); 4293 /* we create a new CPU instance. */ 4294 new_env = cpu_copy(env); 4295 /* Init regs that differ from the parent. */ 4296 cpu_clone_regs(new_env, newsp); 4297 new_cpu = ENV_GET_CPU(new_env); 4298 new_cpu->opaque = ts; 4299 ts->bprm = parent_ts->bprm; 4300 ts->info = parent_ts->info; 4301 nptl_flags = flags; 4302 flags &= ~CLONE_NPTL_FLAGS2; 4303 4304 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4305 ts->child_tidptr = child_tidptr; 4306 } 4307 4308 if (nptl_flags & CLONE_SETTLS) 4309 cpu_set_tls (new_env, newtls); 4310 4311 /* Grab a mutex so that thread setup appears atomic. */ 4312 pthread_mutex_lock(&clone_lock); 4313 4314 memset(&info, 0, sizeof(info)); 4315 pthread_mutex_init(&info.mutex, NULL); 4316 pthread_mutex_lock(&info.mutex); 4317 pthread_cond_init(&info.cond, NULL); 4318 info.env = new_env; 4319 if (nptl_flags & CLONE_CHILD_SETTID) 4320 info.child_tidptr = child_tidptr; 4321 if (nptl_flags & CLONE_PARENT_SETTID) 4322 info.parent_tidptr = parent_tidptr; 4323 4324 ret = pthread_attr_init(&attr); 4325 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4326 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4327 /* It is not safe to deliver signals until the child has finished 4328 initializing, so temporarily block all signals. */ 4329 sigfillset(&sigmask); 4330 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4331 4332 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4333 /* TODO: Free new CPU state if thread creation failed.
*/ 4334 4335 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4336 pthread_attr_destroy(&attr); 4337 if (ret == 0) { 4338 /* Wait for the child to initialize. */ 4339 pthread_cond_wait(&info.cond, &info.mutex); 4340 ret = info.tid; 4341 if (flags & CLONE_PARENT_SETTID) 4342 put_user_u32(ret, parent_tidptr); 4343 } else { 4344 ret = -1; 4345 } 4346 pthread_mutex_unlock(&info.mutex); 4347 pthread_cond_destroy(&info.cond); 4348 pthread_mutex_destroy(&info.mutex); 4349 pthread_mutex_unlock(&clone_lock); 4350 } else { 4351 /* if no CLONE_VM, we consider it is a fork */ 4352 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4353 return -EINVAL; 4354 fork_start(); 4355 ret = fork(); 4356 if (ret == 0) { 4357 /* Child Process. */ 4358 cpu_clone_regs(env, newsp); 4359 fork_end(1); 4360 /* There is a race condition here. The parent process could 4361 theoretically read the TID in the child process before the child 4362 tid is set. This would require using either ptrace 4363 (not implemented) or having *_tidptr to point at a shared memory 4364 mapping. We can't repeat the spinlock hack used above because 4365 the child process gets its own copy of the lock. */ 4366 if (flags & CLONE_CHILD_SETTID) 4367 put_user_u32(gettid(), child_tidptr); 4368 if (flags & CLONE_PARENT_SETTID) 4369 put_user_u32(gettid(), parent_tidptr); 4370 ts = (TaskState *)cpu->opaque; 4371 if (flags & CLONE_SETTLS) 4372 cpu_set_tls (env, newtls); 4373 if (flags & CLONE_CHILD_CLEARTID) 4374 ts->child_tidptr = child_tidptr; 4375 } else { 4376 fork_end(0); 4377 } 4378 } 4379 return ret; 4380 } 4381 4382 /* warning : doesn't handle linux specific flags... */ 4383 static int target_to_host_fcntl_cmd(int cmd) 4384 { 4385 switch(cmd) { 4386 case TARGET_F_DUPFD: 4387 case TARGET_F_GETFD: 4388 case TARGET_F_SETFD: 4389 case TARGET_F_GETFL: 4390 case TARGET_F_SETFL: 4391 return cmd; 4392 case TARGET_F_GETLK: 4393 return F_GETLK; 4394 case TARGET_F_SETLK: 4395 return F_SETLK; 4396 case TARGET_F_SETLKW: 4397 return F_SETLKW; 4398 case TARGET_F_GETOWN: 4399 return F_GETOWN; 4400 case TARGET_F_SETOWN: 4401 return F_SETOWN; 4402 case TARGET_F_GETSIG: 4403 return F_GETSIG; 4404 case TARGET_F_SETSIG: 4405 return F_SETSIG; 4406 #if TARGET_ABI_BITS == 32 4407 case TARGET_F_GETLK64: 4408 return F_GETLK64; 4409 case TARGET_F_SETLK64: 4410 return F_SETLK64; 4411 case TARGET_F_SETLKW64: 4412 return F_SETLKW64; 4413 #endif 4414 case TARGET_F_SETLEASE: 4415 return F_SETLEASE; 4416 case TARGET_F_GETLEASE: 4417 return F_GETLEASE; 4418 #ifdef F_DUPFD_CLOEXEC 4419 case TARGET_F_DUPFD_CLOEXEC: 4420 return F_DUPFD_CLOEXEC; 4421 #endif 4422 case TARGET_F_NOTIFY: 4423 return F_NOTIFY; 4424 default: 4425 return -TARGET_EINVAL; 4426 } 4427 return -TARGET_EINVAL; 4428 } 4429 4430 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4431 static const bitmask_transtbl flock_tbl[] = { 4432 TRANSTBL_CONVERT(F_RDLCK), 4433 TRANSTBL_CONVERT(F_WRLCK), 4434 TRANSTBL_CONVERT(F_UNLCK), 4435 TRANSTBL_CONVERT(F_EXLCK), 4436 TRANSTBL_CONVERT(F_SHLCK), 4437 { 0, 0, 0, 0 } 4438 }; 4439 4440 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4441 { 4442 struct flock fl; 4443 struct target_flock *target_fl; 4444 struct flock64 fl64; 4445 struct target_flock64 *target_fl64; 4446 abi_long ret; 4447 int host_cmd = target_to_host_fcntl_cmd(cmd); 4448 4449 if (host_cmd == -TARGET_EINVAL) 4450 return host_cmd; 4451 4452 switch(cmd) { 4453 case TARGET_F_GETLK: 4454 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4455 return -TARGET_EFAULT; 4456 fl.l_type = 4457 
target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4458 fl.l_whence = tswap16(target_fl->l_whence); 4459 fl.l_start = tswapal(target_fl->l_start); 4460 fl.l_len = tswapal(target_fl->l_len); 4461 fl.l_pid = tswap32(target_fl->l_pid); 4462 unlock_user_struct(target_fl, arg, 0); 4463 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4464 if (ret == 0) { 4465 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4466 return -TARGET_EFAULT; 4467 target_fl->l_type = 4468 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4469 target_fl->l_whence = tswap16(fl.l_whence); 4470 target_fl->l_start = tswapal(fl.l_start); 4471 target_fl->l_len = tswapal(fl.l_len); 4472 target_fl->l_pid = tswap32(fl.l_pid); 4473 unlock_user_struct(target_fl, arg, 1); 4474 } 4475 break; 4476 4477 case TARGET_F_SETLK: 4478 case TARGET_F_SETLKW: 4479 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4480 return -TARGET_EFAULT; 4481 fl.l_type = 4482 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4483 fl.l_whence = tswap16(target_fl->l_whence); 4484 fl.l_start = tswapal(target_fl->l_start); 4485 fl.l_len = tswapal(target_fl->l_len); 4486 fl.l_pid = tswap32(target_fl->l_pid); 4487 unlock_user_struct(target_fl, arg, 0); 4488 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4489 break; 4490 4491 case TARGET_F_GETLK64: 4492 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4493 return -TARGET_EFAULT; 4494 fl64.l_type = 4495 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4496 fl64.l_whence = tswap16(target_fl64->l_whence); 4497 fl64.l_start = tswap64(target_fl64->l_start); 4498 fl64.l_len = tswap64(target_fl64->l_len); 4499 fl64.l_pid = tswap32(target_fl64->l_pid); 4500 unlock_user_struct(target_fl64, arg, 0); 4501 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4502 if (ret == 0) { 4503 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4504 return -TARGET_EFAULT; 4505 target_fl64->l_type = 4506 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4507 target_fl64->l_whence = tswap16(fl64.l_whence); 4508 target_fl64->l_start = tswap64(fl64.l_start); 4509 target_fl64->l_len = tswap64(fl64.l_len); 4510 target_fl64->l_pid = tswap32(fl64.l_pid); 4511 unlock_user_struct(target_fl64, arg, 1); 4512 } 4513 break; 4514 case TARGET_F_SETLK64: 4515 case TARGET_F_SETLKW64: 4516 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4517 return -TARGET_EFAULT; 4518 fl64.l_type = 4519 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4520 fl64.l_whence = tswap16(target_fl64->l_whence); 4521 fl64.l_start = tswap64(target_fl64->l_start); 4522 fl64.l_len = tswap64(target_fl64->l_len); 4523 fl64.l_pid = tswap32(target_fl64->l_pid); 4524 unlock_user_struct(target_fl64, arg, 0); 4525 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4526 break; 4527 4528 case TARGET_F_GETFL: 4529 ret = get_errno(fcntl(fd, host_cmd, arg)); 4530 if (ret >= 0) { 4531 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4532 } 4533 break; 4534 4535 case TARGET_F_SETFL: 4536 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4537 break; 4538 4539 case TARGET_F_SETOWN: 4540 case TARGET_F_GETOWN: 4541 case TARGET_F_SETSIG: 4542 case TARGET_F_GETSIG: 4543 case TARGET_F_SETLEASE: 4544 case TARGET_F_GETLEASE: 4545 ret = get_errno(fcntl(fd, host_cmd, arg)); 4546 break; 4547 4548 default: 4549 ret = get_errno(fcntl(fd, cmd, arg)); 4550 break; 4551 } 4552 return ret; 4553 } 4554 4555 #ifdef USE_UID16 4556 4557 static inline int high2lowuid(int uid) 4558 { 4559 
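/* The 16-bit uid/gid syscall variants (USE_UID16) cannot represent IDs above 65535, so clamp larger host IDs to the overflow ID 65534 before returning them to the guest. */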
if (uid > 65535) 4560 return 65534; 4561 else 4562 return uid; 4563 } 4564 4565 static inline int high2lowgid(int gid) 4566 { 4567 if (gid > 65535) 4568 return 65534; 4569 else 4570 return gid; 4571 } 4572 4573 static inline int low2highuid(int uid) 4574 { 4575 if ((int16_t)uid == -1) 4576 return -1; 4577 else 4578 return uid; 4579 } 4580 4581 static inline int low2highgid(int gid) 4582 { 4583 if ((int16_t)gid == -1) 4584 return -1; 4585 else 4586 return gid; 4587 } 4588 static inline int tswapid(int id) 4589 { 4590 return tswap16(id); 4591 } 4592 4593 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 4594 4595 #else /* !USE_UID16 */ 4596 static inline int high2lowuid(int uid) 4597 { 4598 return uid; 4599 } 4600 static inline int high2lowgid(int gid) 4601 { 4602 return gid; 4603 } 4604 static inline int low2highuid(int uid) 4605 { 4606 return uid; 4607 } 4608 static inline int low2highgid(int gid) 4609 { 4610 return gid; 4611 } 4612 static inline int tswapid(int id) 4613 { 4614 return tswap32(id); 4615 } 4616 4617 #define put_user_id(x, gaddr) put_user_u32(x, gaddr) 4618 4619 #endif /* USE_UID16 */ 4620 4621 void syscall_init(void) 4622 { 4623 IOCTLEntry *ie; 4624 const argtype *arg_type; 4625 int size; 4626 int i; 4627 4628 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4629 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4630 #include "syscall_types.h" 4631 #undef STRUCT 4632 #undef STRUCT_SPECIAL 4633 4634 /* Build target_to_host_errno_table[] table from 4635 * host_to_target_errno_table[]. */ 4636 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4637 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4638 } 4639 4640 /* we patch the ioctl size if necessary. 
We rely on the fact that 4641 no ioctl has all the bits at '1' in the size field */ 4642 ie = ioctl_entries; 4643 while (ie->target_cmd != 0) { 4644 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4645 TARGET_IOC_SIZEMASK) { 4646 arg_type = ie->arg_type; 4647 if (arg_type[0] != TYPE_PTR) { 4648 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4649 ie->target_cmd); 4650 exit(1); 4651 } 4652 arg_type++; 4653 size = thunk_type_size(arg_type, 0); 4654 ie->target_cmd = (ie->target_cmd & 4655 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4656 (size << TARGET_IOC_SIZESHIFT); 4657 } 4658 4659 /* automatic consistency check if same arch */ 4660 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4661 (defined(__x86_64__) && defined(TARGET_X86_64)) 4662 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4663 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4664 ie->name, ie->target_cmd, ie->host_cmd); 4665 } 4666 #endif 4667 ie++; 4668 } 4669 } 4670 4671 #if TARGET_ABI_BITS == 32 4672 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4673 { 4674 #ifdef TARGET_WORDS_BIGENDIAN 4675 return ((uint64_t)word0 << 32) | word1; 4676 #else 4677 return ((uint64_t)word1 << 32) | word0; 4678 #endif 4679 } 4680 #else /* TARGET_ABI_BITS == 32 */ 4681 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4682 { 4683 return word0; 4684 } 4685 #endif /* TARGET_ABI_BITS != 32 */ 4686 4687 #ifdef TARGET_NR_truncate64 4688 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4689 abi_long arg2, 4690 abi_long arg3, 4691 abi_long arg4) 4692 { 4693 if (regpairs_aligned(cpu_env)) { 4694 arg2 = arg3; 4695 arg3 = arg4; 4696 } 4697 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4698 } 4699 #endif 4700 4701 #ifdef TARGET_NR_ftruncate64 4702 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4703 abi_long arg2, 4704 abi_long arg3, 4705 abi_long arg4) 4706 { 4707 if (regpairs_aligned(cpu_env)) { 4708 arg2 = arg3; 4709 arg3 = arg4; 4710 } 4711 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4712 } 4713 #endif 4714 4715 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4716 abi_ulong target_addr) 4717 { 4718 struct target_timespec *target_ts; 4719 4720 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4721 return -TARGET_EFAULT; 4722 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4723 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4724 unlock_user_struct(target_ts, target_addr, 0); 4725 return 0; 4726 } 4727 4728 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4729 struct timespec *host_ts) 4730 { 4731 struct target_timespec *target_ts; 4732 4733 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4734 return -TARGET_EFAULT; 4735 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4736 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4737 unlock_user_struct(target_ts, target_addr, 1); 4738 return 0; 4739 } 4740 4741 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 4742 abi_ulong target_addr) 4743 { 4744 struct target_itimerspec *target_itspec; 4745 4746 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 4747 return -TARGET_EFAULT; 4748 } 4749 4750 host_itspec->it_interval.tv_sec = 4751 tswapal(target_itspec->it_interval.tv_sec); 4752 host_itspec->it_interval.tv_nsec = 4753 tswapal(target_itspec->it_interval.tv_nsec); 4754 
host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 4755 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 4756 4757 unlock_user_struct(target_itspec, target_addr, 1); 4758 return 0; 4759 } 4760 4761 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 4762 struct itimerspec *host_its) 4763 { 4764 struct target_itimerspec *target_itspec; 4765 4766 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 4767 return -TARGET_EFAULT; 4768 } 4769 4770 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 4771 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 4772 4773 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 4774 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 4775 4776 unlock_user_struct(target_itspec, target_addr, 0); 4777 return 0; 4778 } 4779 4780 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4781 static inline abi_long host_to_target_stat64(void *cpu_env, 4782 abi_ulong target_addr, 4783 struct stat *host_st) 4784 { 4785 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 4786 if (((CPUARMState *)cpu_env)->eabi) { 4787 struct target_eabi_stat64 *target_st; 4788 4789 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4790 return -TARGET_EFAULT; 4791 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4792 __put_user(host_st->st_dev, &target_st->st_dev); 4793 __put_user(host_st->st_ino, &target_st->st_ino); 4794 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4795 __put_user(host_st->st_ino, &target_st->__st_ino); 4796 #endif 4797 __put_user(host_st->st_mode, &target_st->st_mode); 4798 __put_user(host_st->st_nlink, &target_st->st_nlink); 4799 __put_user(host_st->st_uid, &target_st->st_uid); 4800 __put_user(host_st->st_gid, &target_st->st_gid); 4801 __put_user(host_st->st_rdev, &target_st->st_rdev); 4802 __put_user(host_st->st_size, &target_st->st_size); 4803 __put_user(host_st->st_blksize, &target_st->st_blksize); 4804 __put_user(host_st->st_blocks, &target_st->st_blocks); 4805 __put_user(host_st->st_atime, &target_st->target_st_atime); 4806 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4807 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4808 unlock_user_struct(target_st, target_addr, 1); 4809 } else 4810 #endif 4811 { 4812 #if defined(TARGET_HAS_STRUCT_STAT64) 4813 struct target_stat64 *target_st; 4814 #else 4815 struct target_stat *target_st; 4816 #endif 4817 4818 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4819 return -TARGET_EFAULT; 4820 memset(target_st, 0, sizeof(*target_st)); 4821 __put_user(host_st->st_dev, &target_st->st_dev); 4822 __put_user(host_st->st_ino, &target_st->st_ino); 4823 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4824 __put_user(host_st->st_ino, &target_st->__st_ino); 4825 #endif 4826 __put_user(host_st->st_mode, &target_st->st_mode); 4827 __put_user(host_st->st_nlink, &target_st->st_nlink); 4828 __put_user(host_st->st_uid, &target_st->st_uid); 4829 __put_user(host_st->st_gid, &target_st->st_gid); 4830 __put_user(host_st->st_rdev, &target_st->st_rdev); 4831 /* XXX: better use of kernel struct */ 4832 __put_user(host_st->st_size, &target_st->st_size); 4833 __put_user(host_st->st_blksize, &target_st->st_blksize); 4834 __put_user(host_st->st_blocks, &target_st->st_blocks); 4835 __put_user(host_st->st_atime, &target_st->target_st_atime); 4836 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4837 __put_user(host_st->st_ctime, 
&target_st->target_st_ctime); 4838 unlock_user_struct(target_st, target_addr, 1); 4839 } 4840 4841 return 0; 4842 } 4843 #endif 4844 4845 /* ??? Using host futex calls even when target atomic operations 4846 are not really atomic probably breaks things. However implementing 4847 futexes locally would make futexes shared between multiple processes 4848 tricky. However they're probably useless because guest atomic 4849 operations won't work either. */ 4850 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4851 target_ulong uaddr2, int val3) 4852 { 4853 struct timespec ts, *pts; 4854 int base_op; 4855 4856 /* ??? We assume FUTEX_* constants are the same on both host 4857 and target. */ 4858 #ifdef FUTEX_CMD_MASK 4859 base_op = op & FUTEX_CMD_MASK; 4860 #else 4861 base_op = op; 4862 #endif 4863 switch (base_op) { 4864 case FUTEX_WAIT: 4865 case FUTEX_WAIT_BITSET: 4866 if (timeout) { 4867 pts = &ts; 4868 target_to_host_timespec(pts, timeout); 4869 } else { 4870 pts = NULL; 4871 } 4872 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4873 pts, NULL, val3)); 4874 case FUTEX_WAKE: 4875 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4876 case FUTEX_FD: 4877 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4878 case FUTEX_REQUEUE: 4879 case FUTEX_CMP_REQUEUE: 4880 case FUTEX_WAKE_OP: 4881 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4882 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4883 But the prototype takes a `struct timespec *'; insert casts 4884 to satisfy the compiler. We do not need to tswap TIMEOUT 4885 since it's not compared to guest memory. */ 4886 pts = (struct timespec *)(uintptr_t) timeout; 4887 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4888 g2h(uaddr2), 4889 (base_op == FUTEX_CMP_REQUEUE 4890 ? tswap32(val3) 4891 : val3))); 4892 default: 4893 return -TARGET_ENOSYS; 4894 } 4895 } 4896 4897 /* Map host to target signal numbers for the wait family of syscalls. 4898 Assume all other status bits are the same. */ 4899 int host_to_target_waitstatus(int status) 4900 { 4901 if (WIFSIGNALED(status)) { 4902 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4903 } 4904 if (WIFSTOPPED(status)) { 4905 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4906 | (status & 0xff); 4907 } 4908 return status; 4909 } 4910 4911 static int relstr_to_int(const char *s) 4912 { 4913 /* Convert a uname release string like "2.6.18" to an integer 4914 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.) 4915 */ 4916 int i, n, tmp; 4917 4918 tmp = 0; 4919 for (i = 0; i < 3; i++) { 4920 n = 0; 4921 while (*s >= '0' && *s <= '9') { 4922 n *= 10; 4923 n += *s - '0'; 4924 s++; 4925 } 4926 tmp = (tmp << 8) + n; 4927 if (*s == '.') { 4928 s++; 4929 } 4930 } 4931 return tmp; 4932 } 4933 4934 int get_osversion(void) 4935 { 4936 static int osversion; 4937 struct new_utsname buf; 4938 const char *s; 4939 4940 if (osversion) 4941 return osversion; 4942 if (qemu_uname_release && *qemu_uname_release) { 4943 s = qemu_uname_release; 4944 } else { 4945 if (sys_uname(&buf)) 4946 return 0; 4947 s = buf.release; 4948 } 4949 osversion = relstr_to_int(s); 4950 return osversion; 4951 } 4952 4953 void init_qemu_uname_release(void) 4954 { 4955 /* Initialize qemu_uname_release for later use. 4956 * If the host kernel is too old and the user hasn't asked for 4957 * a specific fake version number, we might want to fake a minimum 4958 * target kernel version. 
4959 */ 4960 #ifdef UNAME_MINIMUM_RELEASE 4961 struct new_utsname buf; 4962 4963 if (qemu_uname_release && *qemu_uname_release) { 4964 return; 4965 } 4966 4967 if (sys_uname(&buf)) { 4968 return; 4969 } 4970 4971 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) { 4972 qemu_uname_release = UNAME_MINIMUM_RELEASE; 4973 } 4974 #endif 4975 } 4976 4977 static int open_self_maps(void *cpu_env, int fd) 4978 { 4979 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4980 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 4981 TaskState *ts = cpu->opaque; 4982 #endif 4983 FILE *fp; 4984 char *line = NULL; 4985 size_t len = 0; 4986 ssize_t read; 4987 4988 fp = fopen("/proc/self/maps", "r"); 4989 if (fp == NULL) { 4990 return -EACCES; 4991 } 4992 4993 while ((read = getline(&line, &len, fp)) != -1) { 4994 int fields, dev_maj, dev_min, inode; 4995 uint64_t min, max, offset; 4996 char flag_r, flag_w, flag_x, flag_p; 4997 char path[512] = ""; 4998 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 4999 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 5000 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5001 5002 if ((fields < 10) || (fields > 11)) { 5003 continue; 5004 } 5005 if (!strncmp(path, "[stack]", 7)) { 5006 continue; 5007 } 5008 if (h2g_valid(min) && h2g_valid(max)) { 5009 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5010 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 5011 h2g(min), h2g(max), flag_r, flag_w, 5012 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5013 path[0] ? " " : "", path); 5014 } 5015 } 5016 5017 free(line); 5018 fclose(fp); 5019 5020 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5021 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 5022 (unsigned long long)ts->info->stack_limit, 5023 (unsigned long long)(ts->info->start_stack + 5024 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 5025 (unsigned long long)0); 5026 #endif 5027 5028 return 0; 5029 } 5030 5031 static int open_self_stat(void *cpu_env, int fd) 5032 { 5033 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5034 TaskState *ts = cpu->opaque; 5035 abi_ulong start_stack = ts->info->start_stack; 5036 int i; 5037 5038 for (i = 0; i < 44; i++) { 5039 char buf[128]; 5040 int len; 5041 uint64_t val = 0; 5042 5043 if (i == 0) { 5044 /* pid */ 5045 val = getpid(); 5046 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5047 } else if (i == 1) { 5048 /* app name */ 5049 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5050 } else if (i == 27) { 5051 /* stack bottom */ 5052 val = start_stack; 5053 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5054 } else { 5055 /* for the rest, there is MasterCard */ 5056 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 5057 } 5058 5059 len = strlen(buf); 5060 if (write(fd, buf, len) != len) { 5061 return -1; 5062 } 5063 } 5064 5065 return 0; 5066 } 5067 5068 static int open_self_auxv(void *cpu_env, int fd) 5069 { 5070 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5071 TaskState *ts = cpu->opaque; 5072 abi_ulong auxv = ts->info->saved_auxv; 5073 abi_ulong len = ts->info->auxv_len; 5074 char *ptr; 5075 5076 /* 5077 * Auxiliary vector is stored in target process stack. 
5078 * read in whole auxv vector and copy it to file 5079 */ 5080 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5081 if (ptr != NULL) { 5082 while (len > 0) { 5083 ssize_t r; 5084 r = write(fd, ptr, len); 5085 if (r <= 0) { 5086 break; 5087 } 5088 len -= r; 5089 ptr += r; 5090 } 5091 lseek(fd, 0, SEEK_SET); 5092 unlock_user(ptr, auxv, len); 5093 } 5094 5095 return 0; 5096 } 5097 5098 static int is_proc_myself(const char *filename, const char *entry) 5099 { 5100 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 5101 filename += strlen("/proc/"); 5102 if (!strncmp(filename, "self/", strlen("self/"))) { 5103 filename += strlen("self/"); 5104 } else if (*filename >= '1' && *filename <= '9') { 5105 char myself[80]; 5106 snprintf(myself, sizeof(myself), "%d/", getpid()); 5107 if (!strncmp(filename, myself, strlen(myself))) { 5108 filename += strlen(myself); 5109 } else { 5110 return 0; 5111 } 5112 } else { 5113 return 0; 5114 } 5115 if (!strcmp(filename, entry)) { 5116 return 1; 5117 } 5118 } 5119 return 0; 5120 } 5121 5122 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5123 static int is_proc(const char *filename, const char *entry) 5124 { 5125 return strcmp(filename, entry) == 0; 5126 } 5127 5128 static int open_net_route(void *cpu_env, int fd) 5129 { 5130 FILE *fp; 5131 char *line = NULL; 5132 size_t len = 0; 5133 ssize_t read; 5134 5135 fp = fopen("/proc/net/route", "r"); 5136 if (fp == NULL) { 5137 return -EACCES; 5138 } 5139 5140 /* read header */ 5141 5142 read = getline(&line, &len, fp); 5143 dprintf(fd, "%s", line); 5144 5145 /* read routes */ 5146 5147 while ((read = getline(&line, &len, fp)) != -1) { 5148 char iface[16]; 5149 uint32_t dest, gw, mask; 5150 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 5151 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5152 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 5153 &mask, &mtu, &window, &irtt); 5154 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5155 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 5156 metric, tswap32(mask), mtu, window, irtt); 5157 } 5158 5159 free(line); 5160 fclose(fp); 5161 5162 return 0; 5163 } 5164 #endif 5165 5166 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 5167 { 5168 struct fake_open { 5169 const char *filename; 5170 int (*fill)(void *cpu_env, int fd); 5171 int (*cmp)(const char *s1, const char *s2); 5172 }; 5173 const struct fake_open *fake_open; 5174 static const struct fake_open fakes[] = { 5175 { "maps", open_self_maps, is_proc_myself }, 5176 { "stat", open_self_stat, is_proc_myself }, 5177 { "auxv", open_self_auxv, is_proc_myself }, 5178 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5179 { "/proc/net/route", open_net_route, is_proc }, 5180 #endif 5181 { NULL, NULL, NULL } 5182 }; 5183 5184 for (fake_open = fakes; fake_open->filename; fake_open++) { 5185 if (fake_open->cmp(pathname, fake_open->filename)) { 5186 break; 5187 } 5188 } 5189 5190 if (fake_open->filename) { 5191 const char *tmpdir; 5192 char filename[PATH_MAX]; 5193 int fd, r; 5194 5195 /* create temporary file to map stat to */ 5196 tmpdir = getenv("TMPDIR"); 5197 if (!tmpdir) 5198 tmpdir = "/tmp"; 5199 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5200 fd = mkstemp(filename); 5201 if (fd < 0) { 5202 return fd; 5203 } 5204 unlink(filename); 5205 5206 if ((r = fake_open->fill(cpu_env, fd))) { 5207 close(fd); 5208 return r; 5209 } 5210 lseek(fd, 0, SEEK_SET); 5211 5212 return fd; 
5213 } 5214 5215 return get_errno(open(path(pathname), flags, mode)); 5216 } 5217 5218 /* do_syscall() should always have a single exit point at the end so 5219 that actions, such as logging of syscall results, can be performed. 5220 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 5221 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5222 abi_long arg2, abi_long arg3, abi_long arg4, 5223 abi_long arg5, abi_long arg6, abi_long arg7, 5224 abi_long arg8) 5225 { 5226 CPUState *cpu = ENV_GET_CPU(cpu_env); 5227 abi_long ret; 5228 struct stat st; 5229 struct statfs stfs; 5230 void *p; 5231 5232 #ifdef DEBUG 5233 gemu_log("syscall %d", num); 5234 #endif 5235 if(do_strace) 5236 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5237 5238 switch(num) { 5239 case TARGET_NR_exit: 5240 /* In old applications this may be used to implement _exit(2). 5241 However in threaded applications it is used for thread termination, 5242 and _exit_group is used for application termination. 5243 Do thread termination if we have more than one thread. */ 5244 /* FIXME: This probably breaks if a signal arrives. We should probably 5245 be disabling signals. */ 5246 if (CPU_NEXT(first_cpu)) { 5247 TaskState *ts; 5248 5249 cpu_list_lock(); 5250 /* Remove the CPU from the list. */ 5251 QTAILQ_REMOVE(&cpus, cpu, node); 5252 cpu_list_unlock(); 5253 ts = cpu->opaque; 5254 if (ts->child_tidptr) { 5255 put_user_u32(0, ts->child_tidptr); 5256 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5257 NULL, NULL, 0); 5258 } 5259 thread_cpu = NULL; 5260 object_unref(OBJECT(cpu)); 5261 g_free(ts); 5262 pthread_exit(NULL); 5263 } 5264 #ifdef TARGET_GPROF 5265 _mcleanup(); 5266 #endif 5267 gdb_exit(cpu_env, arg1); 5268 _exit(arg1); 5269 ret = 0; /* avoid warning */ 5270 break; 5271 case TARGET_NR_read: 5272 if (arg3 == 0) 5273 ret = 0; 5274 else { 5275 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5276 goto efault; 5277 ret = get_errno(read(arg1, p, arg3)); 5278 unlock_user(p, arg2, ret); 5279 } 5280 break; 5281 case TARGET_NR_write: 5282 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5283 goto efault; 5284 ret = get_errno(write(arg1, p, arg3)); 5285 unlock_user(p, arg2, 0); 5286 break; 5287 case TARGET_NR_open: 5288 if (!(p = lock_user_string(arg1))) 5289 goto efault; 5290 ret = get_errno(do_open(cpu_env, p, 5291 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5292 arg3)); 5293 unlock_user(p, arg1, 0); 5294 break; 5295 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5296 case TARGET_NR_openat: 5297 if (!(p = lock_user_string(arg2))) 5298 goto efault; 5299 ret = get_errno(sys_openat(arg1, 5300 path(p), 5301 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5302 arg4)); 5303 unlock_user(p, arg2, 0); 5304 break; 5305 #endif 5306 case TARGET_NR_close: 5307 ret = get_errno(close(arg1)); 5308 break; 5309 case TARGET_NR_brk: 5310 ret = do_brk(arg1); 5311 break; 5312 case TARGET_NR_fork: 5313 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5314 break; 5315 #ifdef TARGET_NR_waitpid 5316 case TARGET_NR_waitpid: 5317 { 5318 int status; 5319 ret = get_errno(waitpid(arg1, &status, arg3)); 5320 if (!is_error(ret) && arg2 && ret 5321 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5322 goto efault; 5323 } 5324 break; 5325 #endif 5326 #ifdef TARGET_NR_waitid 5327 case TARGET_NR_waitid: 5328 { 5329 siginfo_t info; 5330 info.si_pid = 0; 5331 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5332 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5333 if (!(p = lock_user(VERIFY_WRITE, arg3,
sizeof(target_siginfo_t), 0))) 5334 goto efault; 5335 host_to_target_siginfo(p, &info); 5336 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5337 } 5338 } 5339 break; 5340 #endif 5341 #ifdef TARGET_NR_creat /* not on alpha */ 5342 case TARGET_NR_creat: 5343 if (!(p = lock_user_string(arg1))) 5344 goto efault; 5345 ret = get_errno(creat(p, arg2)); 5346 unlock_user(p, arg1, 0); 5347 break; 5348 #endif 5349 case TARGET_NR_link: 5350 { 5351 void * p2; 5352 p = lock_user_string(arg1); 5353 p2 = lock_user_string(arg2); 5354 if (!p || !p2) 5355 ret = -TARGET_EFAULT; 5356 else 5357 ret = get_errno(link(p, p2)); 5358 unlock_user(p2, arg2, 0); 5359 unlock_user(p, arg1, 0); 5360 } 5361 break; 5362 #if defined(TARGET_NR_linkat) 5363 case TARGET_NR_linkat: 5364 { 5365 void * p2 = NULL; 5366 if (!arg2 || !arg4) 5367 goto efault; 5368 p = lock_user_string(arg2); 5369 p2 = lock_user_string(arg4); 5370 if (!p || !p2) 5371 ret = -TARGET_EFAULT; 5372 else 5373 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5374 unlock_user(p, arg2, 0); 5375 unlock_user(p2, arg4, 0); 5376 } 5377 break; 5378 #endif 5379 case TARGET_NR_unlink: 5380 if (!(p = lock_user_string(arg1))) 5381 goto efault; 5382 ret = get_errno(unlink(p)); 5383 unlock_user(p, arg1, 0); 5384 break; 5385 #if defined(TARGET_NR_unlinkat) 5386 case TARGET_NR_unlinkat: 5387 if (!(p = lock_user_string(arg2))) 5388 goto efault; 5389 ret = get_errno(unlinkat(arg1, p, arg3)); 5390 unlock_user(p, arg2, 0); 5391 break; 5392 #endif 5393 case TARGET_NR_execve: 5394 { 5395 char **argp, **envp; 5396 int argc, envc; 5397 abi_ulong gp; 5398 abi_ulong guest_argp; 5399 abi_ulong guest_envp; 5400 abi_ulong addr; 5401 char **q; 5402 int total_size = 0; 5403 5404 argc = 0; 5405 guest_argp = arg2; 5406 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5407 if (get_user_ual(addr, gp)) 5408 goto efault; 5409 if (!addr) 5410 break; 5411 argc++; 5412 } 5413 envc = 0; 5414 guest_envp = arg3; 5415 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5416 if (get_user_ual(addr, gp)) 5417 goto efault; 5418 if (!addr) 5419 break; 5420 envc++; 5421 } 5422 5423 argp = alloca((argc + 1) * sizeof(void *)); 5424 envp = alloca((envc + 1) * sizeof(void *)); 5425 5426 for (gp = guest_argp, q = argp; gp; 5427 gp += sizeof(abi_ulong), q++) { 5428 if (get_user_ual(addr, gp)) 5429 goto execve_efault; 5430 if (!addr) 5431 break; 5432 if (!(*q = lock_user_string(addr))) 5433 goto execve_efault; 5434 total_size += strlen(*q) + 1; 5435 } 5436 *q = NULL; 5437 5438 for (gp = guest_envp, q = envp; gp; 5439 gp += sizeof(abi_ulong), q++) { 5440 if (get_user_ual(addr, gp)) 5441 goto execve_efault; 5442 if (!addr) 5443 break; 5444 if (!(*q = lock_user_string(addr))) 5445 goto execve_efault; 5446 total_size += strlen(*q) + 1; 5447 } 5448 *q = NULL; 5449 5450 /* This case will not be caught by the host's execve() if its 5451 page size is bigger than the target's. 
*/ 5452 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5453 ret = -TARGET_E2BIG; 5454 goto execve_end; 5455 } 5456 if (!(p = lock_user_string(arg1))) 5457 goto execve_efault; 5458 ret = get_errno(execve(p, argp, envp)); 5459 unlock_user(p, arg1, 0); 5460 5461 goto execve_end; 5462 5463 execve_efault: 5464 ret = -TARGET_EFAULT; 5465 5466 execve_end: 5467 for (gp = guest_argp, q = argp; *q; 5468 gp += sizeof(abi_ulong), q++) { 5469 if (get_user_ual(addr, gp) 5470 || !addr) 5471 break; 5472 unlock_user(*q, addr, 0); 5473 } 5474 for (gp = guest_envp, q = envp; *q; 5475 gp += sizeof(abi_ulong), q++) { 5476 if (get_user_ual(addr, gp) 5477 || !addr) 5478 break; 5479 unlock_user(*q, addr, 0); 5480 } 5481 } 5482 break; 5483 case TARGET_NR_chdir: 5484 if (!(p = lock_user_string(arg1))) 5485 goto efault; 5486 ret = get_errno(chdir(p)); 5487 unlock_user(p, arg1, 0); 5488 break; 5489 #ifdef TARGET_NR_time 5490 case TARGET_NR_time: 5491 { 5492 time_t host_time; 5493 ret = get_errno(time(&host_time)); 5494 if (!is_error(ret) 5495 && arg1 5496 && put_user_sal(host_time, arg1)) 5497 goto efault; 5498 } 5499 break; 5500 #endif 5501 case TARGET_NR_mknod: 5502 if (!(p = lock_user_string(arg1))) 5503 goto efault; 5504 ret = get_errno(mknod(p, arg2, arg3)); 5505 unlock_user(p, arg1, 0); 5506 break; 5507 #if defined(TARGET_NR_mknodat) 5508 case TARGET_NR_mknodat: 5509 if (!(p = lock_user_string(arg2))) 5510 goto efault; 5511 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5512 unlock_user(p, arg2, 0); 5513 break; 5514 #endif 5515 case TARGET_NR_chmod: 5516 if (!(p = lock_user_string(arg1))) 5517 goto efault; 5518 ret = get_errno(chmod(p, arg2)); 5519 unlock_user(p, arg1, 0); 5520 break; 5521 #ifdef TARGET_NR_break 5522 case TARGET_NR_break: 5523 goto unimplemented; 5524 #endif 5525 #ifdef TARGET_NR_oldstat 5526 case TARGET_NR_oldstat: 5527 goto unimplemented; 5528 #endif 5529 case TARGET_NR_lseek: 5530 ret = get_errno(lseek(arg1, arg2, arg3)); 5531 break; 5532 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5533 /* Alpha specific */ 5534 case TARGET_NR_getxpid: 5535 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5536 ret = get_errno(getpid()); 5537 break; 5538 #endif 5539 #ifdef TARGET_NR_getpid 5540 case TARGET_NR_getpid: 5541 ret = get_errno(getpid()); 5542 break; 5543 #endif 5544 case TARGET_NR_mount: 5545 { 5546 /* need to look at the data field */ 5547 void *p2, *p3; 5548 p = lock_user_string(arg1); 5549 p2 = lock_user_string(arg2); 5550 p3 = lock_user_string(arg3); 5551 if (!p || !p2 || !p3) 5552 ret = -TARGET_EFAULT; 5553 else { 5554 /* FIXME - arg5 should be locked, but it isn't clear how to 5555 * do that since it's not guaranteed to be a NULL-terminated 5556 * string. 5557 */ 5558 if ( ! 
arg5 ) 5559 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5560 else 5561 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5562 } 5563 unlock_user(p, arg1, 0); 5564 unlock_user(p2, arg2, 0); 5565 unlock_user(p3, arg3, 0); 5566 break; 5567 } 5568 #ifdef TARGET_NR_umount 5569 case TARGET_NR_umount: 5570 if (!(p = lock_user_string(arg1))) 5571 goto efault; 5572 ret = get_errno(umount(p)); 5573 unlock_user(p, arg1, 0); 5574 break; 5575 #endif 5576 #ifdef TARGET_NR_stime /* not on alpha */ 5577 case TARGET_NR_stime: 5578 { 5579 time_t host_time; 5580 if (get_user_sal(host_time, arg1)) 5581 goto efault; 5582 ret = get_errno(stime(&host_time)); 5583 } 5584 break; 5585 #endif 5586 case TARGET_NR_ptrace: 5587 goto unimplemented; 5588 #ifdef TARGET_NR_alarm /* not on alpha */ 5589 case TARGET_NR_alarm: 5590 ret = alarm(arg1); 5591 break; 5592 #endif 5593 #ifdef TARGET_NR_oldfstat 5594 case TARGET_NR_oldfstat: 5595 goto unimplemented; 5596 #endif 5597 #ifdef TARGET_NR_pause /* not on alpha */ 5598 case TARGET_NR_pause: 5599 ret = get_errno(pause()); 5600 break; 5601 #endif 5602 #ifdef TARGET_NR_utime 5603 case TARGET_NR_utime: 5604 { 5605 struct utimbuf tbuf, *host_tbuf; 5606 struct target_utimbuf *target_tbuf; 5607 if (arg2) { 5608 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5609 goto efault; 5610 tbuf.actime = tswapal(target_tbuf->actime); 5611 tbuf.modtime = tswapal(target_tbuf->modtime); 5612 unlock_user_struct(target_tbuf, arg2, 0); 5613 host_tbuf = &tbuf; 5614 } else { 5615 host_tbuf = NULL; 5616 } 5617 if (!(p = lock_user_string(arg1))) 5618 goto efault; 5619 ret = get_errno(utime(p, host_tbuf)); 5620 unlock_user(p, arg1, 0); 5621 } 5622 break; 5623 #endif 5624 case TARGET_NR_utimes: 5625 { 5626 struct timeval *tvp, tv[2]; 5627 if (arg2) { 5628 if (copy_from_user_timeval(&tv[0], arg2) 5629 || copy_from_user_timeval(&tv[1], 5630 arg2 + sizeof(struct target_timeval))) 5631 goto efault; 5632 tvp = tv; 5633 } else { 5634 tvp = NULL; 5635 } 5636 if (!(p = lock_user_string(arg1))) 5637 goto efault; 5638 ret = get_errno(utimes(p, tvp)); 5639 unlock_user(p, arg1, 0); 5640 } 5641 break; 5642 #if defined(TARGET_NR_futimesat) 5643 case TARGET_NR_futimesat: 5644 { 5645 struct timeval *tvp, tv[2]; 5646 if (arg3) { 5647 if (copy_from_user_timeval(&tv[0], arg3) 5648 || copy_from_user_timeval(&tv[1], 5649 arg3 + sizeof(struct target_timeval))) 5650 goto efault; 5651 tvp = tv; 5652 } else { 5653 tvp = NULL; 5654 } 5655 if (!(p = lock_user_string(arg2))) 5656 goto efault; 5657 ret = get_errno(futimesat(arg1, path(p), tvp)); 5658 unlock_user(p, arg2, 0); 5659 } 5660 break; 5661 #endif 5662 #ifdef TARGET_NR_stty 5663 case TARGET_NR_stty: 5664 goto unimplemented; 5665 #endif 5666 #ifdef TARGET_NR_gtty 5667 case TARGET_NR_gtty: 5668 goto unimplemented; 5669 #endif 5670 case TARGET_NR_access: 5671 if (!(p = lock_user_string(arg1))) 5672 goto efault; 5673 ret = get_errno(access(path(p), arg2)); 5674 unlock_user(p, arg1, 0); 5675 break; 5676 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5677 case TARGET_NR_faccessat: 5678 if (!(p = lock_user_string(arg2))) 5679 goto efault; 5680 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5681 unlock_user(p, arg2, 0); 5682 break; 5683 #endif 5684 #ifdef TARGET_NR_nice /* not on alpha */ 5685 case TARGET_NR_nice: 5686 ret = get_errno(nice(arg1)); 5687 break; 5688 #endif 5689 #ifdef TARGET_NR_ftime 5690 case TARGET_NR_ftime: 5691 goto unimplemented; 5692 #endif 5693 case TARGET_NR_sync: 5694 sync(); 5695 ret = 0; 5696 break; 
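/* The cases below follow the same pattern used throughout do_syscall(): guest strings and buffers are pinned with lock_user_string() or lock_user() around the host call and released with unlock_user(), and values whose encoding differs between guest and host (signal numbers, fcntl flags, struct layouts) go through the corresponding target_to_host and host_to_target conversion helpers. */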
5697 case TARGET_NR_kill: 5698 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5699 break; 5700 case TARGET_NR_rename: 5701 { 5702 void *p2; 5703 p = lock_user_string(arg1); 5704 p2 = lock_user_string(arg2); 5705 if (!p || !p2) 5706 ret = -TARGET_EFAULT; 5707 else 5708 ret = get_errno(rename(p, p2)); 5709 unlock_user(p2, arg2, 0); 5710 unlock_user(p, arg1, 0); 5711 } 5712 break; 5713 #if defined(TARGET_NR_renameat) 5714 case TARGET_NR_renameat: 5715 { 5716 void *p2; 5717 p = lock_user_string(arg2); 5718 p2 = lock_user_string(arg4); 5719 if (!p || !p2) 5720 ret = -TARGET_EFAULT; 5721 else 5722 ret = get_errno(renameat(arg1, p, arg3, p2)); 5723 unlock_user(p2, arg4, 0); 5724 unlock_user(p, arg2, 0); 5725 } 5726 break; 5727 #endif 5728 case TARGET_NR_mkdir: 5729 if (!(p = lock_user_string(arg1))) 5730 goto efault; 5731 ret = get_errno(mkdir(p, arg2)); 5732 unlock_user(p, arg1, 0); 5733 break; 5734 #if defined(TARGET_NR_mkdirat) 5735 case TARGET_NR_mkdirat: 5736 if (!(p = lock_user_string(arg2))) 5737 goto efault; 5738 ret = get_errno(mkdirat(arg1, p, arg3)); 5739 unlock_user(p, arg2, 0); 5740 break; 5741 #endif 5742 case TARGET_NR_rmdir: 5743 if (!(p = lock_user_string(arg1))) 5744 goto efault; 5745 ret = get_errno(rmdir(p)); 5746 unlock_user(p, arg1, 0); 5747 break; 5748 case TARGET_NR_dup: 5749 ret = get_errno(dup(arg1)); 5750 break; 5751 case TARGET_NR_pipe: 5752 ret = do_pipe(cpu_env, arg1, 0, 0); 5753 break; 5754 #ifdef TARGET_NR_pipe2 5755 case TARGET_NR_pipe2: 5756 ret = do_pipe(cpu_env, arg1, 5757 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5758 break; 5759 #endif 5760 case TARGET_NR_times: 5761 { 5762 struct target_tms *tmsp; 5763 struct tms tms; 5764 ret = get_errno(times(&tms)); 5765 if (arg1) { 5766 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5767 if (!tmsp) 5768 goto efault; 5769 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5770 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5771 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5772 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5773 } 5774 if (!is_error(ret)) 5775 ret = host_to_target_clock_t(ret); 5776 } 5777 break; 5778 #ifdef TARGET_NR_prof 5779 case TARGET_NR_prof: 5780 goto unimplemented; 5781 #endif 5782 #ifdef TARGET_NR_signal 5783 case TARGET_NR_signal: 5784 goto unimplemented; 5785 #endif 5786 case TARGET_NR_acct: 5787 if (arg1 == 0) { 5788 ret = get_errno(acct(NULL)); 5789 } else { 5790 if (!(p = lock_user_string(arg1))) 5791 goto efault; 5792 ret = get_errno(acct(path(p))); 5793 unlock_user(p, arg1, 0); 5794 } 5795 break; 5796 #ifdef TARGET_NR_umount2 5797 case TARGET_NR_umount2: 5798 if (!(p = lock_user_string(arg1))) 5799 goto efault; 5800 ret = get_errno(umount2(p, arg2)); 5801 unlock_user(p, arg1, 0); 5802 break; 5803 #endif 5804 #ifdef TARGET_NR_lock 5805 case TARGET_NR_lock: 5806 goto unimplemented; 5807 #endif 5808 case TARGET_NR_ioctl: 5809 ret = do_ioctl(arg1, arg2, arg3); 5810 break; 5811 case TARGET_NR_fcntl: 5812 ret = do_fcntl(arg1, arg2, arg3); 5813 break; 5814 #ifdef TARGET_NR_mpx 5815 case TARGET_NR_mpx: 5816 goto unimplemented; 5817 #endif 5818 case TARGET_NR_setpgid: 5819 ret = get_errno(setpgid(arg1, arg2)); 5820 break; 5821 #ifdef TARGET_NR_ulimit 5822 case TARGET_NR_ulimit: 5823 goto unimplemented; 5824 #endif 5825 #ifdef TARGET_NR_oldolduname 5826 case TARGET_NR_oldolduname: 5827 goto unimplemented; 5828 #endif 5829 case TARGET_NR_umask: 5830 ret = get_errno(umask(arg1)); 5831 
break; 5832 case TARGET_NR_chroot: 5833 if (!(p = lock_user_string(arg1))) 5834 goto efault; 5835 ret = get_errno(chroot(p)); 5836 unlock_user(p, arg1, 0); 5837 break; 5838 case TARGET_NR_ustat: 5839 goto unimplemented; 5840 case TARGET_NR_dup2: 5841 ret = get_errno(dup2(arg1, arg2)); 5842 break; 5843 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5844 case TARGET_NR_dup3: 5845 ret = get_errno(dup3(arg1, arg2, arg3)); 5846 break; 5847 #endif 5848 #ifdef TARGET_NR_getppid /* not on alpha */ 5849 case TARGET_NR_getppid: 5850 ret = get_errno(getppid()); 5851 break; 5852 #endif 5853 case TARGET_NR_getpgrp: 5854 ret = get_errno(getpgrp()); 5855 break; 5856 case TARGET_NR_setsid: 5857 ret = get_errno(setsid()); 5858 break; 5859 #ifdef TARGET_NR_sigaction 5860 case TARGET_NR_sigaction: 5861 { 5862 #if defined(TARGET_ALPHA) 5863 struct target_sigaction act, oact, *pact = 0; 5864 struct target_old_sigaction *old_act; 5865 if (arg2) { 5866 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5867 goto efault; 5868 act._sa_handler = old_act->_sa_handler; 5869 target_siginitset(&act.sa_mask, old_act->sa_mask); 5870 act.sa_flags = old_act->sa_flags; 5871 act.sa_restorer = 0; 5872 unlock_user_struct(old_act, arg2, 0); 5873 pact = &act; 5874 } 5875 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5876 if (!is_error(ret) && arg3) { 5877 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5878 goto efault; 5879 old_act->_sa_handler = oact._sa_handler; 5880 old_act->sa_mask = oact.sa_mask.sig[0]; 5881 old_act->sa_flags = oact.sa_flags; 5882 unlock_user_struct(old_act, arg3, 1); 5883 } 5884 #elif defined(TARGET_MIPS) 5885 struct target_sigaction act, oact, *pact, *old_act; 5886 5887 if (arg2) { 5888 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5889 goto efault; 5890 act._sa_handler = old_act->_sa_handler; 5891 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5892 act.sa_flags = old_act->sa_flags; 5893 unlock_user_struct(old_act, arg2, 0); 5894 pact = &act; 5895 } else { 5896 pact = NULL; 5897 } 5898 5899 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5900 5901 if (!is_error(ret) && arg3) { 5902 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5903 goto efault; 5904 old_act->_sa_handler = oact._sa_handler; 5905 old_act->sa_flags = oact.sa_flags; 5906 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5907 old_act->sa_mask.sig[1] = 0; 5908 old_act->sa_mask.sig[2] = 0; 5909 old_act->sa_mask.sig[3] = 0; 5910 unlock_user_struct(old_act, arg3, 1); 5911 } 5912 #else 5913 struct target_old_sigaction *old_act; 5914 struct target_sigaction act, oact, *pact; 5915 if (arg2) { 5916 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5917 goto efault; 5918 act._sa_handler = old_act->_sa_handler; 5919 target_siginitset(&act.sa_mask, old_act->sa_mask); 5920 act.sa_flags = old_act->sa_flags; 5921 act.sa_restorer = old_act->sa_restorer; 5922 unlock_user_struct(old_act, arg2, 0); 5923 pact = &act; 5924 } else { 5925 pact = NULL; 5926 } 5927 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5928 if (!is_error(ret) && arg3) { 5929 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5930 goto efault; 5931 old_act->_sa_handler = oact._sa_handler; 5932 old_act->sa_mask = oact.sa_mask.sig[0]; 5933 old_act->sa_flags = oact.sa_flags; 5934 old_act->sa_restorer = oact.sa_restorer; 5935 unlock_user_struct(old_act, arg3, 1); 5936 } 5937 #endif 5938 } 5939 break; 5940 #endif 5941 case TARGET_NR_rt_sigaction: 5942 { 5943 #if defined(TARGET_ALPHA) 5944 struct target_sigaction act, oact, *pact = 0; 5945 
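/* The Alpha rt_sigaction ABI differs from the generic variant below:
   arg5 carries the restorer address and arg4 is presumably
   sizeof(sigset_t) (see the ??? note), so the target_rt_sigaction
   struct is converted field by field before calling do_sigaction()
   and converted back for the old action on success. */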
struct target_rt_sigaction *rt_act; 5946 /* ??? arg4 == sizeof(sigset_t). */ 5947 if (arg2) { 5948 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5949 goto efault; 5950 act._sa_handler = rt_act->_sa_handler; 5951 act.sa_mask = rt_act->sa_mask; 5952 act.sa_flags = rt_act->sa_flags; 5953 act.sa_restorer = arg5; 5954 unlock_user_struct(rt_act, arg2, 0); 5955 pact = &act; 5956 } 5957 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5958 if (!is_error(ret) && arg3) { 5959 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5960 goto efault; 5961 rt_act->_sa_handler = oact._sa_handler; 5962 rt_act->sa_mask = oact.sa_mask; 5963 rt_act->sa_flags = oact.sa_flags; 5964 unlock_user_struct(rt_act, arg3, 1); 5965 } 5966 #else 5967 struct target_sigaction *act; 5968 struct target_sigaction *oact; 5969 5970 if (arg2) { 5971 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5972 goto efault; 5973 } else 5974 act = NULL; 5975 if (arg3) { 5976 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5977 ret = -TARGET_EFAULT; 5978 goto rt_sigaction_fail; 5979 } 5980 } else 5981 oact = NULL; 5982 ret = get_errno(do_sigaction(arg1, act, oact)); 5983 rt_sigaction_fail: 5984 if (act) 5985 unlock_user_struct(act, arg2, 0); 5986 if (oact) 5987 unlock_user_struct(oact, arg3, 1); 5988 #endif 5989 } 5990 break; 5991 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5992 case TARGET_NR_sgetmask: 5993 { 5994 sigset_t cur_set; 5995 abi_ulong target_set; 5996 sigprocmask(0, NULL, &cur_set); 5997 host_to_target_old_sigset(&target_set, &cur_set); 5998 ret = target_set; 5999 } 6000 break; 6001 #endif 6002 #ifdef TARGET_NR_ssetmask /* not on alpha */ 6003 case TARGET_NR_ssetmask: 6004 { 6005 sigset_t set, oset, cur_set; 6006 abi_ulong target_set = arg1; 6007 sigprocmask(0, NULL, &cur_set); 6008 target_to_host_old_sigset(&set, &target_set); 6009 sigorset(&set, &set, &cur_set); 6010 sigprocmask(SIG_SETMASK, &set, &oset); 6011 host_to_target_old_sigset(&target_set, &oset); 6012 ret = target_set; 6013 } 6014 break; 6015 #endif 6016 #ifdef TARGET_NR_sigprocmask 6017 case TARGET_NR_sigprocmask: 6018 { 6019 #if defined(TARGET_ALPHA) 6020 sigset_t set, oldset; 6021 abi_ulong mask; 6022 int how; 6023 6024 switch (arg1) { 6025 case TARGET_SIG_BLOCK: 6026 how = SIG_BLOCK; 6027 break; 6028 case TARGET_SIG_UNBLOCK: 6029 how = SIG_UNBLOCK; 6030 break; 6031 case TARGET_SIG_SETMASK: 6032 how = SIG_SETMASK; 6033 break; 6034 default: 6035 ret = -TARGET_EINVAL; 6036 goto fail; 6037 } 6038 mask = arg2; 6039 target_to_host_old_sigset(&set, &mask); 6040 6041 ret = get_errno(sigprocmask(how, &set, &oldset)); 6042 if (!is_error(ret)) { 6043 host_to_target_old_sigset(&mask, &oldset); 6044 ret = mask; 6045 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6046 } 6047 #else 6048 sigset_t set, oldset, *set_ptr; 6049 int how; 6050 6051 if (arg2) { 6052 switch (arg1) { 6053 case TARGET_SIG_BLOCK: 6054 how = SIG_BLOCK; 6055 break; 6056 case TARGET_SIG_UNBLOCK: 6057 how = SIG_UNBLOCK; 6058 break; 6059 case TARGET_SIG_SETMASK: 6060 how = SIG_SETMASK; 6061 break; 6062 default: 6063 ret = -TARGET_EINVAL; 6064 goto fail; 6065 } 6066 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6067 goto efault; 6068 target_to_host_old_sigset(&set, p); 6069 unlock_user(p, arg2, 0); 6070 set_ptr = &set; 6071 } else { 6072 how = 0; 6073 set_ptr = NULL; 6074 } 6075 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6076 if (!is_error(ret) && arg3) { 6077 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6078 goto 
efault; 6079 host_to_target_old_sigset(p, &oldset); 6080 unlock_user(p, arg3, sizeof(target_sigset_t)); 6081 } 6082 #endif 6083 } 6084 break; 6085 #endif 6086 case TARGET_NR_rt_sigprocmask: 6087 { 6088 int how = arg1; 6089 sigset_t set, oldset, *set_ptr; 6090 6091 if (arg2) { 6092 switch(how) { 6093 case TARGET_SIG_BLOCK: 6094 how = SIG_BLOCK; 6095 break; 6096 case TARGET_SIG_UNBLOCK: 6097 how = SIG_UNBLOCK; 6098 break; 6099 case TARGET_SIG_SETMASK: 6100 how = SIG_SETMASK; 6101 break; 6102 default: 6103 ret = -TARGET_EINVAL; 6104 goto fail; 6105 } 6106 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6107 goto efault; 6108 target_to_host_sigset(&set, p); 6109 unlock_user(p, arg2, 0); 6110 set_ptr = &set; 6111 } else { 6112 how = 0; 6113 set_ptr = NULL; 6114 } 6115 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6116 if (!is_error(ret) && arg3) { 6117 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6118 goto efault; 6119 host_to_target_sigset(p, &oldset); 6120 unlock_user(p, arg3, sizeof(target_sigset_t)); 6121 } 6122 } 6123 break; 6124 #ifdef TARGET_NR_sigpending 6125 case TARGET_NR_sigpending: 6126 { 6127 sigset_t set; 6128 ret = get_errno(sigpending(&set)); 6129 if (!is_error(ret)) { 6130 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6131 goto efault; 6132 host_to_target_old_sigset(p, &set); 6133 unlock_user(p, arg1, sizeof(target_sigset_t)); 6134 } 6135 } 6136 break; 6137 #endif 6138 case TARGET_NR_rt_sigpending: 6139 { 6140 sigset_t set; 6141 ret = get_errno(sigpending(&set)); 6142 if (!is_error(ret)) { 6143 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6144 goto efault; 6145 host_to_target_sigset(p, &set); 6146 unlock_user(p, arg1, sizeof(target_sigset_t)); 6147 } 6148 } 6149 break; 6150 #ifdef TARGET_NR_sigsuspend 6151 case TARGET_NR_sigsuspend: 6152 { 6153 sigset_t set; 6154 #if defined(TARGET_ALPHA) 6155 abi_ulong mask = arg1; 6156 target_to_host_old_sigset(&set, &mask); 6157 #else 6158 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6159 goto efault; 6160 target_to_host_old_sigset(&set, p); 6161 unlock_user(p, arg1, 0); 6162 #endif 6163 ret = get_errno(sigsuspend(&set)); 6164 } 6165 break; 6166 #endif 6167 case TARGET_NR_rt_sigsuspend: 6168 { 6169 sigset_t set; 6170 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6171 goto efault; 6172 target_to_host_sigset(&set, p); 6173 unlock_user(p, arg1, 0); 6174 ret = get_errno(sigsuspend(&set)); 6175 } 6176 break; 6177 case TARGET_NR_rt_sigtimedwait: 6178 { 6179 sigset_t set; 6180 struct timespec uts, *puts; 6181 siginfo_t uinfo; 6182 6183 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6184 goto efault; 6185 target_to_host_sigset(&set, p); 6186 unlock_user(p, arg1, 0); 6187 if (arg3) { 6188 puts = &uts; 6189 target_to_host_timespec(puts, arg3); 6190 } else { 6191 puts = NULL; 6192 } 6193 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6194 if (!is_error(ret)) { 6195 if (arg2) { 6196 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 6197 0); 6198 if (!p) { 6199 goto efault; 6200 } 6201 host_to_target_siginfo(p, &uinfo); 6202 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6203 } 6204 ret = host_to_target_signal(ret); 6205 } 6206 } 6207 break; 6208 case TARGET_NR_rt_sigqueueinfo: 6209 { 6210 siginfo_t uinfo; 6211 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6212 goto efault; 6213 target_to_host_siginfo(&uinfo, p); 6214 unlock_user(p, arg1, 0); 6215 
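/* uinfo now holds the queued siginfo converted to the host layout;
   pass it straight to the host rt_sigqueueinfo syscall for the given
   pid and signal. */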
ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6216 } 6217 break; 6218 #ifdef TARGET_NR_sigreturn 6219 case TARGET_NR_sigreturn: 6220 /* NOTE: ret is eax, so not transcoding must be done */ 6221 ret = do_sigreturn(cpu_env); 6222 break; 6223 #endif 6224 case TARGET_NR_rt_sigreturn: 6225 /* NOTE: ret is eax, so not transcoding must be done */ 6226 ret = do_rt_sigreturn(cpu_env); 6227 break; 6228 case TARGET_NR_sethostname: 6229 if (!(p = lock_user_string(arg1))) 6230 goto efault; 6231 ret = get_errno(sethostname(p, arg2)); 6232 unlock_user(p, arg1, 0); 6233 break; 6234 case TARGET_NR_setrlimit: 6235 { 6236 int resource = target_to_host_resource(arg1); 6237 struct target_rlimit *target_rlim; 6238 struct rlimit rlim; 6239 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6240 goto efault; 6241 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6242 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6243 unlock_user_struct(target_rlim, arg2, 0); 6244 ret = get_errno(setrlimit(resource, &rlim)); 6245 } 6246 break; 6247 case TARGET_NR_getrlimit: 6248 { 6249 int resource = target_to_host_resource(arg1); 6250 struct target_rlimit *target_rlim; 6251 struct rlimit rlim; 6252 6253 ret = get_errno(getrlimit(resource, &rlim)); 6254 if (!is_error(ret)) { 6255 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6256 goto efault; 6257 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6258 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6259 unlock_user_struct(target_rlim, arg2, 1); 6260 } 6261 } 6262 break; 6263 case TARGET_NR_getrusage: 6264 { 6265 struct rusage rusage; 6266 ret = get_errno(getrusage(arg1, &rusage)); 6267 if (!is_error(ret)) { 6268 host_to_target_rusage(arg2, &rusage); 6269 } 6270 } 6271 break; 6272 case TARGET_NR_gettimeofday: 6273 { 6274 struct timeval tv; 6275 ret = get_errno(gettimeofday(&tv, NULL)); 6276 if (!is_error(ret)) { 6277 if (copy_to_user_timeval(arg1, &tv)) 6278 goto efault; 6279 } 6280 } 6281 break; 6282 case TARGET_NR_settimeofday: 6283 { 6284 struct timeval tv; 6285 if (copy_from_user_timeval(&tv, arg1)) 6286 goto efault; 6287 ret = get_errno(settimeofday(&tv, NULL)); 6288 } 6289 break; 6290 #if defined(TARGET_NR_select) 6291 case TARGET_NR_select: 6292 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 6293 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6294 #else 6295 { 6296 struct target_sel_arg_struct *sel; 6297 abi_ulong inp, outp, exp, tvp; 6298 long nsel; 6299 6300 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6301 goto efault; 6302 nsel = tswapal(sel->n); 6303 inp = tswapal(sel->inp); 6304 outp = tswapal(sel->outp); 6305 exp = tswapal(sel->exp); 6306 tvp = tswapal(sel->tvp); 6307 unlock_user_struct(sel, arg1, 0); 6308 ret = do_select(nsel, inp, outp, exp, tvp); 6309 } 6310 #endif 6311 break; 6312 #endif 6313 #ifdef TARGET_NR_pselect6 6314 case TARGET_NR_pselect6: 6315 { 6316 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6317 fd_set rfds, wfds, efds; 6318 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6319 struct timespec ts, *ts_ptr; 6320 6321 /* 6322 * The 6th arg is actually two args smashed together, 6323 * so we cannot use the C library. 
6324 */ 6325 sigset_t set; 6326 struct { 6327 sigset_t *set; 6328 size_t size; 6329 } sig, *sig_ptr; 6330 6331 abi_ulong arg_sigset, arg_sigsize, *arg7; 6332 target_sigset_t *target_sigset; 6333 6334 n = arg1; 6335 rfd_addr = arg2; 6336 wfd_addr = arg3; 6337 efd_addr = arg4; 6338 ts_addr = arg5; 6339 6340 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6341 if (ret) { 6342 goto fail; 6343 } 6344 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6345 if (ret) { 6346 goto fail; 6347 } 6348 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6349 if (ret) { 6350 goto fail; 6351 } 6352 6353 /* 6354 * This takes a timespec, and not a timeval, so we cannot 6355 * use the do_select() helper ... 6356 */ 6357 if (ts_addr) { 6358 if (target_to_host_timespec(&ts, ts_addr)) { 6359 goto efault; 6360 } 6361 ts_ptr = &ts; 6362 } else { 6363 ts_ptr = NULL; 6364 } 6365 6366 /* Extract the two packed args for the sigset */ 6367 if (arg6) { 6368 sig_ptr = &sig; 6369 sig.size = _NSIG / 8; 6370 6371 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6372 if (!arg7) { 6373 goto efault; 6374 } 6375 arg_sigset = tswapal(arg7[0]); 6376 arg_sigsize = tswapal(arg7[1]); 6377 unlock_user(arg7, arg6, 0); 6378 6379 if (arg_sigset) { 6380 sig.set = &set; 6381 if (arg_sigsize != sizeof(*target_sigset)) { 6382 /* Like the kernel, we enforce correct size sigsets */ 6383 ret = -TARGET_EINVAL; 6384 goto fail; 6385 } 6386 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6387 sizeof(*target_sigset), 1); 6388 if (!target_sigset) { 6389 goto efault; 6390 } 6391 target_to_host_sigset(&set, target_sigset); 6392 unlock_user(target_sigset, arg_sigset, 0); 6393 } else { 6394 sig.set = NULL; 6395 } 6396 } else { 6397 sig_ptr = NULL; 6398 } 6399 6400 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6401 ts_ptr, sig_ptr)); 6402 6403 if (!is_error(ret)) { 6404 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6405 goto efault; 6406 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6407 goto efault; 6408 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6409 goto efault; 6410 6411 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6412 goto efault; 6413 } 6414 } 6415 break; 6416 #endif 6417 case TARGET_NR_symlink: 6418 { 6419 void *p2; 6420 p = lock_user_string(arg1); 6421 p2 = lock_user_string(arg2); 6422 if (!p || !p2) 6423 ret = -TARGET_EFAULT; 6424 else 6425 ret = get_errno(symlink(p, p2)); 6426 unlock_user(p2, arg2, 0); 6427 unlock_user(p, arg1, 0); 6428 } 6429 break; 6430 #if defined(TARGET_NR_symlinkat) 6431 case TARGET_NR_symlinkat: 6432 { 6433 void *p2; 6434 p = lock_user_string(arg1); 6435 p2 = lock_user_string(arg3); 6436 if (!p || !p2) 6437 ret = -TARGET_EFAULT; 6438 else 6439 ret = get_errno(symlinkat(p, arg2, p2)); 6440 unlock_user(p2, arg3, 0); 6441 unlock_user(p, arg1, 0); 6442 } 6443 break; 6444 #endif 6445 #ifdef TARGET_NR_oldlstat 6446 case TARGET_NR_oldlstat: 6447 goto unimplemented; 6448 #endif 6449 case TARGET_NR_readlink: 6450 { 6451 void *p2; 6452 p = lock_user_string(arg1); 6453 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6454 if (!p || !p2) { 6455 ret = -TARGET_EFAULT; 6456 } else if (is_proc_myself((const char *)p, "exe")) { 6457 char real[PATH_MAX], *temp; 6458 temp = realpath(exec_path, real); 6459 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 6460 snprintf((char *)p2, arg3, "%s", real); 6461 } else { 6462 ret = get_errno(readlink(path(p), p2, arg3)); 6463 } 6464 unlock_user(p2, arg2, ret); 6465 unlock_user(p, arg1, 0); 6466 } 6467 break; 6468 #if defined(TARGET_NR_readlinkat) 6469 case TARGET_NR_readlinkat: 6470 { 6471 void *p2; 6472 p = lock_user_string(arg2); 6473 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6474 if (!p || !p2) { 6475 ret = -TARGET_EFAULT; 6476 } else if (is_proc_myself((const char *)p, "exe")) { 6477 char real[PATH_MAX], *temp; 6478 temp = realpath(exec_path, real); 6479 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6480 snprintf((char *)p2, arg4, "%s", real); 6481 } else { 6482 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 6483 } 6484 unlock_user(p2, arg3, ret); 6485 unlock_user(p, arg2, 0); 6486 } 6487 break; 6488 #endif 6489 #ifdef TARGET_NR_uselib 6490 case TARGET_NR_uselib: 6491 goto unimplemented; 6492 #endif 6493 #ifdef TARGET_NR_swapon 6494 case TARGET_NR_swapon: 6495 if (!(p = lock_user_string(arg1))) 6496 goto efault; 6497 ret = get_errno(swapon(p, arg2)); 6498 unlock_user(p, arg1, 0); 6499 break; 6500 #endif 6501 case TARGET_NR_reboot: 6502 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6503 /* arg4 must be ignored in all other cases */ 6504 p = lock_user_string(arg4); 6505 if (!p) { 6506 goto efault; 6507 } 6508 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6509 unlock_user(p, arg4, 0); 6510 } else { 6511 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6512 } 6513 break; 6514 #ifdef TARGET_NR_readdir 6515 case TARGET_NR_readdir: 6516 goto unimplemented; 6517 #endif 6518 #ifdef TARGET_NR_mmap 6519 case TARGET_NR_mmap: 6520 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6521 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 6522 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6523 || defined(TARGET_S390X) 6524 { 6525 abi_ulong *v; 6526 abi_ulong v1, v2, v3, v4, v5, v6; 6527 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6528 goto efault; 6529 v1 = tswapal(v[0]); 6530 v2 = tswapal(v[1]); 6531 v3 = tswapal(v[2]); 6532 v4 = tswapal(v[3]); 6533 v5 = tswapal(v[4]); 6534 v6 = tswapal(v[5]); 6535 unlock_user(v, arg1, 0); 6536 ret = get_errno(target_mmap(v1, v2, v3, 6537 target_to_host_bitmask(v4, mmap_flags_tbl), 6538 v5, v6)); 6539 } 6540 #else 6541 ret = get_errno(target_mmap(arg1, arg2, arg3, 6542 target_to_host_bitmask(arg4, mmap_flags_tbl), 6543 arg5, 6544 arg6)); 6545 #endif 6546 break; 6547 #endif 6548 #ifdef TARGET_NR_mmap2 6549 case TARGET_NR_mmap2: 6550 #ifndef MMAP_SHIFT 6551 #define MMAP_SHIFT 12 6552 #endif 6553 ret = get_errno(target_mmap(arg1, arg2, arg3, 6554 target_to_host_bitmask(arg4, mmap_flags_tbl), 6555 arg5, 6556 arg6 << MMAP_SHIFT)); 6557 break; 6558 #endif 6559 case TARGET_NR_munmap: 6560 ret = get_errno(target_munmap(arg1, arg2)); 6561 break; 6562 case TARGET_NR_mprotect: 6563 { 6564 TaskState *ts = cpu->opaque; 6565 /* Special hack to detect libc making the stack executable. */ 6566 if ((arg3 & PROT_GROWSDOWN) 6567 && arg1 >= ts->info->stack_limit 6568 && arg1 <= ts->info->start_stack) { 6569 arg3 &= ~PROT_GROWSDOWN; 6570 arg2 = arg2 + arg1 - ts->info->stack_limit; 6571 arg1 = ts->info->stack_limit; 6572 } 6573 } 6574 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6575 break; 6576 #ifdef TARGET_NR_mremap 6577 case TARGET_NR_mremap: 6578 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6579 break; 6580 #endif 6581 /* ??? 
msync/mlock/munlock are broken for softmmu. */ 6582 #ifdef TARGET_NR_msync 6583 case TARGET_NR_msync: 6584 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6585 break; 6586 #endif 6587 #ifdef TARGET_NR_mlock 6588 case TARGET_NR_mlock: 6589 ret = get_errno(mlock(g2h(arg1), arg2)); 6590 break; 6591 #endif 6592 #ifdef TARGET_NR_munlock 6593 case TARGET_NR_munlock: 6594 ret = get_errno(munlock(g2h(arg1), arg2)); 6595 break; 6596 #endif 6597 #ifdef TARGET_NR_mlockall 6598 case TARGET_NR_mlockall: 6599 ret = get_errno(mlockall(arg1)); 6600 break; 6601 #endif 6602 #ifdef TARGET_NR_munlockall 6603 case TARGET_NR_munlockall: 6604 ret = get_errno(munlockall()); 6605 break; 6606 #endif 6607 case TARGET_NR_truncate: 6608 if (!(p = lock_user_string(arg1))) 6609 goto efault; 6610 ret = get_errno(truncate(p, arg2)); 6611 unlock_user(p, arg1, 0); 6612 break; 6613 case TARGET_NR_ftruncate: 6614 ret = get_errno(ftruncate(arg1, arg2)); 6615 break; 6616 case TARGET_NR_fchmod: 6617 ret = get_errno(fchmod(arg1, arg2)); 6618 break; 6619 #if defined(TARGET_NR_fchmodat) 6620 case TARGET_NR_fchmodat: 6621 if (!(p = lock_user_string(arg2))) 6622 goto efault; 6623 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 6624 unlock_user(p, arg2, 0); 6625 break; 6626 #endif 6627 case TARGET_NR_getpriority: 6628 /* Note that negative values are valid for getpriority, so we must 6629 differentiate based on errno settings. */ 6630 errno = 0; 6631 ret = getpriority(arg1, arg2); 6632 if (ret == -1 && errno != 0) { 6633 ret = -host_to_target_errno(errno); 6634 break; 6635 } 6636 #ifdef TARGET_ALPHA 6637 /* Return value is the unbiased priority. Signal no error. */ 6638 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6639 #else 6640 /* Return value is a biased priority to avoid negative numbers. */ 6641 ret = 20 - ret; 6642 #endif 6643 break; 6644 case TARGET_NR_setpriority: 6645 ret = get_errno(setpriority(arg1, arg2, arg3)); 6646 break; 6647 #ifdef TARGET_NR_profil 6648 case TARGET_NR_profil: 6649 goto unimplemented; 6650 #endif 6651 case TARGET_NR_statfs: 6652 if (!(p = lock_user_string(arg1))) 6653 goto efault; 6654 ret = get_errno(statfs(path(p), &stfs)); 6655 unlock_user(p, arg1, 0); 6656 convert_statfs: 6657 if (!is_error(ret)) { 6658 struct target_statfs *target_stfs; 6659 6660 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6661 goto efault; 6662 __put_user(stfs.f_type, &target_stfs->f_type); 6663 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6664 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6665 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6666 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6667 __put_user(stfs.f_files, &target_stfs->f_files); 6668 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6669 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6670 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6671 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6672 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6673 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6674 unlock_user_struct(target_stfs, arg2, 1); 6675 } 6676 break; 6677 case TARGET_NR_fstatfs: 6678 ret = get_errno(fstatfs(arg1, &stfs)); 6679 goto convert_statfs; 6680 #ifdef TARGET_NR_statfs64 6681 case TARGET_NR_statfs64: 6682 if (!(p = lock_user_string(arg1))) 6683 goto efault; 6684 ret = get_errno(statfs(path(p), &stfs)); 6685 unlock_user(p, arg1, 0); 6686 convert_statfs64: 6687 if (!is_error(ret)) { 6688 struct target_statfs64 *target_stfs; 6689 6690 if (!lock_user_struct(VERIFY_WRITE, 
target_stfs, arg3, 0)) 6691 goto efault; 6692 __put_user(stfs.f_type, &target_stfs->f_type); 6693 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6694 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6695 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6696 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6697 __put_user(stfs.f_files, &target_stfs->f_files); 6698 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6699 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6700 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6701 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6702 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6703 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6704 unlock_user_struct(target_stfs, arg3, 1); 6705 } 6706 break; 6707 case TARGET_NR_fstatfs64: 6708 ret = get_errno(fstatfs(arg1, &stfs)); 6709 goto convert_statfs64; 6710 #endif 6711 #ifdef TARGET_NR_ioperm 6712 case TARGET_NR_ioperm: 6713 goto unimplemented; 6714 #endif 6715 #ifdef TARGET_NR_socketcall 6716 case TARGET_NR_socketcall: 6717 ret = do_socketcall(arg1, arg2); 6718 break; 6719 #endif 6720 #ifdef TARGET_NR_accept 6721 case TARGET_NR_accept: 6722 ret = do_accept4(arg1, arg2, arg3, 0); 6723 break; 6724 #endif 6725 #ifdef TARGET_NR_accept4 6726 case TARGET_NR_accept4: 6727 #ifdef CONFIG_ACCEPT4 6728 ret = do_accept4(arg1, arg2, arg3, arg4); 6729 #else 6730 goto unimplemented; 6731 #endif 6732 break; 6733 #endif 6734 #ifdef TARGET_NR_bind 6735 case TARGET_NR_bind: 6736 ret = do_bind(arg1, arg2, arg3); 6737 break; 6738 #endif 6739 #ifdef TARGET_NR_connect 6740 case TARGET_NR_connect: 6741 ret = do_connect(arg1, arg2, arg3); 6742 break; 6743 #endif 6744 #ifdef TARGET_NR_getpeername 6745 case TARGET_NR_getpeername: 6746 ret = do_getpeername(arg1, arg2, arg3); 6747 break; 6748 #endif 6749 #ifdef TARGET_NR_getsockname 6750 case TARGET_NR_getsockname: 6751 ret = do_getsockname(arg1, arg2, arg3); 6752 break; 6753 #endif 6754 #ifdef TARGET_NR_getsockopt 6755 case TARGET_NR_getsockopt: 6756 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6757 break; 6758 #endif 6759 #ifdef TARGET_NR_listen 6760 case TARGET_NR_listen: 6761 ret = get_errno(listen(arg1, arg2)); 6762 break; 6763 #endif 6764 #ifdef TARGET_NR_recv 6765 case TARGET_NR_recv: 6766 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6767 break; 6768 #endif 6769 #ifdef TARGET_NR_recvfrom 6770 case TARGET_NR_recvfrom: 6771 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6772 break; 6773 #endif 6774 #ifdef TARGET_NR_recvmsg 6775 case TARGET_NR_recvmsg: 6776 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6777 break; 6778 #endif 6779 #ifdef TARGET_NR_send 6780 case TARGET_NR_send: 6781 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6782 break; 6783 #endif 6784 #ifdef TARGET_NR_sendmsg 6785 case TARGET_NR_sendmsg: 6786 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6787 break; 6788 #endif 6789 #ifdef TARGET_NR_sendmmsg 6790 case TARGET_NR_sendmmsg: 6791 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 6792 break; 6793 case TARGET_NR_recvmmsg: 6794 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 6795 break; 6796 #endif 6797 #ifdef TARGET_NR_sendto 6798 case TARGET_NR_sendto: 6799 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6800 break; 6801 #endif 6802 #ifdef TARGET_NR_shutdown 6803 case TARGET_NR_shutdown: 6804 ret = get_errno(shutdown(arg1, arg2)); 6805 break; 6806 #endif 6807 #ifdef TARGET_NR_socket 6808 case TARGET_NR_socket: 6809 ret = do_socket(arg1, arg2, arg3); 6810 break; 6811 #endif 6812 #ifdef 
TARGET_NR_socketpair 6813 case TARGET_NR_socketpair: 6814 ret = do_socketpair(arg1, arg2, arg3, arg4); 6815 break; 6816 #endif 6817 #ifdef TARGET_NR_setsockopt 6818 case TARGET_NR_setsockopt: 6819 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6820 break; 6821 #endif 6822 6823 case TARGET_NR_syslog: 6824 if (!(p = lock_user_string(arg2))) 6825 goto efault; 6826 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6827 unlock_user(p, arg2, 0); 6828 break; 6829 6830 case TARGET_NR_setitimer: 6831 { 6832 struct itimerval value, ovalue, *pvalue; 6833 6834 if (arg2) { 6835 pvalue = &value; 6836 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6837 || copy_from_user_timeval(&pvalue->it_value, 6838 arg2 + sizeof(struct target_timeval))) 6839 goto efault; 6840 } else { 6841 pvalue = NULL; 6842 } 6843 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6844 if (!is_error(ret) && arg3) { 6845 if (copy_to_user_timeval(arg3, 6846 &ovalue.it_interval) 6847 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6848 &ovalue.it_value)) 6849 goto efault; 6850 } 6851 } 6852 break; 6853 case TARGET_NR_getitimer: 6854 { 6855 struct itimerval value; 6856 6857 ret = get_errno(getitimer(arg1, &value)); 6858 if (!is_error(ret) && arg2) { 6859 if (copy_to_user_timeval(arg2, 6860 &value.it_interval) 6861 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6862 &value.it_value)) 6863 goto efault; 6864 } 6865 } 6866 break; 6867 case TARGET_NR_stat: 6868 if (!(p = lock_user_string(arg1))) 6869 goto efault; 6870 ret = get_errno(stat(path(p), &st)); 6871 unlock_user(p, arg1, 0); 6872 goto do_stat; 6873 case TARGET_NR_lstat: 6874 if (!(p = lock_user_string(arg1))) 6875 goto efault; 6876 ret = get_errno(lstat(path(p), &st)); 6877 unlock_user(p, arg1, 0); 6878 goto do_stat; 6879 case TARGET_NR_fstat: 6880 { 6881 ret = get_errno(fstat(arg1, &st)); 6882 do_stat: 6883 if (!is_error(ret)) { 6884 struct target_stat *target_st; 6885 6886 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6887 goto efault; 6888 memset(target_st, 0, sizeof(*target_st)); 6889 __put_user(st.st_dev, &target_st->st_dev); 6890 __put_user(st.st_ino, &target_st->st_ino); 6891 __put_user(st.st_mode, &target_st->st_mode); 6892 __put_user(st.st_uid, &target_st->st_uid); 6893 __put_user(st.st_gid, &target_st->st_gid); 6894 __put_user(st.st_nlink, &target_st->st_nlink); 6895 __put_user(st.st_rdev, &target_st->st_rdev); 6896 __put_user(st.st_size, &target_st->st_size); 6897 __put_user(st.st_blksize, &target_st->st_blksize); 6898 __put_user(st.st_blocks, &target_st->st_blocks); 6899 __put_user(st.st_atime, &target_st->target_st_atime); 6900 __put_user(st.st_mtime, &target_st->target_st_mtime); 6901 __put_user(st.st_ctime, &target_st->target_st_ctime); 6902 unlock_user_struct(target_st, arg2, 1); 6903 } 6904 } 6905 break; 6906 #ifdef TARGET_NR_olduname 6907 case TARGET_NR_olduname: 6908 goto unimplemented; 6909 #endif 6910 #ifdef TARGET_NR_iopl 6911 case TARGET_NR_iopl: 6912 goto unimplemented; 6913 #endif 6914 case TARGET_NR_vhangup: 6915 ret = get_errno(vhangup()); 6916 break; 6917 #ifdef TARGET_NR_idle 6918 case TARGET_NR_idle: 6919 goto unimplemented; 6920 #endif 6921 #ifdef TARGET_NR_syscall 6922 case TARGET_NR_syscall: 6923 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6924 arg6, arg7, arg8, 0); 6925 break; 6926 #endif 6927 case TARGET_NR_wait4: 6928 { 6929 int status; 6930 abi_long status_ptr = arg2; 6931 struct rusage rusage, *rusage_ptr; 6932 abi_ulong target_rusage = arg4; 6933 if 
(target_rusage) 6934 rusage_ptr = &rusage; 6935 else 6936 rusage_ptr = NULL; 6937 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6938 if (!is_error(ret)) { 6939 if (status_ptr && ret) { 6940 status = host_to_target_waitstatus(status); 6941 if (put_user_s32(status, status_ptr)) 6942 goto efault; 6943 } 6944 if (target_rusage) 6945 host_to_target_rusage(target_rusage, &rusage); 6946 } 6947 } 6948 break; 6949 #ifdef TARGET_NR_swapoff 6950 case TARGET_NR_swapoff: 6951 if (!(p = lock_user_string(arg1))) 6952 goto efault; 6953 ret = get_errno(swapoff(p)); 6954 unlock_user(p, arg1, 0); 6955 break; 6956 #endif 6957 case TARGET_NR_sysinfo: 6958 { 6959 struct target_sysinfo *target_value; 6960 struct sysinfo value; 6961 ret = get_errno(sysinfo(&value)); 6962 if (!is_error(ret) && arg1) 6963 { 6964 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6965 goto efault; 6966 __put_user(value.uptime, &target_value->uptime); 6967 __put_user(value.loads[0], &target_value->loads[0]); 6968 __put_user(value.loads[1], &target_value->loads[1]); 6969 __put_user(value.loads[2], &target_value->loads[2]); 6970 __put_user(value.totalram, &target_value->totalram); 6971 __put_user(value.freeram, &target_value->freeram); 6972 __put_user(value.sharedram, &target_value->sharedram); 6973 __put_user(value.bufferram, &target_value->bufferram); 6974 __put_user(value.totalswap, &target_value->totalswap); 6975 __put_user(value.freeswap, &target_value->freeswap); 6976 __put_user(value.procs, &target_value->procs); 6977 __put_user(value.totalhigh, &target_value->totalhigh); 6978 __put_user(value.freehigh, &target_value->freehigh); 6979 __put_user(value.mem_unit, &target_value->mem_unit); 6980 unlock_user_struct(target_value, arg1, 1); 6981 } 6982 } 6983 break; 6984 #ifdef TARGET_NR_ipc 6985 case TARGET_NR_ipc: 6986 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6987 break; 6988 #endif 6989 #ifdef TARGET_NR_semget 6990 case TARGET_NR_semget: 6991 ret = get_errno(semget(arg1, arg2, arg3)); 6992 break; 6993 #endif 6994 #ifdef TARGET_NR_semop 6995 case TARGET_NR_semop: 6996 ret = do_semop(arg1, arg2, arg3); 6997 break; 6998 #endif 6999 #ifdef TARGET_NR_semctl 7000 case TARGET_NR_semctl: 7001 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 7002 break; 7003 #endif 7004 #ifdef TARGET_NR_msgctl 7005 case TARGET_NR_msgctl: 7006 ret = do_msgctl(arg1, arg2, arg3); 7007 break; 7008 #endif 7009 #ifdef TARGET_NR_msgget 7010 case TARGET_NR_msgget: 7011 ret = get_errno(msgget(arg1, arg2)); 7012 break; 7013 #endif 7014 #ifdef TARGET_NR_msgrcv 7015 case TARGET_NR_msgrcv: 7016 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 7017 break; 7018 #endif 7019 #ifdef TARGET_NR_msgsnd 7020 case TARGET_NR_msgsnd: 7021 ret = do_msgsnd(arg1, arg2, arg3, arg4); 7022 break; 7023 #endif 7024 #ifdef TARGET_NR_shmget 7025 case TARGET_NR_shmget: 7026 ret = get_errno(shmget(arg1, arg2, arg3)); 7027 break; 7028 #endif 7029 #ifdef TARGET_NR_shmctl 7030 case TARGET_NR_shmctl: 7031 ret = do_shmctl(arg1, arg2, arg3); 7032 break; 7033 #endif 7034 #ifdef TARGET_NR_shmat 7035 case TARGET_NR_shmat: 7036 ret = do_shmat(arg1, arg2, arg3); 7037 break; 7038 #endif 7039 #ifdef TARGET_NR_shmdt 7040 case TARGET_NR_shmdt: 7041 ret = do_shmdt(arg1); 7042 break; 7043 #endif 7044 case TARGET_NR_fsync: 7045 ret = get_errno(fsync(arg1)); 7046 break; 7047 case TARGET_NR_clone: 7048 /* Linux manages to have three different orderings for its 7049 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 7050 * match the kernel's CONFIG_CLONE_* 
settings. 7051 * Microblaze is further special in that it uses a sixth 7052 * implicit argument to clone for the TLS pointer. 7053 */ 7054 #if defined(TARGET_MICROBLAZE) 7055 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 7056 #elif defined(TARGET_CLONE_BACKWARDS) 7057 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 7058 #elif defined(TARGET_CLONE_BACKWARDS2) 7059 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 7060 #else 7061 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 7062 #endif 7063 break; 7064 #ifdef __NR_exit_group 7065 /* new thread calls */ 7066 case TARGET_NR_exit_group: 7067 #ifdef TARGET_GPROF 7068 _mcleanup(); 7069 #endif 7070 gdb_exit(cpu_env, arg1); 7071 ret = get_errno(exit_group(arg1)); 7072 break; 7073 #endif 7074 case TARGET_NR_setdomainname: 7075 if (!(p = lock_user_string(arg1))) 7076 goto efault; 7077 ret = get_errno(setdomainname(p, arg2)); 7078 unlock_user(p, arg1, 0); 7079 break; 7080 case TARGET_NR_uname: 7081 /* no need to transcode because we use the linux syscall */ 7082 { 7083 struct new_utsname * buf; 7084 7085 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 7086 goto efault; 7087 ret = get_errno(sys_uname(buf)); 7088 if (!is_error(ret)) { 7089 /* Overrite the native machine name with whatever is being 7090 emulated. */ 7091 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 7092 /* Allow the user to override the reported release. */ 7093 if (qemu_uname_release && *qemu_uname_release) 7094 strcpy (buf->release, qemu_uname_release); 7095 } 7096 unlock_user_struct(buf, arg1, 1); 7097 } 7098 break; 7099 #ifdef TARGET_I386 7100 case TARGET_NR_modify_ldt: 7101 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7102 break; 7103 #if !defined(TARGET_X86_64) 7104 case TARGET_NR_vm86old: 7105 goto unimplemented; 7106 case TARGET_NR_vm86: 7107 ret = do_vm86(cpu_env, arg1, arg2); 7108 break; 7109 #endif 7110 #endif 7111 case TARGET_NR_adjtimex: 7112 goto unimplemented; 7113 #ifdef TARGET_NR_create_module 7114 case TARGET_NR_create_module: 7115 #endif 7116 case TARGET_NR_init_module: 7117 case TARGET_NR_delete_module: 7118 #ifdef TARGET_NR_get_kernel_syms 7119 case TARGET_NR_get_kernel_syms: 7120 #endif 7121 goto unimplemented; 7122 case TARGET_NR_quotactl: 7123 goto unimplemented; 7124 case TARGET_NR_getpgid: 7125 ret = get_errno(getpgid(arg1)); 7126 break; 7127 case TARGET_NR_fchdir: 7128 ret = get_errno(fchdir(arg1)); 7129 break; 7130 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7131 case TARGET_NR_bdflush: 7132 goto unimplemented; 7133 #endif 7134 #ifdef TARGET_NR_sysfs 7135 case TARGET_NR_sysfs: 7136 goto unimplemented; 7137 #endif 7138 case TARGET_NR_personality: 7139 ret = get_errno(personality(arg1)); 7140 break; 7141 #ifdef TARGET_NR_afs_syscall 7142 case TARGET_NR_afs_syscall: 7143 goto unimplemented; 7144 #endif 7145 #ifdef TARGET_NR__llseek /* Not on alpha */ 7146 case TARGET_NR__llseek: 7147 { 7148 int64_t res; 7149 #if !defined(__NR_llseek) 7150 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7151 if (res == -1) { 7152 ret = get_errno(res); 7153 } else { 7154 ret = 0; 7155 } 7156 #else 7157 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7158 #endif 7159 if ((ret == 0) && put_user_s64(res, arg4)) { 7160 goto efault; 7161 } 7162 } 7163 break; 7164 #endif 7165 case TARGET_NR_getdents: 7166 #ifdef __NR_getdents 7167 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7168 { 7169 struct target_dirent *target_dirp; 7170 struct linux_dirent *dirp; 7171 abi_long count = arg3; 
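/* With a 32-bit target ABI on a 64-bit host the dirent layouts
   differ, so read the host records into a scratch buffer first and
   then repack them one at a time as target_dirent entries in the
   guest buffer. */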
7172 7173 dirp = malloc(count); 7174 if (!dirp) { 7175 ret = -TARGET_ENOMEM; 7176 goto fail; 7177 } 7178 7179 ret = get_errno(sys_getdents(arg1, dirp, count)); 7180 if (!is_error(ret)) { 7181 struct linux_dirent *de; 7182 struct target_dirent *tde; 7183 int len = ret; 7184 int reclen, treclen; 7185 int count1, tnamelen; 7186 7187 count1 = 0; 7188 de = dirp; 7189 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7190 goto efault; 7191 tde = target_dirp; 7192 while (len > 0) { 7193 reclen = de->d_reclen; 7194 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7195 assert(tnamelen >= 0); 7196 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7197 assert(count1 + treclen <= count); 7198 tde->d_reclen = tswap16(treclen); 7199 tde->d_ino = tswapal(de->d_ino); 7200 tde->d_off = tswapal(de->d_off); 7201 memcpy(tde->d_name, de->d_name, tnamelen); 7202 de = (struct linux_dirent *)((char *)de + reclen); 7203 len -= reclen; 7204 tde = (struct target_dirent *)((char *)tde + treclen); 7205 count1 += treclen; 7206 } 7207 ret = count1; 7208 unlock_user(target_dirp, arg2, ret); 7209 } 7210 free(dirp); 7211 } 7212 #else 7213 { 7214 struct linux_dirent *dirp; 7215 abi_long count = arg3; 7216 7217 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7218 goto efault; 7219 ret = get_errno(sys_getdents(arg1, dirp, count)); 7220 if (!is_error(ret)) { 7221 struct linux_dirent *de; 7222 int len = ret; 7223 int reclen; 7224 de = dirp; 7225 while (len > 0) { 7226 reclen = de->d_reclen; 7227 if (reclen > len) 7228 break; 7229 de->d_reclen = tswap16(reclen); 7230 tswapls(&de->d_ino); 7231 tswapls(&de->d_off); 7232 de = (struct linux_dirent *)((char *)de + reclen); 7233 len -= reclen; 7234 } 7235 } 7236 unlock_user(dirp, arg2, ret); 7237 } 7238 #endif 7239 #else 7240 /* Implement getdents in terms of getdents64 */ 7241 { 7242 struct linux_dirent64 *dirp; 7243 abi_long count = arg3; 7244 7245 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 7246 if (!dirp) { 7247 goto efault; 7248 } 7249 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7250 if (!is_error(ret)) { 7251 /* Convert the dirent64 structs to target dirent. We do this 7252 * in-place, since we can guarantee that a target_dirent is no 7253 * larger than a dirent64; however this means we have to be 7254 * careful to read everything before writing in the new format. 
7255 */ 7256 struct linux_dirent64 *de; 7257 struct target_dirent *tde; 7258 int len = ret; 7259 int tlen = 0; 7260 7261 de = dirp; 7262 tde = (struct target_dirent *)dirp; 7263 while (len > 0) { 7264 int namelen, treclen; 7265 int reclen = de->d_reclen; 7266 uint64_t ino = de->d_ino; 7267 int64_t off = de->d_off; 7268 uint8_t type = de->d_type; 7269 7270 namelen = strlen(de->d_name); 7271 treclen = offsetof(struct target_dirent, d_name) 7272 + namelen + 2; 7273 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 7274 7275 memmove(tde->d_name, de->d_name, namelen + 1); 7276 tde->d_ino = tswapal(ino); 7277 tde->d_off = tswapal(off); 7278 tde->d_reclen = tswap16(treclen); 7279 /* The target_dirent type is in what was formerly a padding 7280 * byte at the end of the structure: 7281 */ 7282 *(((char *)tde) + treclen - 1) = type; 7283 7284 de = (struct linux_dirent64 *)((char *)de + reclen); 7285 tde = (struct target_dirent *)((char *)tde + treclen); 7286 len -= reclen; 7287 tlen += treclen; 7288 } 7289 ret = tlen; 7290 } 7291 unlock_user(dirp, arg2, ret); 7292 } 7293 #endif 7294 break; 7295 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7296 case TARGET_NR_getdents64: 7297 { 7298 struct linux_dirent64 *dirp; 7299 abi_long count = arg3; 7300 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7301 goto efault; 7302 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7303 if (!is_error(ret)) { 7304 struct linux_dirent64 *de; 7305 int len = ret; 7306 int reclen; 7307 de = dirp; 7308 while (len > 0) { 7309 reclen = de->d_reclen; 7310 if (reclen > len) 7311 break; 7312 de->d_reclen = tswap16(reclen); 7313 tswap64s((uint64_t *)&de->d_ino); 7314 tswap64s((uint64_t *)&de->d_off); 7315 de = (struct linux_dirent64 *)((char *)de + reclen); 7316 len -= reclen; 7317 } 7318 } 7319 unlock_user(dirp, arg2, ret); 7320 } 7321 break; 7322 #endif /* TARGET_NR_getdents64 */ 7323 #if defined(TARGET_NR__newselect) 7324 case TARGET_NR__newselect: 7325 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7326 break; 7327 #endif 7328 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7329 # ifdef TARGET_NR_poll 7330 case TARGET_NR_poll: 7331 # endif 7332 # ifdef TARGET_NR_ppoll 7333 case TARGET_NR_ppoll: 7334 # endif 7335 { 7336 struct target_pollfd *target_pfd; 7337 unsigned int nfds = arg2; 7338 int timeout = arg3; 7339 struct pollfd *pfd; 7340 unsigned int i; 7341 7342 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7343 if (!target_pfd) 7344 goto efault; 7345 7346 pfd = alloca(sizeof(struct pollfd) * nfds); 7347 for(i = 0; i < nfds; i++) { 7348 pfd[i].fd = tswap32(target_pfd[i].fd); 7349 pfd[i].events = tswap16(target_pfd[i].events); 7350 } 7351 7352 # ifdef TARGET_NR_ppoll 7353 if (num == TARGET_NR_ppoll) { 7354 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7355 target_sigset_t *target_set; 7356 sigset_t _set, *set = &_set; 7357 7358 if (arg3) { 7359 if (target_to_host_timespec(timeout_ts, arg3)) { 7360 unlock_user(target_pfd, arg1, 0); 7361 goto efault; 7362 } 7363 } else { 7364 timeout_ts = NULL; 7365 } 7366 7367 if (arg4) { 7368 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7369 if (!target_set) { 7370 unlock_user(target_pfd, arg1, 0); 7371 goto efault; 7372 } 7373 target_to_host_sigset(set, target_set); 7374 } else { 7375 set = NULL; 7376 } 7377 7378 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7379 7380 if (!is_error(ret) && arg3) { 7381 host_to_target_timespec(arg3, timeout_ts); 7382 } 7383 if (arg4) 
{ 7384 unlock_user(target_set, arg4, 0); 7385 } 7386 } else 7387 # endif 7388 ret = get_errno(poll(pfd, nfds, timeout)); 7389 7390 if (!is_error(ret)) { 7391 for(i = 0; i < nfds; i++) { 7392 target_pfd[i].revents = tswap16(pfd[i].revents); 7393 } 7394 } 7395 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7396 } 7397 break; 7398 #endif 7399 case TARGET_NR_flock: 7400 /* NOTE: the flock constant seems to be the same for every 7401 Linux platform */ 7402 ret = get_errno(flock(arg1, arg2)); 7403 break; 7404 case TARGET_NR_readv: 7405 { 7406 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7407 if (vec != NULL) { 7408 ret = get_errno(readv(arg1, vec, arg3)); 7409 unlock_iovec(vec, arg2, arg3, 1); 7410 } else { 7411 ret = -host_to_target_errno(errno); 7412 } 7413 } 7414 break; 7415 case TARGET_NR_writev: 7416 { 7417 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7418 if (vec != NULL) { 7419 ret = get_errno(writev(arg1, vec, arg3)); 7420 unlock_iovec(vec, arg2, arg3, 0); 7421 } else { 7422 ret = -host_to_target_errno(errno); 7423 } 7424 } 7425 break; 7426 case TARGET_NR_getsid: 7427 ret = get_errno(getsid(arg1)); 7428 break; 7429 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7430 case TARGET_NR_fdatasync: 7431 ret = get_errno(fdatasync(arg1)); 7432 break; 7433 #endif 7434 case TARGET_NR__sysctl: 7435 /* We don't implement this, but ENOTDIR is always a safe 7436 return value. */ 7437 ret = -TARGET_ENOTDIR; 7438 break; 7439 case TARGET_NR_sched_getaffinity: 7440 { 7441 unsigned int mask_size; 7442 unsigned long *mask; 7443 7444 /* 7445 * sched_getaffinity needs multiples of ulong, so need to take 7446 * care of mismatches between target ulong and host ulong sizes. 7447 */ 7448 if (arg2 & (sizeof(abi_ulong) - 1)) { 7449 ret = -TARGET_EINVAL; 7450 break; 7451 } 7452 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7453 7454 mask = alloca(mask_size); 7455 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7456 7457 if (!is_error(ret)) { 7458 if (copy_to_user(arg3, mask, ret)) { 7459 goto efault; 7460 } 7461 } 7462 } 7463 break; 7464 case TARGET_NR_sched_setaffinity: 7465 { 7466 unsigned int mask_size; 7467 unsigned long *mask; 7468 7469 /* 7470 * sched_setaffinity needs multiples of ulong, so need to take 7471 * care of mismatches between target ulong and host ulong sizes. 
7472 */ 7473 if (arg2 & (sizeof(abi_ulong) - 1)) { 7474 ret = -TARGET_EINVAL; 7475 break; 7476 } 7477 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7478 7479 mask = alloca(mask_size); 7480 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7481 goto efault; 7482 } 7483 memcpy(mask, p, arg2); 7484 unlock_user_struct(p, arg2, 0); 7485 7486 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7487 } 7488 break; 7489 case TARGET_NR_sched_setparam: 7490 { 7491 struct sched_param *target_schp; 7492 struct sched_param schp; 7493 7494 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7495 goto efault; 7496 schp.sched_priority = tswap32(target_schp->sched_priority); 7497 unlock_user_struct(target_schp, arg2, 0); 7498 ret = get_errno(sched_setparam(arg1, &schp)); 7499 } 7500 break; 7501 case TARGET_NR_sched_getparam: 7502 { 7503 struct sched_param *target_schp; 7504 struct sched_param schp; 7505 ret = get_errno(sched_getparam(arg1, &schp)); 7506 if (!is_error(ret)) { 7507 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7508 goto efault; 7509 target_schp->sched_priority = tswap32(schp.sched_priority); 7510 unlock_user_struct(target_schp, arg2, 1); 7511 } 7512 } 7513 break; 7514 case TARGET_NR_sched_setscheduler: 7515 { 7516 struct sched_param *target_schp; 7517 struct sched_param schp; 7518 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7519 goto efault; 7520 schp.sched_priority = tswap32(target_schp->sched_priority); 7521 unlock_user_struct(target_schp, arg3, 0); 7522 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7523 } 7524 break; 7525 case TARGET_NR_sched_getscheduler: 7526 ret = get_errno(sched_getscheduler(arg1)); 7527 break; 7528 case TARGET_NR_sched_yield: 7529 ret = get_errno(sched_yield()); 7530 break; 7531 case TARGET_NR_sched_get_priority_max: 7532 ret = get_errno(sched_get_priority_max(arg1)); 7533 break; 7534 case TARGET_NR_sched_get_priority_min: 7535 ret = get_errno(sched_get_priority_min(arg1)); 7536 break; 7537 case TARGET_NR_sched_rr_get_interval: 7538 { 7539 struct timespec ts; 7540 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7541 if (!is_error(ret)) { 7542 host_to_target_timespec(arg2, &ts); 7543 } 7544 } 7545 break; 7546 case TARGET_NR_nanosleep: 7547 { 7548 struct timespec req, rem; 7549 target_to_host_timespec(&req, arg1); 7550 ret = get_errno(nanosleep(&req, &rem)); 7551 if (is_error(ret) && arg2) { 7552 host_to_target_timespec(arg2, &rem); 7553 } 7554 } 7555 break; 7556 #ifdef TARGET_NR_query_module 7557 case TARGET_NR_query_module: 7558 goto unimplemented; 7559 #endif 7560 #ifdef TARGET_NR_nfsservctl 7561 case TARGET_NR_nfsservctl: 7562 goto unimplemented; 7563 #endif 7564 case TARGET_NR_prctl: 7565 switch (arg1) { 7566 case PR_GET_PDEATHSIG: 7567 { 7568 int deathsig; 7569 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7570 if (!is_error(ret) && arg2 7571 && put_user_ual(deathsig, arg2)) { 7572 goto efault; 7573 } 7574 break; 7575 } 7576 #ifdef PR_GET_NAME 7577 case PR_GET_NAME: 7578 { 7579 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7580 if (!name) { 7581 goto efault; 7582 } 7583 ret = get_errno(prctl(arg1, (unsigned long)name, 7584 arg3, arg4, arg5)); 7585 unlock_user(name, arg2, 16); 7586 break; 7587 } 7588 case PR_SET_NAME: 7589 { 7590 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7591 if (!name) { 7592 goto efault; 7593 } 7594 ret = get_errno(prctl(arg1, (unsigned long)name, 7595 arg3, arg4, arg5)); 7596 unlock_user(name, arg2, 0); 7597 break; 7598 } 7599 #endif 7600 
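/* Only the options handled above (PR_GET_PDEATHSIG, PR_GET_NAME,
   PR_SET_NAME) pass guest pointers that need translating; anything
   else falls through to the default case. */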
default: 7601 /* Most prctl options have no pointer arguments */ 7602 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7603 break; 7604 } 7605 break; 7606 #ifdef TARGET_NR_arch_prctl 7607 case TARGET_NR_arch_prctl: 7608 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7609 ret = do_arch_prctl(cpu_env, arg1, arg2); 7610 break; 7611 #else 7612 goto unimplemented; 7613 #endif 7614 #endif 7615 #ifdef TARGET_NR_pread64 7616 case TARGET_NR_pread64: 7617 if (regpairs_aligned(cpu_env)) { 7618 arg4 = arg5; 7619 arg5 = arg6; 7620 } 7621 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7622 goto efault; 7623 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7624 unlock_user(p, arg2, ret); 7625 break; 7626 case TARGET_NR_pwrite64: 7627 if (regpairs_aligned(cpu_env)) { 7628 arg4 = arg5; 7629 arg5 = arg6; 7630 } 7631 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7632 goto efault; 7633 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7634 unlock_user(p, arg2, 0); 7635 break; 7636 #endif 7637 case TARGET_NR_getcwd: 7638 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7639 goto efault; 7640 ret = get_errno(sys_getcwd1(p, arg2)); 7641 unlock_user(p, arg1, ret); 7642 break; 7643 case TARGET_NR_capget: 7644 goto unimplemented; 7645 case TARGET_NR_capset: 7646 goto unimplemented; 7647 case TARGET_NR_sigaltstack: 7648 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7649 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7650 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7651 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7652 break; 7653 #else 7654 goto unimplemented; 7655 #endif 7656 7657 #ifdef CONFIG_SENDFILE 7658 case TARGET_NR_sendfile: 7659 { 7660 off_t *offp = NULL; 7661 off_t off; 7662 if (arg3) { 7663 ret = get_user_sal(off, arg3); 7664 if (is_error(ret)) { 7665 break; 7666 } 7667 offp = &off; 7668 } 7669 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7670 if (!is_error(ret) && arg3) { 7671 abi_long ret2 = put_user_sal(off, arg3); 7672 if (is_error(ret2)) { 7673 ret = ret2; 7674 } 7675 } 7676 break; 7677 } 7678 #ifdef TARGET_NR_sendfile64 7679 case TARGET_NR_sendfile64: 7680 { 7681 off_t *offp = NULL; 7682 off_t off; 7683 if (arg3) { 7684 ret = get_user_s64(off, arg3); 7685 if (is_error(ret)) { 7686 break; 7687 } 7688 offp = &off; 7689 } 7690 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7691 if (!is_error(ret) && arg3) { 7692 abi_long ret2 = put_user_s64(off, arg3); 7693 if (is_error(ret2)) { 7694 ret = ret2; 7695 } 7696 } 7697 break; 7698 } 7699 #endif 7700 #else 7701 case TARGET_NR_sendfile: 7702 #ifdef TARGET_NR_sendfile64 7703 case TARGET_NR_sendfile64: 7704 #endif 7705 goto unimplemented; 7706 #endif 7707 7708 #ifdef TARGET_NR_getpmsg 7709 case TARGET_NR_getpmsg: 7710 goto unimplemented; 7711 #endif 7712 #ifdef TARGET_NR_putpmsg 7713 case TARGET_NR_putpmsg: 7714 goto unimplemented; 7715 #endif 7716 #ifdef TARGET_NR_vfork 7717 case TARGET_NR_vfork: 7718 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7719 0, 0, 0, 0)); 7720 break; 7721 #endif 7722 #ifdef TARGET_NR_ugetrlimit 7723 case TARGET_NR_ugetrlimit: 7724 { 7725 struct rlimit rlim; 7726 int resource = target_to_host_resource(arg1); 7727 ret = get_errno(getrlimit(resource, &rlim)); 7728 if (!is_error(ret)) { 7729 struct target_rlimit *target_rlim; 7730 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7731 goto efault; 7732 
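/* Copy the host limits back into the guest structure;
   host_to_target_rlim() converts each value to the target's rlim
   representation. */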
target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7733 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7734 unlock_user_struct(target_rlim, arg2, 1); 7735 } 7736 break; 7737 } 7738 #endif 7739 #ifdef TARGET_NR_truncate64 7740 case TARGET_NR_truncate64: 7741 if (!(p = lock_user_string(arg1))) 7742 goto efault; 7743 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7744 unlock_user(p, arg1, 0); 7745 break; 7746 #endif 7747 #ifdef TARGET_NR_ftruncate64 7748 case TARGET_NR_ftruncate64: 7749 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7750 break; 7751 #endif 7752 #ifdef TARGET_NR_stat64 7753 case TARGET_NR_stat64: 7754 if (!(p = lock_user_string(arg1))) 7755 goto efault; 7756 ret = get_errno(stat(path(p), &st)); 7757 unlock_user(p, arg1, 0); 7758 if (!is_error(ret)) 7759 ret = host_to_target_stat64(cpu_env, arg2, &st); 7760 break; 7761 #endif 7762 #ifdef TARGET_NR_lstat64 7763 case TARGET_NR_lstat64: 7764 if (!(p = lock_user_string(arg1))) 7765 goto efault; 7766 ret = get_errno(lstat(path(p), &st)); 7767 unlock_user(p, arg1, 0); 7768 if (!is_error(ret)) 7769 ret = host_to_target_stat64(cpu_env, arg2, &st); 7770 break; 7771 #endif 7772 #ifdef TARGET_NR_fstat64 7773 case TARGET_NR_fstat64: 7774 ret = get_errno(fstat(arg1, &st)); 7775 if (!is_error(ret)) 7776 ret = host_to_target_stat64(cpu_env, arg2, &st); 7777 break; 7778 #endif 7779 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 7780 #ifdef TARGET_NR_fstatat64 7781 case TARGET_NR_fstatat64: 7782 #endif 7783 #ifdef TARGET_NR_newfstatat 7784 case TARGET_NR_newfstatat: 7785 #endif 7786 if (!(p = lock_user_string(arg2))) 7787 goto efault; 7788 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 7789 if (!is_error(ret)) 7790 ret = host_to_target_stat64(cpu_env, arg3, &st); 7791 break; 7792 #endif 7793 case TARGET_NR_lchown: 7794 if (!(p = lock_user_string(arg1))) 7795 goto efault; 7796 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7797 unlock_user(p, arg1, 0); 7798 break; 7799 #ifdef TARGET_NR_getuid 7800 case TARGET_NR_getuid: 7801 ret = get_errno(high2lowuid(getuid())); 7802 break; 7803 #endif 7804 #ifdef TARGET_NR_getgid 7805 case TARGET_NR_getgid: 7806 ret = get_errno(high2lowgid(getgid())); 7807 break; 7808 #endif 7809 #ifdef TARGET_NR_geteuid 7810 case TARGET_NR_geteuid: 7811 ret = get_errno(high2lowuid(geteuid())); 7812 break; 7813 #endif 7814 #ifdef TARGET_NR_getegid 7815 case TARGET_NR_getegid: 7816 ret = get_errno(high2lowgid(getegid())); 7817 break; 7818 #endif 7819 case TARGET_NR_setreuid: 7820 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7821 break; 7822 case TARGET_NR_setregid: 7823 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7824 break; 7825 case TARGET_NR_getgroups: 7826 { 7827 int gidsetsize = arg1; 7828 target_id *target_grouplist; 7829 gid_t *grouplist; 7830 int i; 7831 7832 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7833 ret = get_errno(getgroups(gidsetsize, grouplist)); 7834 if (gidsetsize == 0) 7835 break; 7836 if (!is_error(ret)) { 7837 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 7838 if (!target_grouplist) 7839 goto efault; 7840 for(i = 0;i < ret; i++) 7841 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7842 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 7843 } 7844 } 7845 break; 7846 case TARGET_NR_setgroups: 7847 { 7848 int gidsetsize = arg1; 7849 target_id *target_grouplist; 7850 gid_t *grouplist = NULL; 7851 int i; 7852 if 
(gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
        break;
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg2),
                                  low2highuid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret =
-TARGET_EOPNOTSUPP; 7980 switch (arg1) { 7981 case TARGET_GSI_IEEE_FP_CONTROL: 7982 { 7983 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 7984 7985 /* Copied from linux ieee_fpcr_to_swcr. */ 7986 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 7987 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 7988 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 7989 | SWCR_TRAP_ENABLE_DZE 7990 | SWCR_TRAP_ENABLE_OVF); 7991 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 7992 | SWCR_TRAP_ENABLE_INE); 7993 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 7994 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 7995 7996 if (put_user_u64 (swcr, arg2)) 7997 goto efault; 7998 ret = 0; 7999 } 8000 break; 8001 8002 /* case GSI_IEEE_STATE_AT_SIGNAL: 8003 -- Not implemented in linux kernel. 8004 case GSI_UACPROC: 8005 -- Retrieves current unaligned access state; not much used. 8006 case GSI_PROC_TYPE: 8007 -- Retrieves implver information; surely not used. 8008 case GSI_GET_HWRPB: 8009 -- Grabs a copy of the HWRPB; surely not used. 8010 */ 8011 } 8012 break; 8013 #endif 8014 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 8015 /* Alpha specific */ 8016 case TARGET_NR_osf_setsysinfo: 8017 ret = -TARGET_EOPNOTSUPP; 8018 switch (arg1) { 8019 case TARGET_SSI_IEEE_FP_CONTROL: 8020 { 8021 uint64_t swcr, fpcr, orig_fpcr; 8022 8023 if (get_user_u64 (swcr, arg2)) { 8024 goto efault; 8025 } 8026 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8027 fpcr = orig_fpcr & FPCR_DYN_MASK; 8028 8029 /* Copied from linux ieee_swcr_to_fpcr. */ 8030 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 8031 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 8032 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 8033 | SWCR_TRAP_ENABLE_DZE 8034 | SWCR_TRAP_ENABLE_OVF)) << 48; 8035 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 8036 | SWCR_TRAP_ENABLE_INE)) << 57; 8037 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 8038 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 8039 8040 cpu_alpha_store_fpcr(cpu_env, fpcr); 8041 ret = 0; 8042 } 8043 break; 8044 8045 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 8046 { 8047 uint64_t exc, fpcr, orig_fpcr; 8048 int si_code; 8049 8050 if (get_user_u64(exc, arg2)) { 8051 goto efault; 8052 } 8053 8054 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8055 8056 /* We only add to the exception status here. */ 8057 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 8058 8059 cpu_alpha_store_fpcr(cpu_env, fpcr); 8060 ret = 0; 8061 8062 /* Old exceptions are not signaled. */ 8063 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 8064 8065 /* If any exceptions set by this call, 8066 and are unmasked, send a signal. */ 8067 si_code = 0; 8068 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 8069 si_code = TARGET_FPE_FLTRES; 8070 } 8071 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 8072 si_code = TARGET_FPE_FLTUND; 8073 } 8074 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 8075 si_code = TARGET_FPE_FLTOVF; 8076 } 8077 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 8078 si_code = TARGET_FPE_FLTDIV; 8079 } 8080 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 8081 si_code = TARGET_FPE_FLTINV; 8082 } 8083 if (si_code != 0) { 8084 target_siginfo_t info; 8085 info.si_signo = SIGFPE; 8086 info.si_errno = 0; 8087 info.si_code = si_code; 8088 info._sifields._sigfault._addr 8089 = ((CPUArchState *)cpu_env)->pc; 8090 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 8091 } 8092 } 8093 break; 8094 8095 /* case SSI_NVPAIRS: 8096 -- Used with SSIN_UACPROC to enable unaligned accesses. 
8097 case SSI_IEEE_STATE_AT_SIGNAL: 8098 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8099 -- Not implemented in linux kernel 8100 */ 8101 } 8102 break; 8103 #endif 8104 #ifdef TARGET_NR_osf_sigprocmask 8105 /* Alpha specific. */ 8106 case TARGET_NR_osf_sigprocmask: 8107 { 8108 abi_ulong mask; 8109 int how; 8110 sigset_t set, oldset; 8111 8112 switch(arg1) { 8113 case TARGET_SIG_BLOCK: 8114 how = SIG_BLOCK; 8115 break; 8116 case TARGET_SIG_UNBLOCK: 8117 how = SIG_UNBLOCK; 8118 break; 8119 case TARGET_SIG_SETMASK: 8120 how = SIG_SETMASK; 8121 break; 8122 default: 8123 ret = -TARGET_EINVAL; 8124 goto fail; 8125 } 8126 mask = arg2; 8127 target_to_host_old_sigset(&set, &mask); 8128 sigprocmask(how, &set, &oldset); 8129 host_to_target_old_sigset(&mask, &oldset); 8130 ret = mask; 8131 } 8132 break; 8133 #endif 8134 8135 #ifdef TARGET_NR_getgid32 8136 case TARGET_NR_getgid32: 8137 ret = get_errno(getgid()); 8138 break; 8139 #endif 8140 #ifdef TARGET_NR_geteuid32 8141 case TARGET_NR_geteuid32: 8142 ret = get_errno(geteuid()); 8143 break; 8144 #endif 8145 #ifdef TARGET_NR_getegid32 8146 case TARGET_NR_getegid32: 8147 ret = get_errno(getegid()); 8148 break; 8149 #endif 8150 #ifdef TARGET_NR_setreuid32 8151 case TARGET_NR_setreuid32: 8152 ret = get_errno(setreuid(arg1, arg2)); 8153 break; 8154 #endif 8155 #ifdef TARGET_NR_setregid32 8156 case TARGET_NR_setregid32: 8157 ret = get_errno(setregid(arg1, arg2)); 8158 break; 8159 #endif 8160 #ifdef TARGET_NR_getgroups32 8161 case TARGET_NR_getgroups32: 8162 { 8163 int gidsetsize = arg1; 8164 uint32_t *target_grouplist; 8165 gid_t *grouplist; 8166 int i; 8167 8168 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8169 ret = get_errno(getgroups(gidsetsize, grouplist)); 8170 if (gidsetsize == 0) 8171 break; 8172 if (!is_error(ret)) { 8173 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8174 if (!target_grouplist) { 8175 ret = -TARGET_EFAULT; 8176 goto fail; 8177 } 8178 for(i = 0;i < ret; i++) 8179 target_grouplist[i] = tswap32(grouplist[i]); 8180 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8181 } 8182 } 8183 break; 8184 #endif 8185 #ifdef TARGET_NR_setgroups32 8186 case TARGET_NR_setgroups32: 8187 { 8188 int gidsetsize = arg1; 8189 uint32_t *target_grouplist; 8190 gid_t *grouplist; 8191 int i; 8192 8193 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8194 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8195 if (!target_grouplist) { 8196 ret = -TARGET_EFAULT; 8197 goto fail; 8198 } 8199 for(i = 0;i < gidsetsize; i++) 8200 grouplist[i] = tswap32(target_grouplist[i]); 8201 unlock_user(target_grouplist, arg2, 0); 8202 ret = get_errno(setgroups(gidsetsize, grouplist)); 8203 } 8204 break; 8205 #endif 8206 #ifdef TARGET_NR_fchown32 8207 case TARGET_NR_fchown32: 8208 ret = get_errno(fchown(arg1, arg2, arg3)); 8209 break; 8210 #endif 8211 #ifdef TARGET_NR_setresuid32 8212 case TARGET_NR_setresuid32: 8213 ret = get_errno(setresuid(arg1, arg2, arg3)); 8214 break; 8215 #endif 8216 #ifdef TARGET_NR_getresuid32 8217 case TARGET_NR_getresuid32: 8218 { 8219 uid_t ruid, euid, suid; 8220 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8221 if (!is_error(ret)) { 8222 if (put_user_u32(ruid, arg1) 8223 || put_user_u32(euid, arg2) 8224 || put_user_u32(suid, arg3)) 8225 goto efault; 8226 } 8227 } 8228 break; 8229 #endif 8230 #ifdef TARGET_NR_setresgid32 8231 case TARGET_NR_setresgid32: 8232 ret = get_errno(setresgid(arg1, arg2, arg3)); 8233 break; 8234 #endif 8235 #ifdef TARGET_NR_getresgid32 8236 case TARGET_NR_getresgid32: 8237 { 8238 
gid_t rgid, egid, sgid; 8239 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8240 if (!is_error(ret)) { 8241 if (put_user_u32(rgid, arg1) 8242 || put_user_u32(egid, arg2) 8243 || put_user_u32(sgid, arg3)) 8244 goto efault; 8245 } 8246 } 8247 break; 8248 #endif 8249 #ifdef TARGET_NR_chown32 8250 case TARGET_NR_chown32: 8251 if (!(p = lock_user_string(arg1))) 8252 goto efault; 8253 ret = get_errno(chown(p, arg2, arg3)); 8254 unlock_user(p, arg1, 0); 8255 break; 8256 #endif 8257 #ifdef TARGET_NR_setuid32 8258 case TARGET_NR_setuid32: 8259 ret = get_errno(setuid(arg1)); 8260 break; 8261 #endif 8262 #ifdef TARGET_NR_setgid32 8263 case TARGET_NR_setgid32: 8264 ret = get_errno(setgid(arg1)); 8265 break; 8266 #endif 8267 #ifdef TARGET_NR_setfsuid32 8268 case TARGET_NR_setfsuid32: 8269 ret = get_errno(setfsuid(arg1)); 8270 break; 8271 #endif 8272 #ifdef TARGET_NR_setfsgid32 8273 case TARGET_NR_setfsgid32: 8274 ret = get_errno(setfsgid(arg1)); 8275 break; 8276 #endif 8277 8278 case TARGET_NR_pivot_root: 8279 goto unimplemented; 8280 #ifdef TARGET_NR_mincore 8281 case TARGET_NR_mincore: 8282 { 8283 void *a; 8284 ret = -TARGET_EFAULT; 8285 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 8286 goto efault; 8287 if (!(p = lock_user_string(arg3))) 8288 goto mincore_fail; 8289 ret = get_errno(mincore(a, arg2, p)); 8290 unlock_user(p, arg3, ret); 8291 mincore_fail: 8292 unlock_user(a, arg1, 0); 8293 } 8294 break; 8295 #endif 8296 #ifdef TARGET_NR_arm_fadvise64_64 8297 case TARGET_NR_arm_fadvise64_64: 8298 { 8299 /* 8300 * arm_fadvise64_64 looks like fadvise64_64 but 8301 * with different argument order 8302 */ 8303 abi_long temp; 8304 temp = arg3; 8305 arg3 = arg4; 8306 arg4 = temp; 8307 } 8308 #endif 8309 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 8310 #ifdef TARGET_NR_fadvise64_64 8311 case TARGET_NR_fadvise64_64: 8312 #endif 8313 #ifdef TARGET_NR_fadvise64 8314 case TARGET_NR_fadvise64: 8315 #endif 8316 #ifdef TARGET_S390X 8317 switch (arg4) { 8318 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 8319 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 8320 case 6: arg4 = POSIX_FADV_DONTNEED; break; 8321 case 7: arg4 = POSIX_FADV_NOREUSE; break; 8322 default: break; 8323 } 8324 #endif 8325 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 8326 break; 8327 #endif 8328 #ifdef TARGET_NR_madvise 8329 case TARGET_NR_madvise: 8330 /* A straight passthrough may not be safe because qemu sometimes 8331 turns private file-backed mappings into anonymous mappings. 8332 This will break MADV_DONTNEED. 8333 This is a hint, so ignoring and returning success is ok. 
*/ 8334 ret = get_errno(0); 8335 break; 8336 #endif 8337 #if TARGET_ABI_BITS == 32 8338 case TARGET_NR_fcntl64: 8339 { 8340 int cmd; 8341 struct flock64 fl; 8342 struct target_flock64 *target_fl; 8343 #ifdef TARGET_ARM 8344 struct target_eabi_flock64 *target_efl; 8345 #endif 8346 8347 cmd = target_to_host_fcntl_cmd(arg2); 8348 if (cmd == -TARGET_EINVAL) { 8349 ret = cmd; 8350 break; 8351 } 8352 8353 switch(arg2) { 8354 case TARGET_F_GETLK64: 8355 #ifdef TARGET_ARM 8356 if (((CPUARMState *)cpu_env)->eabi) { 8357 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8358 goto efault; 8359 fl.l_type = tswap16(target_efl->l_type); 8360 fl.l_whence = tswap16(target_efl->l_whence); 8361 fl.l_start = tswap64(target_efl->l_start); 8362 fl.l_len = tswap64(target_efl->l_len); 8363 fl.l_pid = tswap32(target_efl->l_pid); 8364 unlock_user_struct(target_efl, arg3, 0); 8365 } else 8366 #endif 8367 { 8368 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8369 goto efault; 8370 fl.l_type = tswap16(target_fl->l_type); 8371 fl.l_whence = tswap16(target_fl->l_whence); 8372 fl.l_start = tswap64(target_fl->l_start); 8373 fl.l_len = tswap64(target_fl->l_len); 8374 fl.l_pid = tswap32(target_fl->l_pid); 8375 unlock_user_struct(target_fl, arg3, 0); 8376 } 8377 ret = get_errno(fcntl(arg1, cmd, &fl)); 8378 if (ret == 0) { 8379 #ifdef TARGET_ARM 8380 if (((CPUARMState *)cpu_env)->eabi) { 8381 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8382 goto efault; 8383 target_efl->l_type = tswap16(fl.l_type); 8384 target_efl->l_whence = tswap16(fl.l_whence); 8385 target_efl->l_start = tswap64(fl.l_start); 8386 target_efl->l_len = tswap64(fl.l_len); 8387 target_efl->l_pid = tswap32(fl.l_pid); 8388 unlock_user_struct(target_efl, arg3, 1); 8389 } else 8390 #endif 8391 { 8392 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8393 goto efault; 8394 target_fl->l_type = tswap16(fl.l_type); 8395 target_fl->l_whence = tswap16(fl.l_whence); 8396 target_fl->l_start = tswap64(fl.l_start); 8397 target_fl->l_len = tswap64(fl.l_len); 8398 target_fl->l_pid = tswap32(fl.l_pid); 8399 unlock_user_struct(target_fl, arg3, 1); 8400 } 8401 } 8402 break; 8403 8404 case TARGET_F_SETLK64: 8405 case TARGET_F_SETLKW64: 8406 #ifdef TARGET_ARM 8407 if (((CPUARMState *)cpu_env)->eabi) { 8408 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8409 goto efault; 8410 fl.l_type = tswap16(target_efl->l_type); 8411 fl.l_whence = tswap16(target_efl->l_whence); 8412 fl.l_start = tswap64(target_efl->l_start); 8413 fl.l_len = tswap64(target_efl->l_len); 8414 fl.l_pid = tswap32(target_efl->l_pid); 8415 unlock_user_struct(target_efl, arg3, 0); 8416 } else 8417 #endif 8418 { 8419 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8420 goto efault; 8421 fl.l_type = tswap16(target_fl->l_type); 8422 fl.l_whence = tswap16(target_fl->l_whence); 8423 fl.l_start = tswap64(target_fl->l_start); 8424 fl.l_len = tswap64(target_fl->l_len); 8425 fl.l_pid = tswap32(target_fl->l_pid); 8426 unlock_user_struct(target_fl, arg3, 0); 8427 } 8428 ret = get_errno(fcntl(arg1, cmd, &fl)); 8429 break; 8430 default: 8431 ret = do_fcntl(arg1, arg2, arg3); 8432 break; 8433 } 8434 break; 8435 } 8436 #endif 8437 #ifdef TARGET_NR_cacheflush 8438 case TARGET_NR_cacheflush: 8439 /* self-modifying code is handled automatically, so nothing needed */ 8440 ret = 0; 8441 break; 8442 #endif 8443 #ifdef TARGET_NR_security 8444 case TARGET_NR_security: 8445 goto unimplemented; 8446 #endif 8447 #ifdef TARGET_NR_getpagesize 8448 case TARGET_NR_getpagesize: 8449 ret = 
TARGET_PAGE_SIZE; 8450 break; 8451 #endif 8452 case TARGET_NR_gettid: 8453 ret = get_errno(gettid()); 8454 break; 8455 #ifdef TARGET_NR_readahead 8456 case TARGET_NR_readahead: 8457 #if TARGET_ABI_BITS == 32 8458 if (regpairs_aligned(cpu_env)) { 8459 arg2 = arg3; 8460 arg3 = arg4; 8461 arg4 = arg5; 8462 } 8463 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 8464 #else 8465 ret = get_errno(readahead(arg1, arg2, arg3)); 8466 #endif 8467 break; 8468 #endif 8469 #ifdef CONFIG_ATTR 8470 #ifdef TARGET_NR_setxattr 8471 case TARGET_NR_listxattr: 8472 case TARGET_NR_llistxattr: 8473 { 8474 void *p, *b = 0; 8475 if (arg2) { 8476 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8477 if (!b) { 8478 ret = -TARGET_EFAULT; 8479 break; 8480 } 8481 } 8482 p = lock_user_string(arg1); 8483 if (p) { 8484 if (num == TARGET_NR_listxattr) { 8485 ret = get_errno(listxattr(p, b, arg3)); 8486 } else { 8487 ret = get_errno(llistxattr(p, b, arg3)); 8488 } 8489 } else { 8490 ret = -TARGET_EFAULT; 8491 } 8492 unlock_user(p, arg1, 0); 8493 unlock_user(b, arg2, arg3); 8494 break; 8495 } 8496 case TARGET_NR_flistxattr: 8497 { 8498 void *b = 0; 8499 if (arg2) { 8500 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8501 if (!b) { 8502 ret = -TARGET_EFAULT; 8503 break; 8504 } 8505 } 8506 ret = get_errno(flistxattr(arg1, b, arg3)); 8507 unlock_user(b, arg2, arg3); 8508 break; 8509 } 8510 case TARGET_NR_setxattr: 8511 case TARGET_NR_lsetxattr: 8512 { 8513 void *p, *n, *v = 0; 8514 if (arg3) { 8515 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8516 if (!v) { 8517 ret = -TARGET_EFAULT; 8518 break; 8519 } 8520 } 8521 p = lock_user_string(arg1); 8522 n = lock_user_string(arg2); 8523 if (p && n) { 8524 if (num == TARGET_NR_setxattr) { 8525 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8526 } else { 8527 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8528 } 8529 } else { 8530 ret = -TARGET_EFAULT; 8531 } 8532 unlock_user(p, arg1, 0); 8533 unlock_user(n, arg2, 0); 8534 unlock_user(v, arg3, 0); 8535 } 8536 break; 8537 case TARGET_NR_fsetxattr: 8538 { 8539 void *n, *v = 0; 8540 if (arg3) { 8541 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8542 if (!v) { 8543 ret = -TARGET_EFAULT; 8544 break; 8545 } 8546 } 8547 n = lock_user_string(arg2); 8548 if (n) { 8549 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8550 } else { 8551 ret = -TARGET_EFAULT; 8552 } 8553 unlock_user(n, arg2, 0); 8554 unlock_user(v, arg3, 0); 8555 } 8556 break; 8557 case TARGET_NR_getxattr: 8558 case TARGET_NR_lgetxattr: 8559 { 8560 void *p, *n, *v = 0; 8561 if (arg3) { 8562 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8563 if (!v) { 8564 ret = -TARGET_EFAULT; 8565 break; 8566 } 8567 } 8568 p = lock_user_string(arg1); 8569 n = lock_user_string(arg2); 8570 if (p && n) { 8571 if (num == TARGET_NR_getxattr) { 8572 ret = get_errno(getxattr(p, n, v, arg4)); 8573 } else { 8574 ret = get_errno(lgetxattr(p, n, v, arg4)); 8575 } 8576 } else { 8577 ret = -TARGET_EFAULT; 8578 } 8579 unlock_user(p, arg1, 0); 8580 unlock_user(n, arg2, 0); 8581 unlock_user(v, arg3, arg4); 8582 } 8583 break; 8584 case TARGET_NR_fgetxattr: 8585 { 8586 void *n, *v = 0; 8587 if (arg3) { 8588 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8589 if (!v) { 8590 ret = -TARGET_EFAULT; 8591 break; 8592 } 8593 } 8594 n = lock_user_string(arg2); 8595 if (n) { 8596 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8597 } else { 8598 ret = -TARGET_EFAULT; 8599 } 8600 unlock_user(n, arg2, 0); 8601 unlock_user(v, arg3, arg4); 8602 } 8603 break; 8604 case TARGET_NR_removexattr: 8605 case 
TARGET_NR_lremovexattr: 8606 { 8607 void *p, *n; 8608 p = lock_user_string(arg1); 8609 n = lock_user_string(arg2); 8610 if (p && n) { 8611 if (num == TARGET_NR_removexattr) { 8612 ret = get_errno(removexattr(p, n)); 8613 } else { 8614 ret = get_errno(lremovexattr(p, n)); 8615 } 8616 } else { 8617 ret = -TARGET_EFAULT; 8618 } 8619 unlock_user(p, arg1, 0); 8620 unlock_user(n, arg2, 0); 8621 } 8622 break; 8623 case TARGET_NR_fremovexattr: 8624 { 8625 void *n; 8626 n = lock_user_string(arg2); 8627 if (n) { 8628 ret = get_errno(fremovexattr(arg1, n)); 8629 } else { 8630 ret = -TARGET_EFAULT; 8631 } 8632 unlock_user(n, arg2, 0); 8633 } 8634 break; 8635 #endif 8636 #endif /* CONFIG_ATTR */ 8637 #ifdef TARGET_NR_set_thread_area 8638 case TARGET_NR_set_thread_area: 8639 #if defined(TARGET_MIPS) 8640 ((CPUMIPSState *) cpu_env)->tls_value = arg1; 8641 ret = 0; 8642 break; 8643 #elif defined(TARGET_CRIS) 8644 if (arg1 & 0xff) 8645 ret = -TARGET_EINVAL; 8646 else { 8647 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 8648 ret = 0; 8649 } 8650 break; 8651 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 8652 ret = do_set_thread_area(cpu_env, arg1); 8653 break; 8654 #elif defined(TARGET_M68K) 8655 { 8656 TaskState *ts = cpu->opaque; 8657 ts->tp_value = arg1; 8658 ret = 0; 8659 break; 8660 } 8661 #else 8662 goto unimplemented_nowarn; 8663 #endif 8664 #endif 8665 #ifdef TARGET_NR_get_thread_area 8666 case TARGET_NR_get_thread_area: 8667 #if defined(TARGET_I386) && defined(TARGET_ABI32) 8668 ret = do_get_thread_area(cpu_env, arg1); 8669 break; 8670 #elif defined(TARGET_M68K) 8671 { 8672 TaskState *ts = cpu->opaque; 8673 ret = ts->tp_value; 8674 break; 8675 } 8676 #else 8677 goto unimplemented_nowarn; 8678 #endif 8679 #endif 8680 #ifdef TARGET_NR_getdomainname 8681 case TARGET_NR_getdomainname: 8682 goto unimplemented_nowarn; 8683 #endif 8684 8685 #ifdef TARGET_NR_clock_gettime 8686 case TARGET_NR_clock_gettime: 8687 { 8688 struct timespec ts; 8689 ret = get_errno(clock_gettime(arg1, &ts)); 8690 if (!is_error(ret)) { 8691 host_to_target_timespec(arg2, &ts); 8692 } 8693 break; 8694 } 8695 #endif 8696 #ifdef TARGET_NR_clock_getres 8697 case TARGET_NR_clock_getres: 8698 { 8699 struct timespec ts; 8700 ret = get_errno(clock_getres(arg1, &ts)); 8701 if (!is_error(ret)) { 8702 host_to_target_timespec(arg2, &ts); 8703 } 8704 break; 8705 } 8706 #endif 8707 #ifdef TARGET_NR_clock_nanosleep 8708 case TARGET_NR_clock_nanosleep: 8709 { 8710 struct timespec ts; 8711 target_to_host_timespec(&ts, arg3); 8712 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); 8713 if (arg4) 8714 host_to_target_timespec(arg4, &ts); 8715 break; 8716 } 8717 #endif 8718 8719 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 8720 case TARGET_NR_set_tid_address: 8721 ret = get_errno(set_tid_address((int *)g2h(arg1))); 8722 break; 8723 #endif 8724 8725 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 8726 case TARGET_NR_tkill: 8727 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 8728 break; 8729 #endif 8730 8731 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 8732 case TARGET_NR_tgkill: 8733 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 8734 target_to_host_signal(arg3))); 8735 break; 8736 #endif 8737 8738 #ifdef TARGET_NR_set_robust_list 8739 case TARGET_NR_set_robust_list: 8740 case TARGET_NR_get_robust_list: 8741 /* The ABI for supporting robust futexes has userspace pass 8742 * the kernel a pointer to a linked list which is updated by 8743 * userspace after the syscall; the list is walked by the kernel 8744 * when the thread exits. Since the linked list in QEMU guest 8745 * memory isn't a valid linked list for the host and we have 8746 * no way to reliably intercept the thread-death event, we can't 8747 * support these. Silently return ENOSYS so that guest userspace 8748 * falls back to a non-robust futex implementation (which should 8749 * be OK except in the corner case of the guest crashing while 8750 * holding a mutex that is shared with another process via 8751 * shared memory). 8752 */ 8753 goto unimplemented_nowarn; 8754 #endif 8755 8756 #if defined(TARGET_NR_utimensat) 8757 case TARGET_NR_utimensat: 8758 { 8759 struct timespec *tsp, ts[2]; 8760 if (!arg3) { 8761 tsp = NULL; 8762 } else { 8763 target_to_host_timespec(ts, arg3); 8764 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 8765 tsp = ts; 8766 } 8767 if (!arg2) 8768 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 8769 else { 8770 if (!(p = lock_user_string(arg2))) { 8771 ret = -TARGET_EFAULT; 8772 goto fail; 8773 } 8774 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 8775 unlock_user(p, arg2, 0); 8776 } 8777 } 8778 break; 8779 #endif 8780 case TARGET_NR_futex: 8781 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 8782 break; 8783 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 8784 case TARGET_NR_inotify_init: 8785 ret = get_errno(sys_inotify_init()); 8786 break; 8787 #endif 8788 #ifdef CONFIG_INOTIFY1 8789 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 8790 case TARGET_NR_inotify_init1: 8791 ret = get_errno(sys_inotify_init1(arg1)); 8792 break; 8793 #endif 8794 #endif 8795 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 8796 case TARGET_NR_inotify_add_watch: 8797 p = lock_user_string(arg2); 8798 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 8799 unlock_user(p, arg2, 0); 8800 break; 8801 #endif 8802 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 8803 case TARGET_NR_inotify_rm_watch: 8804 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 8805 break; 8806 #endif 8807 8808 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 8809 case TARGET_NR_mq_open: 8810 { 8811 struct mq_attr posix_mq_attr; 8812 8813 p = lock_user_string(arg1 - 1); 8814 if (arg4 != 0) 8815 copy_from_user_mq_attr (&posix_mq_attr, arg4); 8816 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 8817 unlock_user (p, arg1, 0); 8818 } 8819 break; 8820 8821 case TARGET_NR_mq_unlink: 8822 p = lock_user_string(arg1 - 1); 
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }

        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if
defined(TARGET_MIPS) 8958 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 8959 target_offset64(arg5, arg6), arg7)); 8960 #else 8961 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 8962 target_offset64(arg4, arg5), arg6)); 8963 #endif /* !TARGET_MIPS */ 8964 #else 8965 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 8966 #endif 8967 break; 8968 #endif 8969 #if defined(TARGET_NR_sync_file_range2) 8970 case TARGET_NR_sync_file_range2: 8971 /* This is like sync_file_range but the arguments are reordered */ 8972 #if TARGET_ABI_BITS == 32 8973 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 8974 target_offset64(arg5, arg6), arg2)); 8975 #else 8976 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 8977 #endif 8978 break; 8979 #endif 8980 #endif 8981 #if defined(CONFIG_EPOLL) 8982 #if defined(TARGET_NR_epoll_create) 8983 case TARGET_NR_epoll_create: 8984 ret = get_errno(epoll_create(arg1)); 8985 break; 8986 #endif 8987 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 8988 case TARGET_NR_epoll_create1: 8989 ret = get_errno(epoll_create1(arg1)); 8990 break; 8991 #endif 8992 #if defined(TARGET_NR_epoll_ctl) 8993 case TARGET_NR_epoll_ctl: 8994 { 8995 struct epoll_event ep; 8996 struct epoll_event *epp = 0; 8997 if (arg4) { 8998 struct target_epoll_event *target_ep; 8999 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 9000 goto efault; 9001 } 9002 ep.events = tswap32(target_ep->events); 9003 /* The epoll_data_t union is just opaque data to the kernel, 9004 * so we transfer all 64 bits across and need not worry what 9005 * actual data type it is. 9006 */ 9007 ep.data.u64 = tswap64(target_ep->data.u64); 9008 unlock_user_struct(target_ep, arg4, 0); 9009 epp = &ep; 9010 } 9011 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 9012 break; 9013 } 9014 #endif 9015 9016 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 9017 #define IMPLEMENT_EPOLL_PWAIT 9018 #endif 9019 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 9020 #if defined(TARGET_NR_epoll_wait) 9021 case TARGET_NR_epoll_wait: 9022 #endif 9023 #if defined(IMPLEMENT_EPOLL_PWAIT) 9024 case TARGET_NR_epoll_pwait: 9025 #endif 9026 { 9027 struct target_epoll_event *target_ep; 9028 struct epoll_event *ep; 9029 int epfd = arg1; 9030 int maxevents = arg3; 9031 int timeout = arg4; 9032 9033 target_ep = lock_user(VERIFY_WRITE, arg2, 9034 maxevents * sizeof(struct target_epoll_event), 1); 9035 if (!target_ep) { 9036 goto efault; 9037 } 9038 9039 ep = alloca(maxevents * sizeof(struct epoll_event)); 9040 9041 switch (num) { 9042 #if defined(IMPLEMENT_EPOLL_PWAIT) 9043 case TARGET_NR_epoll_pwait: 9044 { 9045 target_sigset_t *target_set; 9046 sigset_t _set, *set = &_set; 9047 9048 if (arg5) { 9049 target_set = lock_user(VERIFY_READ, arg5, 9050 sizeof(target_sigset_t), 1); 9051 if (!target_set) { 9052 unlock_user(target_ep, arg2, 0); 9053 goto efault; 9054 } 9055 target_to_host_sigset(set, target_set); 9056 unlock_user(target_set, arg5, 0); 9057 } else { 9058 set = NULL; 9059 } 9060 9061 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 9062 break; 9063 } 9064 #endif 9065 #if defined(TARGET_NR_epoll_wait) 9066 case TARGET_NR_epoll_wait: 9067 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 9068 break; 9069 #endif 9070 default: 9071 ret = -TARGET_ENOSYS; 9072 } 9073 if (!is_error(ret)) { 9074 int i; 9075 for (i = 0; i < ret; i++) { 9076 target_ep[i].events = tswap32(ep[i].events); 9077 
target_ep[i].data.u64 = tswap64(ep[i].data.u64); 9078 } 9079 } 9080 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 9081 break; 9082 } 9083 #endif 9084 #endif 9085 #ifdef TARGET_NR_prlimit64 9086 case TARGET_NR_prlimit64: 9087 { 9088 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 9089 struct target_rlimit64 *target_rnew, *target_rold; 9090 struct host_rlimit64 rnew, rold, *rnewp = 0; 9091 if (arg3) { 9092 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 9093 goto efault; 9094 } 9095 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 9096 rnew.rlim_max = tswap64(target_rnew->rlim_max); 9097 unlock_user_struct(target_rnew, arg3, 0); 9098 rnewp = &rnew; 9099 } 9100 9101 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0)); 9102 if (!is_error(ret) && arg4) { 9103 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 9104 goto efault; 9105 } 9106 target_rold->rlim_cur = tswap64(rold.rlim_cur); 9107 target_rold->rlim_max = tswap64(rold.rlim_max); 9108 unlock_user_struct(target_rold, arg4, 1); 9109 } 9110 break; 9111 } 9112 #endif 9113 #ifdef TARGET_NR_gethostname 9114 case TARGET_NR_gethostname: 9115 { 9116 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9117 if (name) { 9118 ret = get_errno(gethostname(name, arg2)); 9119 unlock_user(name, arg1, arg2); 9120 } else { 9121 ret = -TARGET_EFAULT; 9122 } 9123 break; 9124 } 9125 #endif 9126 #ifdef TARGET_NR_atomic_cmpxchg_32 9127 case TARGET_NR_atomic_cmpxchg_32: 9128 { 9129 /* should use start_exclusive from main.c */ 9130 abi_ulong mem_value; 9131 if (get_user_u32(mem_value, arg6)) { 9132 target_siginfo_t info; 9133 info.si_signo = SIGSEGV; 9134 info.si_errno = 0; 9135 info.si_code = TARGET_SEGV_MAPERR; 9136 info._sifields._sigfault._addr = arg6; 9137 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 9138 ret = 0xdeadbeef; 9139 9140 } 9141 if (mem_value == arg2) 9142 put_user_u32(arg1, arg6); 9143 ret = mem_value; 9144 break; 9145 } 9146 #endif 9147 #ifdef TARGET_NR_atomic_barrier 9148 case TARGET_NR_atomic_barrier: 9149 { 9150 /* Like the kernel implementation and the qemu arm barrier, no-op this? 
         */
        break;
    }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
        struct target_sigevent *ptarget_sevp;
        struct target_timer_t *ptarget_timer;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
                    goto efault;
                }

                host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
                host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);

                phost_sevp = &host_sevp;
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
                    goto efault;
                }
                ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
                unlock_user_struct(ptarget_timer, arg3, 1);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        arg1 &= 0xffff;
        if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            host_to_target_itimerspec(arg4, &hspec_old);
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        arg1 &= 0xffff;
        if (!arg2) {
            return -TARGET_EFAULT;
        } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_getoverrun(htimer));
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        arg1 &= 0xffff;
        if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[arg1] = 0;
        }
        break;
    }
#endif

    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
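    /* Common exit path: both successful and failing syscalls fall through to
     * here so the return value is reported in one place (DEBUG log and/or
     * strace output) before control goes back to the guest. */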
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}