/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif

/* Rename the host termios-family structs so they do not clash with the
 * target-side definitions of the same names used elsewhere in this file. */
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"

#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG

//#include <linux/msdos_fs.h>
/* Defined locally instead of including <linux/msdos_fs.h> (see the
 * commented-out include above) — presumably to avoid header conflicts;
 * values should mirror that header. */
#define VFAT_IOCTL_READDIR_BOTH   _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT  _IOR('r', 2, struct linux_dirent [2])


#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/* The _syscallN macros each define a static function `name` that invokes
 * the raw host system call __NR_<name> through syscall(2), bypassing any
 * libc wrapper.  The return value is the host syscall's return value
 * (with errno set by syscall() on failure). */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
		  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}


/* Aliases so the sys_* wrapper names defined below resolve to the real
 * host syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

/* On these 64-bit hosts there is no separate _llseek; plain lseek covers
 * the full offset range. */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#ifdef __NR_getdents
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
/* Raw capget/capset wrappers; the glibc versions are avoided here and the
 * kernel structs are used directly. */
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);

/* Bidirectional translation table between target and host open(2)/fcntl(2)
 * flag bits.  Each row is { target_mask, target_bits, host_mask, host_bits }. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

/* getcwd() variant matching the kernel syscall convention: on success
 * returns the path length including the trailing NUL, on failure returns
 * -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}

#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif

#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc has utimensat(): a NULL pathname means "operate on dirfd
 * itself", which maps to futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper, but the host kernel has the syscall: call it raw. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
/* Host kernel lacks ppoll: use syscall number -1 so the wrapper fails
 * cleanly at runtime instead of breaking the build. */
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers and return its index, or -1
 * if all slots are in use.  The slot is marked busy with the sentinel
 * value (timer_t)1 until a real timer handle is stored. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM 
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
};

/* Translate a host errno to the target's value; errnos with no table
 * entry (entry is 0) are assumed to have the same value on both sides. */
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

/* Inverse of host_to_target_errno(); same identity fallback. */
static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

/* Convert a host syscall result into the target convention: -1 becomes
 * the negated target errno, anything else passes through unchanged. */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

/* True if ret is in the kernel's error-return range [-4095, -1]. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

/* Return the host strerror() text for a target errno, or NULL if err is
 * outside the translation-table range. */
char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

/* Guest heap state: current break, the break set at startup, and the
 * host-page-aligned top of the pages reserved for the heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
    	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

/* Unpack a guest fd_set (an array of abi_ulong bit words in guest memory)
 * into a host fd_set, bit by bit.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/* As copy_from_user_fdset(), but a zero (NULL) guest address yields a
 * NULL host pointer instead of a copy, matching select(2) semantics. */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/* Pack a host fd_set back into the guest's abi_ulong bit-array layout. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from the host's HZ to the target's. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Copy a host struct rusage into guest memory, byte-swapping every field
 * as needed.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

/* Convert a guest rlimit value to the host's rlim_t; the target infinity
 * sentinel, or any value that doesn't fit in rlim_t, maps to
 * RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;
    
    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;
    
    return result;
}

/* Inverse of target_to_host_rlim(): host values that are infinite or
 * don't fit the target's abi_long become TARGET_RLIM_INFINITY. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;
    
    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);
    
    return result;
}

/* Map a target RLIMIT_* resource code to the host's; unknown codes are
 * passed through unchanged. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/* Read a struct timeval from guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/* Write a struct timeval into guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a struct mq_attr from guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a struct mq_attr into guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        /* On success, copy the (kernel-modified) sets and timeout back. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

/* pipe2() wrapper; returns host-style -ENOSYS when the host libc lacks
 * pipe2 support. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.
       (Those targets return the second fd in a register and the first
       as the syscall result, instead of writing both into memory.)  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/* Convert a guest ip_mreq/ip_mreqn to a host ip_mreqn; `len` tells the
 * two apart (only the larger target_ip_mreqn carries imr_ifindex). */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

/* Copy a sockaddr from guest memory, fixing up the byte order of
 * sa_family and the length quirks of AF_UNIX addresses. */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/* Copy a sockaddr back into guest memory, byte-swapping sa_family. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into host form.  Only SOL_SOCKET/SCM_RIGHTS payloads (fd arrays) are
 * byte-swapped element-wise; any other cmsg type is logged as unsupported
 * and its payload copied through unconverted. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;
    
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr)) 
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* Inverse of target_to_host_cmsg(): convert a host cmsg chain back into
 * guest form. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr)) 
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len -
CMSG_ALIGN(sizeof (struct cmsghdr)); 1198 1199 space += TARGET_CMSG_SPACE(len); 1200 if (space > msg_controllen) { 1201 space -= TARGET_CMSG_SPACE(len); 1202 gemu_log("Target cmsg overflow\n"); 1203 break; 1204 } 1205 1206 if (cmsg->cmsg_level == SOL_SOCKET) { 1207 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1208 } else { 1209 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1210 } 1211 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1212 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len)); 1213 1214 switch (cmsg->cmsg_level) { 1215 case SOL_SOCKET: 1216 switch (cmsg->cmsg_type) { 1217 case SCM_RIGHTS: 1218 { 1219 int *fd = (int *)data; 1220 int *target_fd = (int *)target_data; 1221 int i, numfds = len / sizeof(int); 1222 1223 for (i = 0; i < numfds; i++) 1224 target_fd[i] = tswap32(fd[i]); 1225 break; 1226 } 1227 case SO_TIMESTAMP: 1228 { 1229 struct timeval *tv = (struct timeval *)data; 1230 struct target_timeval *target_tv = 1231 (struct target_timeval *)target_data; 1232 1233 if (len != sizeof(struct timeval)) 1234 goto unimplemented; 1235 1236 /* copy struct timeval to target */ 1237 target_tv->tv_sec = tswapal(tv->tv_sec); 1238 target_tv->tv_usec = tswapal(tv->tv_usec); 1239 break; 1240 } 1241 case SCM_CREDENTIALS: 1242 { 1243 struct ucred *cred = (struct ucred *)data; 1244 struct target_ucred *target_cred = 1245 (struct target_ucred *)target_data; 1246 1247 __put_user(cred->pid, &target_cred->pid); 1248 __put_user(cred->uid, &target_cred->uid); 1249 __put_user(cred->gid, &target_cred->gid); 1250 break; 1251 } 1252 default: 1253 goto unimplemented; 1254 } 1255 break; 1256 1257 default: 1258 unimplemented: 1259 gemu_log("Unsupported ancillary data: %d/%d\n", 1260 cmsg->cmsg_level, cmsg->cmsg_type); 1261 memcpy(target_data, data, len); 1262 } 1263 1264 cmsg = CMSG_NXTHDR(msgh, cmsg); 1265 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1266 } 1267 unlock_user(target_cmsg, target_cmsg_addr, space); 1268 the_end: 1269 
target_msgh->msg_controllen = tswapal(space); 1270 return 0; 1271 } 1272 1273 /* do_setsockopt() Must return target values and target errnos. */ 1274 static abi_long do_setsockopt(int sockfd, int level, int optname, 1275 abi_ulong optval_addr, socklen_t optlen) 1276 { 1277 abi_long ret; 1278 int val; 1279 struct ip_mreqn *ip_mreq; 1280 struct ip_mreq_source *ip_mreq_source; 1281 1282 switch(level) { 1283 case SOL_TCP: 1284 /* TCP options all take an 'int' value. */ 1285 if (optlen < sizeof(uint32_t)) 1286 return -TARGET_EINVAL; 1287 1288 if (get_user_u32(val, optval_addr)) 1289 return -TARGET_EFAULT; 1290 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1291 break; 1292 case SOL_IP: 1293 switch(optname) { 1294 case IP_TOS: 1295 case IP_TTL: 1296 case IP_HDRINCL: 1297 case IP_ROUTER_ALERT: 1298 case IP_RECVOPTS: 1299 case IP_RETOPTS: 1300 case IP_PKTINFO: 1301 case IP_MTU_DISCOVER: 1302 case IP_RECVERR: 1303 case IP_RECVTOS: 1304 #ifdef IP_FREEBIND 1305 case IP_FREEBIND: 1306 #endif 1307 case IP_MULTICAST_TTL: 1308 case IP_MULTICAST_LOOP: 1309 val = 0; 1310 if (optlen >= sizeof(uint32_t)) { 1311 if (get_user_u32(val, optval_addr)) 1312 return -TARGET_EFAULT; 1313 } else if (optlen >= 1) { 1314 if (get_user_u8(val, optval_addr)) 1315 return -TARGET_EFAULT; 1316 } 1317 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1318 break; 1319 case IP_ADD_MEMBERSHIP: 1320 case IP_DROP_MEMBERSHIP: 1321 if (optlen < sizeof (struct target_ip_mreq) || 1322 optlen > sizeof (struct target_ip_mreqn)) 1323 return -TARGET_EINVAL; 1324 1325 ip_mreq = (struct ip_mreqn *) alloca(optlen); 1326 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1327 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1328 break; 1329 1330 case IP_BLOCK_SOURCE: 1331 case IP_UNBLOCK_SOURCE: 1332 case IP_ADD_SOURCE_MEMBERSHIP: 1333 case IP_DROP_SOURCE_MEMBERSHIP: 1334 if (optlen != sizeof (struct target_ip_mreq_source)) 1335 return 
-TARGET_EINVAL; 1336 1337 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1338 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1339 unlock_user (ip_mreq_source, optval_addr, 0); 1340 break; 1341 1342 default: 1343 goto unimplemented; 1344 } 1345 break; 1346 case SOL_IPV6: 1347 switch (optname) { 1348 case IPV6_MTU_DISCOVER: 1349 case IPV6_MTU: 1350 case IPV6_V6ONLY: 1351 case IPV6_RECVPKTINFO: 1352 val = 0; 1353 if (optlen < sizeof(uint32_t)) { 1354 return -TARGET_EINVAL; 1355 } 1356 if (get_user_u32(val, optval_addr)) { 1357 return -TARGET_EFAULT; 1358 } 1359 ret = get_errno(setsockopt(sockfd, level, optname, 1360 &val, sizeof(val))); 1361 break; 1362 default: 1363 goto unimplemented; 1364 } 1365 break; 1366 case SOL_RAW: 1367 switch (optname) { 1368 case ICMP_FILTER: 1369 /* struct icmp_filter takes an u32 value */ 1370 if (optlen < sizeof(uint32_t)) { 1371 return -TARGET_EINVAL; 1372 } 1373 1374 if (get_user_u32(val, optval_addr)) { 1375 return -TARGET_EFAULT; 1376 } 1377 ret = get_errno(setsockopt(sockfd, level, optname, 1378 &val, sizeof(val))); 1379 break; 1380 1381 default: 1382 goto unimplemented; 1383 } 1384 break; 1385 case TARGET_SOL_SOCKET: 1386 switch (optname) { 1387 case TARGET_SO_RCVTIMEO: 1388 { 1389 struct timeval tv; 1390 1391 optname = SO_RCVTIMEO; 1392 1393 set_timeout: 1394 if (optlen != sizeof(struct target_timeval)) { 1395 return -TARGET_EINVAL; 1396 } 1397 1398 if (copy_from_user_timeval(&tv, optval_addr)) { 1399 return -TARGET_EFAULT; 1400 } 1401 1402 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 1403 &tv, sizeof(tv))); 1404 return ret; 1405 } 1406 case TARGET_SO_SNDTIMEO: 1407 optname = SO_SNDTIMEO; 1408 goto set_timeout; 1409 case TARGET_SO_ATTACH_FILTER: 1410 { 1411 struct target_sock_fprog *tfprog; 1412 struct target_sock_filter *tfilter; 1413 struct sock_fprog fprog; 1414 struct sock_filter *filter; 1415 int i; 1416 1417 if (optlen != sizeof(*tfprog)) { 1418 return 
-TARGET_EINVAL; 1419 } 1420 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 1421 return -TARGET_EFAULT; 1422 } 1423 if (!lock_user_struct(VERIFY_READ, tfilter, 1424 tswapal(tfprog->filter), 0)) { 1425 unlock_user_struct(tfprog, optval_addr, 1); 1426 return -TARGET_EFAULT; 1427 } 1428 1429 fprog.len = tswap16(tfprog->len); 1430 filter = malloc(fprog.len * sizeof(*filter)); 1431 if (filter == NULL) { 1432 unlock_user_struct(tfilter, tfprog->filter, 1); 1433 unlock_user_struct(tfprog, optval_addr, 1); 1434 return -TARGET_ENOMEM; 1435 } 1436 for (i = 0; i < fprog.len; i++) { 1437 filter[i].code = tswap16(tfilter[i].code); 1438 filter[i].jt = tfilter[i].jt; 1439 filter[i].jf = tfilter[i].jf; 1440 filter[i].k = tswap32(tfilter[i].k); 1441 } 1442 fprog.filter = filter; 1443 1444 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 1445 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 1446 free(filter); 1447 1448 unlock_user_struct(tfilter, tfprog->filter, 1); 1449 unlock_user_struct(tfprog, optval_addr, 1); 1450 return ret; 1451 } 1452 /* Options with 'int' argument. 
*/ 1453 case TARGET_SO_DEBUG: 1454 optname = SO_DEBUG; 1455 break; 1456 case TARGET_SO_REUSEADDR: 1457 optname = SO_REUSEADDR; 1458 break; 1459 case TARGET_SO_TYPE: 1460 optname = SO_TYPE; 1461 break; 1462 case TARGET_SO_ERROR: 1463 optname = SO_ERROR; 1464 break; 1465 case TARGET_SO_DONTROUTE: 1466 optname = SO_DONTROUTE; 1467 break; 1468 case TARGET_SO_BROADCAST: 1469 optname = SO_BROADCAST; 1470 break; 1471 case TARGET_SO_SNDBUF: 1472 optname = SO_SNDBUF; 1473 break; 1474 case TARGET_SO_RCVBUF: 1475 optname = SO_RCVBUF; 1476 break; 1477 case TARGET_SO_KEEPALIVE: 1478 optname = SO_KEEPALIVE; 1479 break; 1480 case TARGET_SO_OOBINLINE: 1481 optname = SO_OOBINLINE; 1482 break; 1483 case TARGET_SO_NO_CHECK: 1484 optname = SO_NO_CHECK; 1485 break; 1486 case TARGET_SO_PRIORITY: 1487 optname = SO_PRIORITY; 1488 break; 1489 #ifdef SO_BSDCOMPAT 1490 case TARGET_SO_BSDCOMPAT: 1491 optname = SO_BSDCOMPAT; 1492 break; 1493 #endif 1494 case TARGET_SO_PASSCRED: 1495 optname = SO_PASSCRED; 1496 break; 1497 case TARGET_SO_TIMESTAMP: 1498 optname = SO_TIMESTAMP; 1499 break; 1500 case TARGET_SO_RCVLOWAT: 1501 optname = SO_RCVLOWAT; 1502 break; 1503 break; 1504 default: 1505 goto unimplemented; 1506 } 1507 if (optlen < sizeof(uint32_t)) 1508 return -TARGET_EINVAL; 1509 1510 if (get_user_u32(val, optval_addr)) 1511 return -TARGET_EFAULT; 1512 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 1513 break; 1514 default: 1515 unimplemented: 1516 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname); 1517 ret = -TARGET_ENOPROTOOPT; 1518 } 1519 return ret; 1520 } 1521 1522 /* do_getsockopt() Must return target values and target errnos. 
*/ 1523 static abi_long do_getsockopt(int sockfd, int level, int optname, 1524 abi_ulong optval_addr, abi_ulong optlen) 1525 { 1526 abi_long ret; 1527 int len, val; 1528 socklen_t lv; 1529 1530 switch(level) { 1531 case TARGET_SOL_SOCKET: 1532 level = SOL_SOCKET; 1533 switch (optname) { 1534 /* These don't just return a single integer */ 1535 case TARGET_SO_LINGER: 1536 case TARGET_SO_RCVTIMEO: 1537 case TARGET_SO_SNDTIMEO: 1538 case TARGET_SO_PEERNAME: 1539 goto unimplemented; 1540 case TARGET_SO_PEERCRED: { 1541 struct ucred cr; 1542 socklen_t crlen; 1543 struct target_ucred *tcr; 1544 1545 if (get_user_u32(len, optlen)) { 1546 return -TARGET_EFAULT; 1547 } 1548 if (len < 0) { 1549 return -TARGET_EINVAL; 1550 } 1551 1552 crlen = sizeof(cr); 1553 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 1554 &cr, &crlen)); 1555 if (ret < 0) { 1556 return ret; 1557 } 1558 if (len > crlen) { 1559 len = crlen; 1560 } 1561 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 1562 return -TARGET_EFAULT; 1563 } 1564 __put_user(cr.pid, &tcr->pid); 1565 __put_user(cr.uid, &tcr->uid); 1566 __put_user(cr.gid, &tcr->gid); 1567 unlock_user_struct(tcr, optval_addr, 1); 1568 if (put_user_u32(len, optlen)) { 1569 return -TARGET_EFAULT; 1570 } 1571 break; 1572 } 1573 /* Options with 'int' argument. 
*/ 1574 case TARGET_SO_DEBUG: 1575 optname = SO_DEBUG; 1576 goto int_case; 1577 case TARGET_SO_REUSEADDR: 1578 optname = SO_REUSEADDR; 1579 goto int_case; 1580 case TARGET_SO_TYPE: 1581 optname = SO_TYPE; 1582 goto int_case; 1583 case TARGET_SO_ERROR: 1584 optname = SO_ERROR; 1585 goto int_case; 1586 case TARGET_SO_DONTROUTE: 1587 optname = SO_DONTROUTE; 1588 goto int_case; 1589 case TARGET_SO_BROADCAST: 1590 optname = SO_BROADCAST; 1591 goto int_case; 1592 case TARGET_SO_SNDBUF: 1593 optname = SO_SNDBUF; 1594 goto int_case; 1595 case TARGET_SO_RCVBUF: 1596 optname = SO_RCVBUF; 1597 goto int_case; 1598 case TARGET_SO_KEEPALIVE: 1599 optname = SO_KEEPALIVE; 1600 goto int_case; 1601 case TARGET_SO_OOBINLINE: 1602 optname = SO_OOBINLINE; 1603 goto int_case; 1604 case TARGET_SO_NO_CHECK: 1605 optname = SO_NO_CHECK; 1606 goto int_case; 1607 case TARGET_SO_PRIORITY: 1608 optname = SO_PRIORITY; 1609 goto int_case; 1610 #ifdef SO_BSDCOMPAT 1611 case TARGET_SO_BSDCOMPAT: 1612 optname = SO_BSDCOMPAT; 1613 goto int_case; 1614 #endif 1615 case TARGET_SO_PASSCRED: 1616 optname = SO_PASSCRED; 1617 goto int_case; 1618 case TARGET_SO_TIMESTAMP: 1619 optname = SO_TIMESTAMP; 1620 goto int_case; 1621 case TARGET_SO_RCVLOWAT: 1622 optname = SO_RCVLOWAT; 1623 goto int_case; 1624 default: 1625 goto int_case; 1626 } 1627 break; 1628 case SOL_TCP: 1629 /* TCP options all take an 'int' value. 
*/ 1630 int_case: 1631 if (get_user_u32(len, optlen)) 1632 return -TARGET_EFAULT; 1633 if (len < 0) 1634 return -TARGET_EINVAL; 1635 lv = sizeof(lv); 1636 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1637 if (ret < 0) 1638 return ret; 1639 if (len > lv) 1640 len = lv; 1641 if (len == 4) { 1642 if (put_user_u32(val, optval_addr)) 1643 return -TARGET_EFAULT; 1644 } else { 1645 if (put_user_u8(val, optval_addr)) 1646 return -TARGET_EFAULT; 1647 } 1648 if (put_user_u32(len, optlen)) 1649 return -TARGET_EFAULT; 1650 break; 1651 case SOL_IP: 1652 switch(optname) { 1653 case IP_TOS: 1654 case IP_TTL: 1655 case IP_HDRINCL: 1656 case IP_ROUTER_ALERT: 1657 case IP_RECVOPTS: 1658 case IP_RETOPTS: 1659 case IP_PKTINFO: 1660 case IP_MTU_DISCOVER: 1661 case IP_RECVERR: 1662 case IP_RECVTOS: 1663 #ifdef IP_FREEBIND 1664 case IP_FREEBIND: 1665 #endif 1666 case IP_MULTICAST_TTL: 1667 case IP_MULTICAST_LOOP: 1668 if (get_user_u32(len, optlen)) 1669 return -TARGET_EFAULT; 1670 if (len < 0) 1671 return -TARGET_EINVAL; 1672 lv = sizeof(lv); 1673 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1674 if (ret < 0) 1675 return ret; 1676 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 1677 len = 1; 1678 if (put_user_u32(len, optlen) 1679 || put_user_u8(val, optval_addr)) 1680 return -TARGET_EFAULT; 1681 } else { 1682 if (len > sizeof(int)) 1683 len = sizeof(int); 1684 if (put_user_u32(len, optlen) 1685 || put_user_u32(val, optval_addr)) 1686 return -TARGET_EFAULT; 1687 } 1688 break; 1689 default: 1690 ret = -TARGET_ENOPROTOOPT; 1691 break; 1692 } 1693 break; 1694 default: 1695 unimplemented: 1696 gemu_log("getsockopt level=%d optname=%d not yet supported\n", 1697 level, optname); 1698 ret = -TARGET_EOPNOTSUPP; 1699 break; 1700 } 1701 return ret; 1702 } 1703 1704 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 1705 int count, int copy) 1706 { 1707 struct target_iovec *target_vec; 1708 struct iovec *vec; 1709 abi_ulong total_len, 
max_len; 1710 int i; 1711 int err = 0; 1712 1713 if (count == 0) { 1714 errno = 0; 1715 return NULL; 1716 } 1717 if (count < 0 || count > IOV_MAX) { 1718 errno = EINVAL; 1719 return NULL; 1720 } 1721 1722 vec = calloc(count, sizeof(struct iovec)); 1723 if (vec == NULL) { 1724 errno = ENOMEM; 1725 return NULL; 1726 } 1727 1728 target_vec = lock_user(VERIFY_READ, target_addr, 1729 count * sizeof(struct target_iovec), 1); 1730 if (target_vec == NULL) { 1731 err = EFAULT; 1732 goto fail2; 1733 } 1734 1735 /* ??? If host page size > target page size, this will result in a 1736 value larger than what we can actually support. */ 1737 max_len = 0x7fffffff & TARGET_PAGE_MASK; 1738 total_len = 0; 1739 1740 for (i = 0; i < count; i++) { 1741 abi_ulong base = tswapal(target_vec[i].iov_base); 1742 abi_long len = tswapal(target_vec[i].iov_len); 1743 1744 if (len < 0) { 1745 err = EINVAL; 1746 goto fail; 1747 } else if (len == 0) { 1748 /* Zero length pointer is ignored. */ 1749 vec[i].iov_base = 0; 1750 } else { 1751 vec[i].iov_base = lock_user(type, base, len, copy); 1752 if (!vec[i].iov_base) { 1753 err = EFAULT; 1754 goto fail; 1755 } 1756 if (len > max_len - total_len) { 1757 len = max_len - total_len; 1758 } 1759 } 1760 vec[i].iov_len = len; 1761 total_len += len; 1762 } 1763 1764 unlock_user(target_vec, target_addr, 0); 1765 return vec; 1766 1767 fail: 1768 unlock_user(target_vec, target_addr, 0); 1769 fail2: 1770 free(vec); 1771 errno = err; 1772 return NULL; 1773 } 1774 1775 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 1776 int count, int copy) 1777 { 1778 struct target_iovec *target_vec; 1779 int i; 1780 1781 target_vec = lock_user(VERIFY_READ, target_addr, 1782 count * sizeof(struct target_iovec), 1); 1783 if (target_vec) { 1784 for (i = 0; i < count; i++) { 1785 abi_ulong base = tswapal(target_vec[i].iov_base); 1786 abi_long len = tswapal(target_vec[i].iov_base); 1787 if (len < 0) { 1788 break; 1789 } 1790 unlock_user(vec[i].iov_base, base, copy 
? vec[i].iov_len : 0); 1791 } 1792 unlock_user(target_vec, target_addr, 0); 1793 } 1794 1795 free(vec); 1796 } 1797 1798 static inline int target_to_host_sock_type(int *type) 1799 { 1800 int host_type = 0; 1801 int target_type = *type; 1802 1803 switch (target_type & TARGET_SOCK_TYPE_MASK) { 1804 case TARGET_SOCK_DGRAM: 1805 host_type = SOCK_DGRAM; 1806 break; 1807 case TARGET_SOCK_STREAM: 1808 host_type = SOCK_STREAM; 1809 break; 1810 default: 1811 host_type = target_type & TARGET_SOCK_TYPE_MASK; 1812 break; 1813 } 1814 if (target_type & TARGET_SOCK_CLOEXEC) { 1815 #if defined(SOCK_CLOEXEC) 1816 host_type |= SOCK_CLOEXEC; 1817 #else 1818 return -TARGET_EINVAL; 1819 #endif 1820 } 1821 if (target_type & TARGET_SOCK_NONBLOCK) { 1822 #if defined(SOCK_NONBLOCK) 1823 host_type |= SOCK_NONBLOCK; 1824 #elif !defined(O_NONBLOCK) 1825 return -TARGET_EINVAL; 1826 #endif 1827 } 1828 *type = host_type; 1829 return 0; 1830 } 1831 1832 /* Try to emulate socket type flags after socket creation. */ 1833 static int sock_flags_fixup(int fd, int target_type) 1834 { 1835 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 1836 if (target_type & TARGET_SOCK_NONBLOCK) { 1837 int flags = fcntl(fd, F_GETFL); 1838 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 1839 close(fd); 1840 return -TARGET_EINVAL; 1841 } 1842 } 1843 #endif 1844 return fd; 1845 } 1846 1847 /* do_socket() Must return target values and target errnos. */ 1848 static abi_long do_socket(int domain, int type, int protocol) 1849 { 1850 int target_type = type; 1851 int ret; 1852 1853 ret = target_to_host_sock_type(&type); 1854 if (ret) { 1855 return ret; 1856 } 1857 1858 if (domain == PF_NETLINK) 1859 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */ 1860 ret = get_errno(socket(domain, type, protocol)); 1861 if (ret >= 0) { 1862 ret = sock_flags_fixup(ret, target_type); 1863 } 1864 return ret; 1865 } 1866 1867 /* do_bind() Must return target values and target errnos. 
*/ 1868 static abi_long do_bind(int sockfd, abi_ulong target_addr, 1869 socklen_t addrlen) 1870 { 1871 void *addr; 1872 abi_long ret; 1873 1874 if ((int)addrlen < 0) { 1875 return -TARGET_EINVAL; 1876 } 1877 1878 addr = alloca(addrlen+1); 1879 1880 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1881 if (ret) 1882 return ret; 1883 1884 return get_errno(bind(sockfd, addr, addrlen)); 1885 } 1886 1887 /* do_connect() Must return target values and target errnos. */ 1888 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1889 socklen_t addrlen) 1890 { 1891 void *addr; 1892 abi_long ret; 1893 1894 if ((int)addrlen < 0) { 1895 return -TARGET_EINVAL; 1896 } 1897 1898 addr = alloca(addrlen); 1899 1900 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1901 if (ret) 1902 return ret; 1903 1904 return get_errno(connect(sockfd, addr, addrlen)); 1905 } 1906 1907 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 1908 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 1909 int flags, int send) 1910 { 1911 abi_long ret, len; 1912 struct msghdr msg; 1913 int count; 1914 struct iovec *vec; 1915 abi_ulong target_vec; 1916 1917 if (msgp->msg_name) { 1918 msg.msg_namelen = tswap32(msgp->msg_namelen); 1919 msg.msg_name = alloca(msg.msg_namelen); 1920 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1921 msg.msg_namelen); 1922 if (ret) { 1923 goto out2; 1924 } 1925 } else { 1926 msg.msg_name = NULL; 1927 msg.msg_namelen = 0; 1928 } 1929 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1930 msg.msg_control = alloca(msg.msg_controllen); 1931 msg.msg_flags = tswap32(msgp->msg_flags); 1932 1933 count = tswapal(msgp->msg_iovlen); 1934 target_vec = tswapal(msgp->msg_iov); 1935 vec = lock_iovec(send ? 
VERIFY_READ : VERIFY_WRITE, 1936 target_vec, count, send); 1937 if (vec == NULL) { 1938 ret = -host_to_target_errno(errno); 1939 goto out2; 1940 } 1941 msg.msg_iovlen = count; 1942 msg.msg_iov = vec; 1943 1944 if (send) { 1945 ret = target_to_host_cmsg(&msg, msgp); 1946 if (ret == 0) 1947 ret = get_errno(sendmsg(fd, &msg, flags)); 1948 } else { 1949 ret = get_errno(recvmsg(fd, &msg, flags)); 1950 if (!is_error(ret)) { 1951 len = ret; 1952 ret = host_to_target_cmsg(msgp, &msg); 1953 if (!is_error(ret)) { 1954 msgp->msg_namelen = tswap32(msg.msg_namelen); 1955 if (msg.msg_name != NULL) { 1956 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 1957 msg.msg_name, msg.msg_namelen); 1958 if (ret) { 1959 goto out; 1960 } 1961 } 1962 1963 ret = len; 1964 } 1965 } 1966 } 1967 1968 out: 1969 unlock_iovec(vec, target_vec, count, !send); 1970 out2: 1971 return ret; 1972 } 1973 1974 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1975 int flags, int send) 1976 { 1977 abi_long ret; 1978 struct target_msghdr *msgp; 1979 1980 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1981 msgp, 1982 target_msg, 1983 send ? 1 : 0)) { 1984 return -TARGET_EFAULT; 1985 } 1986 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 1987 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 1988 return ret; 1989 } 1990 1991 #ifdef TARGET_NR_sendmmsg 1992 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 1993 * so it might not have this *mmsg-specific flag either. 
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Common implementation of sendmmsg/recvmmsg: iterates over the guest's
 * vector of mmsghdr entries, forwarding each to do_sendrecvmsg_locked()
 * and recording the per-message byte count in msg_len.  Stops at the
 * first error; returns the number of messages processed if any
 * succeeded, otherwise the error. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp oversized vectors, like the kernel does. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    /* VERIFY_WRITE because msg_len is written back; copy-in (last arg 1)
     * because the headers are also read. */
    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed (i of them). */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
#endif

/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif

/* do_accept4() Must return target values and target errnos.
*/ 2054 static abi_long do_accept4(int fd, abi_ulong target_addr, 2055 abi_ulong target_addrlen_addr, int flags) 2056 { 2057 socklen_t addrlen; 2058 void *addr; 2059 abi_long ret; 2060 int host_flags; 2061 2062 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 2063 2064 if (target_addr == 0) { 2065 return get_errno(accept4(fd, NULL, NULL, host_flags)); 2066 } 2067 2068 /* linux returns EINVAL if addrlen pointer is invalid */ 2069 if (get_user_u32(addrlen, target_addrlen_addr)) 2070 return -TARGET_EINVAL; 2071 2072 if ((int)addrlen < 0) { 2073 return -TARGET_EINVAL; 2074 } 2075 2076 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2077 return -TARGET_EINVAL; 2078 2079 addr = alloca(addrlen); 2080 2081 ret = get_errno(accept4(fd, addr, &addrlen, host_flags)); 2082 if (!is_error(ret)) { 2083 host_to_target_sockaddr(target_addr, addr, addrlen); 2084 if (put_user_u32(addrlen, target_addrlen_addr)) 2085 ret = -TARGET_EFAULT; 2086 } 2087 return ret; 2088 } 2089 2090 /* do_getpeername() Must return target values and target errnos. */ 2091 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2092 abi_ulong target_addrlen_addr) 2093 { 2094 socklen_t addrlen; 2095 void *addr; 2096 abi_long ret; 2097 2098 if (get_user_u32(addrlen, target_addrlen_addr)) 2099 return -TARGET_EFAULT; 2100 2101 if ((int)addrlen < 0) { 2102 return -TARGET_EINVAL; 2103 } 2104 2105 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2106 return -TARGET_EFAULT; 2107 2108 addr = alloca(addrlen); 2109 2110 ret = get_errno(getpeername(fd, addr, &addrlen)); 2111 if (!is_error(ret)) { 2112 host_to_target_sockaddr(target_addr, addr, addrlen); 2113 if (put_user_u32(addrlen, target_addrlen_addr)) 2114 ret = -TARGET_EFAULT; 2115 } 2116 return ret; 2117 } 2118 2119 /* do_getsockname() Must return target values and target errnos. 
*/ 2120 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2121 abi_ulong target_addrlen_addr) 2122 { 2123 socklen_t addrlen; 2124 void *addr; 2125 abi_long ret; 2126 2127 if (get_user_u32(addrlen, target_addrlen_addr)) 2128 return -TARGET_EFAULT; 2129 2130 if ((int)addrlen < 0) { 2131 return -TARGET_EINVAL; 2132 } 2133 2134 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2135 return -TARGET_EFAULT; 2136 2137 addr = alloca(addrlen); 2138 2139 ret = get_errno(getsockname(fd, addr, &addrlen)); 2140 if (!is_error(ret)) { 2141 host_to_target_sockaddr(target_addr, addr, addrlen); 2142 if (put_user_u32(addrlen, target_addrlen_addr)) 2143 ret = -TARGET_EFAULT; 2144 } 2145 return ret; 2146 } 2147 2148 /* do_socketpair() Must return target values and target errnos. */ 2149 static abi_long do_socketpair(int domain, int type, int protocol, 2150 abi_ulong target_tab_addr) 2151 { 2152 int tab[2]; 2153 abi_long ret; 2154 2155 target_to_host_sock_type(&type); 2156 2157 ret = get_errno(socketpair(domain, type, protocol, tab)); 2158 if (!is_error(ret)) { 2159 if (put_user_s32(tab[0], target_tab_addr) 2160 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2161 ret = -TARGET_EFAULT; 2162 } 2163 return ret; 2164 } 2165 2166 /* do_sendto() Must return target values and target errnos. 
*/ 2167 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2168 abi_ulong target_addr, socklen_t addrlen) 2169 { 2170 void *addr; 2171 void *host_msg; 2172 abi_long ret; 2173 2174 if ((int)addrlen < 0) { 2175 return -TARGET_EINVAL; 2176 } 2177 2178 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2179 if (!host_msg) 2180 return -TARGET_EFAULT; 2181 if (target_addr) { 2182 addr = alloca(addrlen); 2183 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2184 if (ret) { 2185 unlock_user(host_msg, msg, 0); 2186 return ret; 2187 } 2188 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2189 } else { 2190 ret = get_errno(send(fd, host_msg, len, flags)); 2191 } 2192 unlock_user(host_msg, msg, 0); 2193 return ret; 2194 } 2195 2196 /* do_recvfrom() Must return target values and target errnos. */ 2197 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2198 abi_ulong target_addr, 2199 abi_ulong target_addrlen) 2200 { 2201 socklen_t addrlen; 2202 void *addr; 2203 void *host_msg; 2204 abi_long ret; 2205 2206 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2207 if (!host_msg) 2208 return -TARGET_EFAULT; 2209 if (target_addr) { 2210 if (get_user_u32(addrlen, target_addrlen)) { 2211 ret = -TARGET_EFAULT; 2212 goto fail; 2213 } 2214 if ((int)addrlen < 0) { 2215 ret = -TARGET_EINVAL; 2216 goto fail; 2217 } 2218 addr = alloca(addrlen); 2219 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2220 } else { 2221 addr = NULL; /* To keep compiler quiet. 
 */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            /* Copy the peer address and the (possibly shrunk) addrlen back. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: flush the received bytes back to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path: nothing received, do not copy back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}

#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 * Demultiplexes the legacy socketcall(2) syscall: reads the per-call
 * argument vector from guest memory at vptr, then dispatches to the
 * individual do_* helpers.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif

/* Fixed-size table tracking guest shmat() mappings so do_shmdt() can
 * restore page flags later.  NOTE(review): attachments beyond
 * N_SHM_REGIONS are silently not tracked.
 */
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];

/* Guest-layout semid_ds, used only to reach sem_perm and the time/nsems
 * fields at their target offsets.
 */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};

/* Convert a guest ipc_perm (embedded at the head of a guest semid_ds at
 * target_addr) into the host's struct ipc_perm.  The mode/__seq field
 * widths differ per target ABI, hence the #ifs.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_ipc_perm(): write a host ipc_perm back into
 * the guest structure at target_addr.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip =
&(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode/__seq widths are ABI-dependent; mirror the read-side converter. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Convert a guest semid_ds at target_addr into the host structure.
 * The embedded perm block is converted by target_to_host_ipc_perm(),
 * which re-locks the same guest page.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_semid_ds(): write a host semid_ds back to
 * guest memory at target_addr.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest-layout seminfo (returned by semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host seminfo out to the guest structure at target_addr,
 * byte-swapping each field as needed.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}

/* Host-side semctl argument union (glibc does not define it). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view of the same union: pointers are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

/* Read the guest's GETALL/SETALL value array into a freshly malloc'd
 * host array (*host_array); the caller owns and must free it.  The
 * semaphore count is obtained via IPC_STAT on the host set.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        free(*host_array);
2501 return -TARGET_EFAULT; 2502 } 2503 2504 for(i=0; i<nsems; i++) { 2505 __get_user((*host_array)[i], &array[i]); 2506 } 2507 unlock_user(array, target_addr, 0); 2508 2509 return 0; 2510 } 2511 2512 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2513 unsigned short **host_array) 2514 { 2515 int nsems; 2516 unsigned short *array; 2517 union semun semun; 2518 struct semid_ds semid_ds; 2519 int i, ret; 2520 2521 semun.buf = &semid_ds; 2522 2523 ret = semctl(semid, 0, IPC_STAT, semun); 2524 if (ret == -1) 2525 return get_errno(ret); 2526 2527 nsems = semid_ds.sem_nsems; 2528 2529 array = lock_user(VERIFY_WRITE, target_addr, 2530 nsems*sizeof(unsigned short), 0); 2531 if (!array) 2532 return -TARGET_EFAULT; 2533 2534 for(i=0; i<nsems; i++) { 2535 __put_user((*host_array)[i], &array[i]); 2536 } 2537 free(*host_array); 2538 unlock_user(array, target_addr, 1); 2539 2540 return 0; 2541 } 2542 2543 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2544 union target_semun target_su) 2545 { 2546 union semun arg; 2547 struct semid_ds dsarg; 2548 unsigned short *array = NULL; 2549 struct seminfo seminfo; 2550 abi_long ret = -TARGET_EINVAL; 2551 abi_long err; 2552 cmd &= 0xff; 2553 2554 switch( cmd ) { 2555 case GETVAL: 2556 case SETVAL: 2557 arg.val = tswap32(target_su.val); 2558 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2559 target_su.val = tswap32(arg.val); 2560 break; 2561 case GETALL: 2562 case SETALL: 2563 err = target_to_host_semarray(semid, &array, target_su.array); 2564 if (err) 2565 return err; 2566 arg.array = array; 2567 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2568 err = host_to_target_semarray(semid, target_su.array, &array); 2569 if (err) 2570 return err; 2571 break; 2572 case IPC_STAT: 2573 case IPC_SET: 2574 case SEM_STAT: 2575 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2576 if (err) 2577 return err; 2578 arg.buf = &dsarg; 2579 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2580 err 
= host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* Commands taking no argument union. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}

/* Guest-layout sembuf for semop(2). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

/* Copy nsops guest sembuf entries from target_addr into the caller's
 * host_sembuf array, byte-swapping each field.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

/* Emulate semop(2): convert the guest op array then call the host.
 * NOTE(review): VLA sized by guest-controlled nsops — stack usage is
 * bounded only by the kernel's SEMOPM limit on the host call.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}

/* Guest-layout msqid_ds; 32-bit ABIs carry padding words after each
 * time field.
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Convert a guest msqid_ds at target_addr into the host structure. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_msqid_ds(): write host msqid_ds back to the
 * guest at target_addr.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

/* Guest-layout msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host msginfo out to the guest structure at target_addr. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

/* Emulate msgctl(2): convert arguments per command, call the host,
 * convert results back.  cmd is masked to its low byte.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}

/* Guest-layout msgbuf header; mtext is variable-length. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

/* Emulate msgsnd(2): copy type + payload into a host buffer and send. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
struct msgbuf *host_mb; 2779 abi_long ret = 0; 2780 2781 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2782 return -TARGET_EFAULT; 2783 host_mb = malloc(msgsz+sizeof(long)); 2784 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2785 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2786 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2787 free(host_mb); 2788 unlock_user_struct(target_mb, msgp, 0); 2789 2790 return ret; 2791 } 2792 2793 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2794 unsigned int msgsz, abi_long msgtyp, 2795 int msgflg) 2796 { 2797 struct target_msgbuf *target_mb; 2798 char *target_mtext; 2799 struct msgbuf *host_mb; 2800 abi_long ret = 0; 2801 2802 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2803 return -TARGET_EFAULT; 2804 2805 host_mb = g_malloc(msgsz+sizeof(long)); 2806 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2807 2808 if (ret > 0) { 2809 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2810 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2811 if (!target_mtext) { 2812 ret = -TARGET_EFAULT; 2813 goto end; 2814 } 2815 memcpy(target_mb->mtext, host_mb->mtext, ret); 2816 unlock_user(target_mtext, target_mtext_addr, ret); 2817 } 2818 2819 target_mb->mtype = tswapal(host_mb->mtype); 2820 2821 end: 2822 if (target_mb) 2823 unlock_user_struct(target_mb, msgp, 1); 2824 g_free(host_mb); 2825 return ret; 2826 } 2827 2828 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2829 abi_ulong target_addr) 2830 { 2831 struct target_shmid_ds *target_sd; 2832 2833 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2834 return -TARGET_EFAULT; 2835 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2836 return -TARGET_EFAULT; 2837 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2838 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2839 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2840 
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_shmid_ds(): write a host shmid_ds back to
 * guest memory at target_addr.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest-layout shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/* Copy a host shminfo out to the guest structure at target_addr. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

/* Guest-layout shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Copy a host shm_info out to the guest structure at target_addr. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

/* Emulate shmctl(2): convert per command, call the host, convert back.
 * cmd is masked to its low byte.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel returns shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

/* Emulate shmat(2).  Picks a guest address (via mmap_find_vma when the
 * guest didn't supply one), attaches on the host, marks guest pages
 * valid, and records the mapping in shm_regions[] for do_shmdt().
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the vma we found may already be reserved. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the mapping in the first free slot (if any) so shmdt can
     * clear the page flags later. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}

/* Emulate shmdt(2): drop the recorded region (clearing guest page
 * flags) and detach on the host.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}

#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 */
/* Demultiplexer for the legacy ipc(2) syscall: the high 16 bits of
 * `call` carry an interface version, the low 16 bits select the
 * operation, which is forwarded to the do_sem*/do_msg*/do_shm* helpers.
 */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old interface: msgp/msgtyp are packed in a struct
                 * that ptr points at. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* Attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

/* First pass over syscall_types.h: build the STRUCT_<name> enum. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk type descriptor per non-special struct. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler signature for ioctls too irregular for the generic
 * thunk-based conversion path. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    unsigned int target_cmd;   /* ioctl number as seen by the guest */
    unsigned int host_cmd;     /* ioctl number to issue on the host */
    const char *name;
    int access;                /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;     /* optional special-case handler */
    const argtype arg_type[5]; /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))       \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed fiemap header from guest to host layout. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Guard the multiplication below against 32-bit overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif

/* Special-case handler for SIOCGIFCONF: struct ifconf embeds a pointer
 * to a caller-supplied ifreq array, so both the struct and the array
 * must be converted in each direction.
 */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, abi_long cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    /* Stash the guest's length and buffer pointer before we overwrite
     * the struct with host-side values. */
    target_ifc_len = host_ifconf->ifc_len;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    /* Scale the guest buffer length to the host ifreq size. */
    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        host_ifconf = malloc(outbufsz);
        if (!host_ifconf) {
            return -TARGET_ENOMEM;
        }
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
        free_buf = 1;
    }
    /* The host ifreq array lives immediately after the ifconf header. */
    host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
	/* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

	/* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

	/* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

	/* copy ifreq[] to target user */

        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        for (i = 0; i < nb_ifreq ; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        }
        unlock_user(argptr, target_ifc_buf, target_ifc_len);
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}

/* Special-case handler for device-mapper ioctls: the dm_ioctl header is
 * followed by command-specific variable-length payload whose layout
 * differs per command, so each direction needs a per-command switch.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            abi_long cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* Copy/convert the inbound payload per command. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* Leading u64 sector number needs byte-swapping. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is a chain of dm_target_spec structs, each followed
         * by a parameter string; `next` links to the following spec. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the outbound payload per command. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
cur_data += nl->next; 3471 remaining_data -= nl->next; 3472 if (!next) { 3473 break; 3474 } 3475 nl = (void*)nl + next; 3476 } 3477 break; 3478 } 3479 case DM_DEV_WAIT: 3480 case DM_TABLE_STATUS: 3481 { 3482 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3483 void *cur_data = argptr; 3484 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3485 int spec_size = thunk_type_size(arg_type, 0); 3486 int i; 3487 3488 for (i = 0; i < host_dm->target_count; i++) { 3489 uint32_t next = spec->next; 3490 int slen = strlen((char*)&spec[1]) + 1; 3491 spec->next = (cur_data - argptr) + spec_size + slen; 3492 if (guest_data_size < spec->next) { 3493 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3494 break; 3495 } 3496 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3497 strcpy(cur_data + spec_size, (char*)&spec[1]); 3498 cur_data = argptr + spec->next; 3499 spec = (void*)host_dm + host_dm->data_start + next; 3500 } 3501 break; 3502 } 3503 case DM_TABLE_DEPS: 3504 { 3505 void *hdata = (void*)host_dm + host_dm->data_start; 3506 int count = *(uint32_t*)hdata; 3507 uint64_t *hdev = hdata + 8; 3508 uint64_t *gdev = argptr + 8; 3509 int i; 3510 3511 *(uint32_t*)argptr = tswap32(count); 3512 for (i = 0; i < count; i++) { 3513 *gdev = tswap64(*hdev); 3514 gdev++; 3515 hdev++; 3516 } 3517 break; 3518 } 3519 case DM_LIST_VERSIONS: 3520 { 3521 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3522 uint32_t remaining_data = guest_data_size; 3523 void *cur_data = argptr; 3524 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3525 int vers_size = thunk_type_size(arg_type, 0); 3526 3527 while (1) { 3528 uint32_t next = vers->next; 3529 if (next) { 3530 vers->next = vers_size + (strlen(vers->name) + 1); 3531 } 3532 if (remaining_data < vers->next) { 3533 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3534 break; 3535 } 3536 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3537 strcpy(cur_data + vers_size, vers->name); 3538 
cur_data += vers->next; 3539 remaining_data -= vers->next; 3540 if (!next) { 3541 break; 3542 } 3543 vers = (void*)vers + next; 3544 } 3545 break; 3546 } 3547 default: 3548 ret = -TARGET_EINVAL; 3549 goto out; 3550 } 3551 unlock_user(argptr, guest_data, guest_data_size); 3552 3553 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3554 if (!argptr) { 3555 ret = -TARGET_EFAULT; 3556 goto out; 3557 } 3558 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3559 unlock_user(argptr, arg, target_size); 3560 } 3561 out: 3562 g_free(big_buf); 3563 return ret; 3564 } 3565 3566 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3567 int fd, abi_long cmd, abi_long arg) 3568 { 3569 const argtype *arg_type = ie->arg_type; 3570 const StructEntry *se; 3571 const argtype *field_types; 3572 const int *dst_offsets, *src_offsets; 3573 int target_size; 3574 void *argptr; 3575 abi_ulong *target_rt_dev_ptr; 3576 unsigned long *host_rt_dev_ptr; 3577 abi_long ret; 3578 int i; 3579 3580 assert(ie->access == IOC_W); 3581 assert(*arg_type == TYPE_PTR); 3582 arg_type++; 3583 assert(*arg_type == TYPE_STRUCT); 3584 target_size = thunk_type_size(arg_type, 0); 3585 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3586 if (!argptr) { 3587 return -TARGET_EFAULT; 3588 } 3589 arg_type++; 3590 assert(*arg_type == (int)STRUCT_rtentry); 3591 se = struct_entries + *arg_type++; 3592 assert(se->convert[0] == NULL); 3593 /* convert struct here to be able to catch rt_dev string */ 3594 field_types = se->field_types; 3595 dst_offsets = se->field_offsets[THUNK_HOST]; 3596 src_offsets = se->field_offsets[THUNK_TARGET]; 3597 for (i = 0; i < se->nb_fields; i++) { 3598 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3599 assert(*field_types == TYPE_PTRVOID); 3600 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3601 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3602 if (*target_rt_dev_ptr != 0) { 3603 *host_rt_dev_ptr = (unsigned 
long)lock_user_string( 3604 tswapal(*target_rt_dev_ptr)); 3605 if (!*host_rt_dev_ptr) { 3606 unlock_user(argptr, arg, 0); 3607 return -TARGET_EFAULT; 3608 } 3609 } else { 3610 *host_rt_dev_ptr = 0; 3611 } 3612 field_types++; 3613 continue; 3614 } 3615 field_types = thunk_convert(buf_temp + dst_offsets[i], 3616 argptr + src_offsets[i], 3617 field_types, THUNK_HOST); 3618 } 3619 unlock_user(argptr, arg, 0); 3620 3621 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3622 if (*host_rt_dev_ptr != 0) { 3623 unlock_user((void *)*host_rt_dev_ptr, 3624 *target_rt_dev_ptr, 0); 3625 } 3626 return ret; 3627 } 3628 3629 static IOCTLEntry ioctl_entries[] = { 3630 #define IOCTL(cmd, access, ...) \ 3631 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3632 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3633 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3634 #include "ioctls.h" 3635 { 0, 0, }, 3636 }; 3637 3638 /* ??? Implement proper locking for ioctls. */ 3639 /* do_ioctl() Must return target values and target errnos. 
 */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan of the generated ioctl table; the {0,0,} entry is the
     * terminator. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Entries with a custom handler (IOCTL_SPECIAL) bypass the generic
     * thunk-based conversion entirely. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* read-only result: issue first, then copy out on success */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* write-only argument: copy in, then issue */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* read-write: copy in, issue, copy back out on success */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

/* termios input-mode flags: target mask/value -> host mask/value. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};

/* termios output-mode flags; multi-bit delay fields (NLDLY, CRDLY, …)
 * get one row per value within the mask. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};

/* termios control-mode flags; CBAUD and CSIZE are multi-bit fields with
 * one row per value within the mask. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};

/* termios local-mode flags: target mask/value -> host mask/value. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};

/* Convert a guest struct target_termios (src) into a host struct
 * host_termios (dst): each flag word goes through its translation table
 * after byteswap, and the control-character array is remapped index by
 * index (target and host VINTR/VQUIT/… positions may differ). */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] =
        target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

/* Inverse of target_to_host_termios: convert a host struct host_termios
 * (src) into a guest struct target_termios (dst), byteswapping the flag
 * words and remapping the control-character indices. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

/* Thunk descriptor for struct termios: custom converters (above) rather
 * than the generic field walker. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};

/* mmap() flag translation: target flag bits -> host flag bits. */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ 0, 0, 0, 0 }
};

#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* Emulate modify_ldt(func=0): copy up to bytecount bytes of the emulated
 * LDT into guest memory at ptr.  Returns the number of bytes copied,
 * 0 when no LDT has been allocated yet, or -TARGET_EFAULT. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?
 */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
/* Emulate modify_ldt(func=1/0x11): install one descriptor into the
 * emulated LDT from a guest struct modify_ldt_ldt_s at ptr.  'oldmode'
 * selects the legacy entry encoding (no 'useable' bit).  The descriptor
 * packing below mirrors the Linux kernel's write_ldt(). */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two 32-bit halves of an x86
     * segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
/* Dispatch modify_ldt(2): func 0 = read, 1 = legacy write, 0x11 = write. */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

#if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into the guest
 * GDT.  entry_number == -1 asks for the first free TLS slot, and the
 * chosen slot is written back to the guest struct. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;
    /* VERIFY_WRITE because a chosen slot number may be written back. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Pick the first unused TLS slot in the GDT. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two 32-bit halves of an x86
     * segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* Emulate get_thread_area(2): read the TLS descriptor selected by the
 * guest struct's entry_number back out of the guest GDT, unpacking the
 * descriptor bits into base/limit/flags. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Inverse of the packing done in do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32 4168 lm = 0; 4169 #else 4170 lm = (entry_2 >> 21) & 1; 4171 #endif 4172 flags = (seg_32bit << 0) | (contents << 1) | 4173 (read_exec_only << 3) | (limit_in_pages << 4) | 4174 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4175 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4176 base_addr = (entry_1 >> 16) | 4177 (entry_2 & 0xff000000) | 4178 ((entry_2 & 0xff) << 16); 4179 target_ldt_info->base_addr = tswapal(base_addr); 4180 target_ldt_info->limit = tswap32(limit); 4181 target_ldt_info->flags = tswap32(flags); 4182 unlock_user_struct(target_ldt_info, ptr, 1); 4183 return 0; 4184 } 4185 #endif /* TARGET_I386 && TARGET_ABI32 */ 4186 4187 #ifndef TARGET_ABI32 4188 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4189 { 4190 abi_long ret = 0; 4191 abi_ulong val; 4192 int idx; 4193 4194 switch(code) { 4195 case TARGET_ARCH_SET_GS: 4196 case TARGET_ARCH_SET_FS: 4197 if (code == TARGET_ARCH_SET_GS) 4198 idx = R_GS; 4199 else 4200 idx = R_FS; 4201 cpu_x86_load_seg(env, idx, 0); 4202 env->segs[idx].base = addr; 4203 break; 4204 case TARGET_ARCH_GET_GS: 4205 case TARGET_ARCH_GET_FS: 4206 if (code == TARGET_ARCH_GET_GS) 4207 idx = R_GS; 4208 else 4209 idx = R_FS; 4210 val = env->segs[idx].base; 4211 if (put_user(val, addr, abi_ulong)) 4212 ret = -TARGET_EFAULT; 4213 break; 4214 default: 4215 ret = -TARGET_EINVAL; 4216 break; 4217 } 4218 return ret; 4219 } 4220 #endif 4221 4222 #endif /* defined(TARGET_I386) */ 4223 4224 #define NEW_STACK_SIZE 0x40000 4225 4226 4227 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 4228 typedef struct { 4229 CPUArchState *env; 4230 pthread_mutex_t mutex; 4231 pthread_cond_t cond; 4232 pthread_t thread; 4233 uint32_t tid; 4234 abi_ulong child_tidptr; 4235 abi_ulong parent_tidptr; 4236 sigset_t sigmask; 4237 } new_thread_info; 4238 4239 static void *clone_func(void *arg) 4240 { 4241 new_thread_info *info = arg; 4242 CPUArchState *env; 4243 CPUState *cpu; 4244 TaskState *ts; 4245 4246 
env = info->env; 4247 cpu = ENV_GET_CPU(env); 4248 thread_cpu = cpu; 4249 ts = (TaskState *)cpu->opaque; 4250 info->tid = gettid(); 4251 cpu->host_tid = info->tid; 4252 task_settid(ts); 4253 if (info->child_tidptr) 4254 put_user_u32(info->tid, info->child_tidptr); 4255 if (info->parent_tidptr) 4256 put_user_u32(info->tid, info->parent_tidptr); 4257 /* Enable signals. */ 4258 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 4259 /* Signal to the parent that we're ready. */ 4260 pthread_mutex_lock(&info->mutex); 4261 pthread_cond_broadcast(&info->cond); 4262 pthread_mutex_unlock(&info->mutex); 4263 /* Wait until the parent has finshed initializing the tls state. */ 4264 pthread_mutex_lock(&clone_lock); 4265 pthread_mutex_unlock(&clone_lock); 4266 cpu_loop(env); 4267 /* never exits */ 4268 return NULL; 4269 } 4270 4271 /* do_fork() Must return host values and target errnos (unlike most 4272 do_*() functions). */ 4273 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4274 abi_ulong parent_tidptr, target_ulong newtls, 4275 abi_ulong child_tidptr) 4276 { 4277 CPUState *cpu = ENV_GET_CPU(env); 4278 int ret; 4279 TaskState *ts; 4280 CPUState *new_cpu; 4281 CPUArchState *new_env; 4282 unsigned int nptl_flags; 4283 sigset_t sigmask; 4284 4285 /* Emulate vfork() with fork() */ 4286 if (flags & CLONE_VFORK) 4287 flags &= ~(CLONE_VFORK | CLONE_VM); 4288 4289 if (flags & CLONE_VM) { 4290 TaskState *parent_ts = (TaskState *)cpu->opaque; 4291 new_thread_info info; 4292 pthread_attr_t attr; 4293 4294 ts = g_malloc0(sizeof(TaskState)); 4295 init_task_state(ts); 4296 /* we create a new CPU instance. */ 4297 new_env = cpu_copy(env); 4298 /* Init regs that differ from the parent. 
*/ 4299 cpu_clone_regs(new_env, newsp); 4300 new_cpu = ENV_GET_CPU(new_env); 4301 new_cpu->opaque = ts; 4302 ts->bprm = parent_ts->bprm; 4303 ts->info = parent_ts->info; 4304 nptl_flags = flags; 4305 flags &= ~CLONE_NPTL_FLAGS2; 4306 4307 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4308 ts->child_tidptr = child_tidptr; 4309 } 4310 4311 if (nptl_flags & CLONE_SETTLS) 4312 cpu_set_tls (new_env, newtls); 4313 4314 /* Grab a mutex so that thread setup appears atomic. */ 4315 pthread_mutex_lock(&clone_lock); 4316 4317 memset(&info, 0, sizeof(info)); 4318 pthread_mutex_init(&info.mutex, NULL); 4319 pthread_mutex_lock(&info.mutex); 4320 pthread_cond_init(&info.cond, NULL); 4321 info.env = new_env; 4322 if (nptl_flags & CLONE_CHILD_SETTID) 4323 info.child_tidptr = child_tidptr; 4324 if (nptl_flags & CLONE_PARENT_SETTID) 4325 info.parent_tidptr = parent_tidptr; 4326 4327 ret = pthread_attr_init(&attr); 4328 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4329 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4330 /* It is not safe to deliver signals until the child has finished 4331 initializing, so temporarily block all signals. */ 4332 sigfillset(&sigmask); 4333 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4334 4335 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4336 /* TODO: Free new CPU state if thread creation failed. */ 4337 4338 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4339 pthread_attr_destroy(&attr); 4340 if (ret == 0) { 4341 /* Wait for the child to initialize. 
*/ 4342 pthread_cond_wait(&info.cond, &info.mutex); 4343 ret = info.tid; 4344 if (flags & CLONE_PARENT_SETTID) 4345 put_user_u32(ret, parent_tidptr); 4346 } else { 4347 ret = -1; 4348 } 4349 pthread_mutex_unlock(&info.mutex); 4350 pthread_cond_destroy(&info.cond); 4351 pthread_mutex_destroy(&info.mutex); 4352 pthread_mutex_unlock(&clone_lock); 4353 } else { 4354 /* if no CLONE_VM, we consider it is a fork */ 4355 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4356 return -EINVAL; 4357 fork_start(); 4358 ret = fork(); 4359 if (ret == 0) { 4360 /* Child Process. */ 4361 cpu_clone_regs(env, newsp); 4362 fork_end(1); 4363 /* There is a race condition here. The parent process could 4364 theoretically read the TID in the child process before the child 4365 tid is set. This would require using either ptrace 4366 (not implemented) or having *_tidptr to point at a shared memory 4367 mapping. We can't repeat the spinlock hack used above because 4368 the child process gets its own copy of the lock. */ 4369 if (flags & CLONE_CHILD_SETTID) 4370 put_user_u32(gettid(), child_tidptr); 4371 if (flags & CLONE_PARENT_SETTID) 4372 put_user_u32(gettid(), parent_tidptr); 4373 ts = (TaskState *)cpu->opaque; 4374 if (flags & CLONE_SETTLS) 4375 cpu_set_tls (env, newtls); 4376 if (flags & CLONE_CHILD_CLEARTID) 4377 ts->child_tidptr = child_tidptr; 4378 } else { 4379 fork_end(0); 4380 } 4381 } 4382 return ret; 4383 } 4384 4385 /* warning : doesn't handle linux specific flags... 
*/ 4386 static int target_to_host_fcntl_cmd(int cmd) 4387 { 4388 switch(cmd) { 4389 case TARGET_F_DUPFD: 4390 case TARGET_F_GETFD: 4391 case TARGET_F_SETFD: 4392 case TARGET_F_GETFL: 4393 case TARGET_F_SETFL: 4394 return cmd; 4395 case TARGET_F_GETLK: 4396 return F_GETLK; 4397 case TARGET_F_SETLK: 4398 return F_SETLK; 4399 case TARGET_F_SETLKW: 4400 return F_SETLKW; 4401 case TARGET_F_GETOWN: 4402 return F_GETOWN; 4403 case TARGET_F_SETOWN: 4404 return F_SETOWN; 4405 case TARGET_F_GETSIG: 4406 return F_GETSIG; 4407 case TARGET_F_SETSIG: 4408 return F_SETSIG; 4409 #if TARGET_ABI_BITS == 32 4410 case TARGET_F_GETLK64: 4411 return F_GETLK64; 4412 case TARGET_F_SETLK64: 4413 return F_SETLK64; 4414 case TARGET_F_SETLKW64: 4415 return F_SETLKW64; 4416 #endif 4417 case TARGET_F_SETLEASE: 4418 return F_SETLEASE; 4419 case TARGET_F_GETLEASE: 4420 return F_GETLEASE; 4421 #ifdef F_DUPFD_CLOEXEC 4422 case TARGET_F_DUPFD_CLOEXEC: 4423 return F_DUPFD_CLOEXEC; 4424 #endif 4425 case TARGET_F_NOTIFY: 4426 return F_NOTIFY; 4427 #ifdef F_GETOWN_EX 4428 case TARGET_F_GETOWN_EX: 4429 return F_GETOWN_EX; 4430 #endif 4431 #ifdef F_SETOWN_EX 4432 case TARGET_F_SETOWN_EX: 4433 return F_SETOWN_EX; 4434 #endif 4435 default: 4436 return -TARGET_EINVAL; 4437 } 4438 return -TARGET_EINVAL; 4439 } 4440 4441 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4442 static const bitmask_transtbl flock_tbl[] = { 4443 TRANSTBL_CONVERT(F_RDLCK), 4444 TRANSTBL_CONVERT(F_WRLCK), 4445 TRANSTBL_CONVERT(F_UNLCK), 4446 TRANSTBL_CONVERT(F_EXLCK), 4447 TRANSTBL_CONVERT(F_SHLCK), 4448 { 0, 0, 0, 0 } 4449 }; 4450 4451 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4452 { 4453 struct flock fl; 4454 struct target_flock *target_fl; 4455 struct flock64 fl64; 4456 struct target_flock64 *target_fl64; 4457 #ifdef F_GETOWN_EX 4458 struct f_owner_ex fox; 4459 struct target_f_owner_ex *target_fox; 4460 #endif 4461 abi_long ret; 4462 int host_cmd = target_to_host_fcntl_cmd(cmd); 4463 4464 if (host_cmd == 
-TARGET_EINVAL) 4465 return host_cmd; 4466 4467 switch(cmd) { 4468 case TARGET_F_GETLK: 4469 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4470 return -TARGET_EFAULT; 4471 fl.l_type = 4472 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4473 fl.l_whence = tswap16(target_fl->l_whence); 4474 fl.l_start = tswapal(target_fl->l_start); 4475 fl.l_len = tswapal(target_fl->l_len); 4476 fl.l_pid = tswap32(target_fl->l_pid); 4477 unlock_user_struct(target_fl, arg, 0); 4478 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4479 if (ret == 0) { 4480 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4481 return -TARGET_EFAULT; 4482 target_fl->l_type = 4483 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4484 target_fl->l_whence = tswap16(fl.l_whence); 4485 target_fl->l_start = tswapal(fl.l_start); 4486 target_fl->l_len = tswapal(fl.l_len); 4487 target_fl->l_pid = tswap32(fl.l_pid); 4488 unlock_user_struct(target_fl, arg, 1); 4489 } 4490 break; 4491 4492 case TARGET_F_SETLK: 4493 case TARGET_F_SETLKW: 4494 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4495 return -TARGET_EFAULT; 4496 fl.l_type = 4497 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4498 fl.l_whence = tswap16(target_fl->l_whence); 4499 fl.l_start = tswapal(target_fl->l_start); 4500 fl.l_len = tswapal(target_fl->l_len); 4501 fl.l_pid = tswap32(target_fl->l_pid); 4502 unlock_user_struct(target_fl, arg, 0); 4503 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4504 break; 4505 4506 case TARGET_F_GETLK64: 4507 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4508 return -TARGET_EFAULT; 4509 fl64.l_type = 4510 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4511 fl64.l_whence = tswap16(target_fl64->l_whence); 4512 fl64.l_start = tswap64(target_fl64->l_start); 4513 fl64.l_len = tswap64(target_fl64->l_len); 4514 fl64.l_pid = tswap32(target_fl64->l_pid); 4515 unlock_user_struct(target_fl64, arg, 0); 4516 ret = get_errno(fcntl(fd, host_cmd, 
&fl64)); 4517 if (ret == 0) { 4518 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4519 return -TARGET_EFAULT; 4520 target_fl64->l_type = 4521 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4522 target_fl64->l_whence = tswap16(fl64.l_whence); 4523 target_fl64->l_start = tswap64(fl64.l_start); 4524 target_fl64->l_len = tswap64(fl64.l_len); 4525 target_fl64->l_pid = tswap32(fl64.l_pid); 4526 unlock_user_struct(target_fl64, arg, 1); 4527 } 4528 break; 4529 case TARGET_F_SETLK64: 4530 case TARGET_F_SETLKW64: 4531 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4532 return -TARGET_EFAULT; 4533 fl64.l_type = 4534 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4535 fl64.l_whence = tswap16(target_fl64->l_whence); 4536 fl64.l_start = tswap64(target_fl64->l_start); 4537 fl64.l_len = tswap64(target_fl64->l_len); 4538 fl64.l_pid = tswap32(target_fl64->l_pid); 4539 unlock_user_struct(target_fl64, arg, 0); 4540 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4541 break; 4542 4543 case TARGET_F_GETFL: 4544 ret = get_errno(fcntl(fd, host_cmd, arg)); 4545 if (ret >= 0) { 4546 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4547 } 4548 break; 4549 4550 case TARGET_F_SETFL: 4551 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4552 break; 4553 4554 #ifdef F_GETOWN_EX 4555 case TARGET_F_GETOWN_EX: 4556 ret = get_errno(fcntl(fd, host_cmd, &fox)); 4557 if (ret >= 0) { 4558 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 4559 return -TARGET_EFAULT; 4560 target_fox->type = tswap32(fox.type); 4561 target_fox->pid = tswap32(fox.pid); 4562 unlock_user_struct(target_fox, arg, 1); 4563 } 4564 break; 4565 #endif 4566 4567 #ifdef F_SETOWN_EX 4568 case TARGET_F_SETOWN_EX: 4569 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 4570 return -TARGET_EFAULT; 4571 fox.type = tswap32(target_fox->type); 4572 fox.pid = tswap32(target_fox->pid); 4573 unlock_user_struct(target_fox, arg, 0); 4574 ret = 
get_errno(fcntl(fd, host_cmd, &fox)); 4575 break; 4576 #endif 4577 4578 case TARGET_F_SETOWN: 4579 case TARGET_F_GETOWN: 4580 case TARGET_F_SETSIG: 4581 case TARGET_F_GETSIG: 4582 case TARGET_F_SETLEASE: 4583 case TARGET_F_GETLEASE: 4584 ret = get_errno(fcntl(fd, host_cmd, arg)); 4585 break; 4586 4587 default: 4588 ret = get_errno(fcntl(fd, cmd, arg)); 4589 break; 4590 } 4591 return ret; 4592 } 4593 4594 #ifdef USE_UID16 4595 4596 static inline int high2lowuid(int uid) 4597 { 4598 if (uid > 65535) 4599 return 65534; 4600 else 4601 return uid; 4602 } 4603 4604 static inline int high2lowgid(int gid) 4605 { 4606 if (gid > 65535) 4607 return 65534; 4608 else 4609 return gid; 4610 } 4611 4612 static inline int low2highuid(int uid) 4613 { 4614 if ((int16_t)uid == -1) 4615 return -1; 4616 else 4617 return uid; 4618 } 4619 4620 static inline int low2highgid(int gid) 4621 { 4622 if ((int16_t)gid == -1) 4623 return -1; 4624 else 4625 return gid; 4626 } 4627 static inline int tswapid(int id) 4628 { 4629 return tswap16(id); 4630 } 4631 4632 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 4633 4634 #else /* !USE_UID16 */ 4635 static inline int high2lowuid(int uid) 4636 { 4637 return uid; 4638 } 4639 static inline int high2lowgid(int gid) 4640 { 4641 return gid; 4642 } 4643 static inline int low2highuid(int uid) 4644 { 4645 return uid; 4646 } 4647 static inline int low2highgid(int gid) 4648 { 4649 return gid; 4650 } 4651 static inline int tswapid(int id) 4652 { 4653 return tswap32(id); 4654 } 4655 4656 #define put_user_id(x, gaddr) put_user_u32(x, gaddr) 4657 4658 #endif /* USE_UID16 */ 4659 4660 void syscall_init(void) 4661 { 4662 IOCTLEntry *ie; 4663 const argtype *arg_type; 4664 int size; 4665 int i; 4666 4667 #define STRUCT(name, ...) 
thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 4668 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 4669 #include "syscall_types.h" 4670 #undef STRUCT 4671 #undef STRUCT_SPECIAL 4672 4673 /* Build target_to_host_errno_table[] table from 4674 * host_to_target_errno_table[]. */ 4675 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 4676 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 4677 } 4678 4679 /* we patch the ioctl size if necessary. We rely on the fact that 4680 no ioctl has all the bits at '1' in the size field */ 4681 ie = ioctl_entries; 4682 while (ie->target_cmd != 0) { 4683 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 4684 TARGET_IOC_SIZEMASK) { 4685 arg_type = ie->arg_type; 4686 if (arg_type[0] != TYPE_PTR) { 4687 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 4688 ie->target_cmd); 4689 exit(1); 4690 } 4691 arg_type++; 4692 size = thunk_type_size(arg_type, 0); 4693 ie->target_cmd = (ie->target_cmd & 4694 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 4695 (size << TARGET_IOC_SIZESHIFT); 4696 } 4697 4698 /* automatic consistency check if same arch */ 4699 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 4700 (defined(__x86_64__) && defined(TARGET_X86_64)) 4701 if (unlikely(ie->target_cmd != ie->host_cmd)) { 4702 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 4703 ie->name, ie->target_cmd, ie->host_cmd); 4704 } 4705 #endif 4706 ie++; 4707 } 4708 } 4709 4710 #if TARGET_ABI_BITS == 32 4711 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 4712 { 4713 #ifdef TARGET_WORDS_BIGENDIAN 4714 return ((uint64_t)word0 << 32) | word1; 4715 #else 4716 return ((uint64_t)word1 << 32) | word0; 4717 #endif 4718 } 4719 #else /* TARGET_ABI_BITS == 32 */ 4720 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 4721 { 4722 return word0; 4723 } 4724 #endif /* 
TARGET_ABI_BITS != 32 */ 4725 4726 #ifdef TARGET_NR_truncate64 4727 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 4728 abi_long arg2, 4729 abi_long arg3, 4730 abi_long arg4) 4731 { 4732 if (regpairs_aligned(cpu_env)) { 4733 arg2 = arg3; 4734 arg3 = arg4; 4735 } 4736 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 4737 } 4738 #endif 4739 4740 #ifdef TARGET_NR_ftruncate64 4741 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 4742 abi_long arg2, 4743 abi_long arg3, 4744 abi_long arg4) 4745 { 4746 if (regpairs_aligned(cpu_env)) { 4747 arg2 = arg3; 4748 arg3 = arg4; 4749 } 4750 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 4751 } 4752 #endif 4753 4754 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 4755 abi_ulong target_addr) 4756 { 4757 struct target_timespec *target_ts; 4758 4759 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 4760 return -TARGET_EFAULT; 4761 host_ts->tv_sec = tswapal(target_ts->tv_sec); 4762 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 4763 unlock_user_struct(target_ts, target_addr, 0); 4764 return 0; 4765 } 4766 4767 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 4768 struct timespec *host_ts) 4769 { 4770 struct target_timespec *target_ts; 4771 4772 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 4773 return -TARGET_EFAULT; 4774 target_ts->tv_sec = tswapal(host_ts->tv_sec); 4775 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 4776 unlock_user_struct(target_ts, target_addr, 1); 4777 return 0; 4778 } 4779 4780 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 4781 abi_ulong target_addr) 4782 { 4783 struct target_itimerspec *target_itspec; 4784 4785 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 4786 return -TARGET_EFAULT; 4787 } 4788 4789 host_itspec->it_interval.tv_sec = 4790 tswapal(target_itspec->it_interval.tv_sec); 4791 
host_itspec->it_interval.tv_nsec = 4792 tswapal(target_itspec->it_interval.tv_nsec); 4793 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 4794 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 4795 4796 unlock_user_struct(target_itspec, target_addr, 1); 4797 return 0; 4798 } 4799 4800 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 4801 struct itimerspec *host_its) 4802 { 4803 struct target_itimerspec *target_itspec; 4804 4805 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 4806 return -TARGET_EFAULT; 4807 } 4808 4809 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 4810 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 4811 4812 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 4813 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 4814 4815 unlock_user_struct(target_itspec, target_addr, 0); 4816 return 0; 4817 } 4818 4819 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat) 4820 static inline abi_long host_to_target_stat64(void *cpu_env, 4821 abi_ulong target_addr, 4822 struct stat *host_st) 4823 { 4824 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 4825 if (((CPUARMState *)cpu_env)->eabi) { 4826 struct target_eabi_stat64 *target_st; 4827 4828 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4829 return -TARGET_EFAULT; 4830 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 4831 __put_user(host_st->st_dev, &target_st->st_dev); 4832 __put_user(host_st->st_ino, &target_st->st_ino); 4833 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4834 __put_user(host_st->st_ino, &target_st->__st_ino); 4835 #endif 4836 __put_user(host_st->st_mode, &target_st->st_mode); 4837 __put_user(host_st->st_nlink, &target_st->st_nlink); 4838 __put_user(host_st->st_uid, &target_st->st_uid); 4839 __put_user(host_st->st_gid, &target_st->st_gid); 4840 __put_user(host_st->st_rdev, 
&target_st->st_rdev); 4841 __put_user(host_st->st_size, &target_st->st_size); 4842 __put_user(host_st->st_blksize, &target_st->st_blksize); 4843 __put_user(host_st->st_blocks, &target_st->st_blocks); 4844 __put_user(host_st->st_atime, &target_st->target_st_atime); 4845 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4846 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4847 unlock_user_struct(target_st, target_addr, 1); 4848 } else 4849 #endif 4850 { 4851 #if defined(TARGET_HAS_STRUCT_STAT64) 4852 struct target_stat64 *target_st; 4853 #else 4854 struct target_stat *target_st; 4855 #endif 4856 4857 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 4858 return -TARGET_EFAULT; 4859 memset(target_st, 0, sizeof(*target_st)); 4860 __put_user(host_st->st_dev, &target_st->st_dev); 4861 __put_user(host_st->st_ino, &target_st->st_ino); 4862 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 4863 __put_user(host_st->st_ino, &target_st->__st_ino); 4864 #endif 4865 __put_user(host_st->st_mode, &target_st->st_mode); 4866 __put_user(host_st->st_nlink, &target_st->st_nlink); 4867 __put_user(host_st->st_uid, &target_st->st_uid); 4868 __put_user(host_st->st_gid, &target_st->st_gid); 4869 __put_user(host_st->st_rdev, &target_st->st_rdev); 4870 /* XXX: better use of kernel struct */ 4871 __put_user(host_st->st_size, &target_st->st_size); 4872 __put_user(host_st->st_blksize, &target_st->st_blksize); 4873 __put_user(host_st->st_blocks, &target_st->st_blocks); 4874 __put_user(host_st->st_atime, &target_st->target_st_atime); 4875 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 4876 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 4877 unlock_user_struct(target_st, target_addr, 1); 4878 } 4879 4880 return 0; 4881 } 4882 #endif 4883 4884 /* ??? Using host futex calls even when target atomic operations 4885 are not really atomic probably breaks things. 
However implementing 4886 futexes locally would make futexes shared between multiple processes 4887 tricky. However they're probably useless because guest atomic 4888 operations won't work either. */ 4889 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 4890 target_ulong uaddr2, int val3) 4891 { 4892 struct timespec ts, *pts; 4893 int base_op; 4894 4895 /* ??? We assume FUTEX_* constants are the same on both host 4896 and target. */ 4897 #ifdef FUTEX_CMD_MASK 4898 base_op = op & FUTEX_CMD_MASK; 4899 #else 4900 base_op = op; 4901 #endif 4902 switch (base_op) { 4903 case FUTEX_WAIT: 4904 case FUTEX_WAIT_BITSET: 4905 if (timeout) { 4906 pts = &ts; 4907 target_to_host_timespec(pts, timeout); 4908 } else { 4909 pts = NULL; 4910 } 4911 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 4912 pts, NULL, val3)); 4913 case FUTEX_WAKE: 4914 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4915 case FUTEX_FD: 4916 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 4917 case FUTEX_REQUEUE: 4918 case FUTEX_CMP_REQUEUE: 4919 case FUTEX_WAKE_OP: 4920 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 4921 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 4922 But the prototype takes a `struct timespec *'; insert casts 4923 to satisfy the compiler. We do not need to tswap TIMEOUT 4924 since it's not compared to guest memory. */ 4925 pts = (struct timespec *)(uintptr_t) timeout; 4926 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 4927 g2h(uaddr2), 4928 (base_op == FUTEX_CMP_REQUEUE 4929 ? tswap32(val3) 4930 : val3))); 4931 default: 4932 return -TARGET_ENOSYS; 4933 } 4934 } 4935 4936 /* Map host to target signal numbers for the wait family of syscalls. 4937 Assume all other status bits are the same. 
*/ 4938 int host_to_target_waitstatus(int status) 4939 { 4940 if (WIFSIGNALED(status)) { 4941 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 4942 } 4943 if (WIFSTOPPED(status)) { 4944 return (host_to_target_signal(WSTOPSIG(status)) << 8) 4945 | (status & 0xff); 4946 } 4947 return status; 4948 } 4949 4950 static int open_self_maps(void *cpu_env, int fd) 4951 { 4952 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4953 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 4954 TaskState *ts = cpu->opaque; 4955 #endif 4956 FILE *fp; 4957 char *line = NULL; 4958 size_t len = 0; 4959 ssize_t read; 4960 4961 fp = fopen("/proc/self/maps", "r"); 4962 if (fp == NULL) { 4963 return -EACCES; 4964 } 4965 4966 while ((read = getline(&line, &len, fp)) != -1) { 4967 int fields, dev_maj, dev_min, inode; 4968 uint64_t min, max, offset; 4969 char flag_r, flag_w, flag_x, flag_p; 4970 char path[512] = ""; 4971 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 4972 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 4973 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 4974 4975 if ((fields < 10) || (fields > 11)) { 4976 continue; 4977 } 4978 if (!strncmp(path, "[stack]", 7)) { 4979 continue; 4980 } 4981 if (h2g_valid(min) && h2g_valid(max)) { 4982 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 4983 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 4984 h2g(min), h2g(max), flag_r, flag_w, 4985 flag_x, flag_p, offset, dev_maj, dev_min, inode, 4986 path[0] ? 
" " : "", path); 4987 } 4988 } 4989 4990 free(line); 4991 fclose(fp); 4992 4993 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4994 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 4995 (unsigned long long)ts->info->stack_limit, 4996 (unsigned long long)(ts->info->start_stack + 4997 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 4998 (unsigned long long)0); 4999 #endif 5000 5001 return 0; 5002 } 5003 5004 static int open_self_stat(void *cpu_env, int fd) 5005 { 5006 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5007 TaskState *ts = cpu->opaque; 5008 abi_ulong start_stack = ts->info->start_stack; 5009 int i; 5010 5011 for (i = 0; i < 44; i++) { 5012 char buf[128]; 5013 int len; 5014 uint64_t val = 0; 5015 5016 if (i == 0) { 5017 /* pid */ 5018 val = getpid(); 5019 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5020 } else if (i == 1) { 5021 /* app name */ 5022 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5023 } else if (i == 27) { 5024 /* stack bottom */ 5025 val = start_stack; 5026 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5027 } else { 5028 /* for the rest, there is MasterCard */ 5029 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 5030 } 5031 5032 len = strlen(buf); 5033 if (write(fd, buf, len) != len) { 5034 return -1; 5035 } 5036 } 5037 5038 return 0; 5039 } 5040 5041 static int open_self_auxv(void *cpu_env, int fd) 5042 { 5043 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5044 TaskState *ts = cpu->opaque; 5045 abi_ulong auxv = ts->info->saved_auxv; 5046 abi_ulong len = ts->info->auxv_len; 5047 char *ptr; 5048 5049 /* 5050 * Auxiliary vector is stored in target process stack. 
5051 * read in whole auxv vector and copy it to file 5052 */ 5053 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5054 if (ptr != NULL) { 5055 while (len > 0) { 5056 ssize_t r; 5057 r = write(fd, ptr, len); 5058 if (r <= 0) { 5059 break; 5060 } 5061 len -= r; 5062 ptr += r; 5063 } 5064 lseek(fd, 0, SEEK_SET); 5065 unlock_user(ptr, auxv, len); 5066 } 5067 5068 return 0; 5069 } 5070 5071 static int is_proc_myself(const char *filename, const char *entry) 5072 { 5073 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 5074 filename += strlen("/proc/"); 5075 if (!strncmp(filename, "self/", strlen("self/"))) { 5076 filename += strlen("self/"); 5077 } else if (*filename >= '1' && *filename <= '9') { 5078 char myself[80]; 5079 snprintf(myself, sizeof(myself), "%d/", getpid()); 5080 if (!strncmp(filename, myself, strlen(myself))) { 5081 filename += strlen(myself); 5082 } else { 5083 return 0; 5084 } 5085 } else { 5086 return 0; 5087 } 5088 if (!strcmp(filename, entry)) { 5089 return 1; 5090 } 5091 } 5092 return 0; 5093 } 5094 5095 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5096 static int is_proc(const char *filename, const char *entry) 5097 { 5098 return strcmp(filename, entry) == 0; 5099 } 5100 5101 static int open_net_route(void *cpu_env, int fd) 5102 { 5103 FILE *fp; 5104 char *line = NULL; 5105 size_t len = 0; 5106 ssize_t read; 5107 5108 fp = fopen("/proc/net/route", "r"); 5109 if (fp == NULL) { 5110 return -EACCES; 5111 } 5112 5113 /* read header */ 5114 5115 read = getline(&line, &len, fp); 5116 dprintf(fd, "%s", line); 5117 5118 /* read routes */ 5119 5120 while ((read = getline(&line, &len, fp)) != -1) { 5121 char iface[16]; 5122 uint32_t dest, gw, mask; 5123 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 5124 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5125 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 5126 &mask, &mtu, &window, &irtt); 5127 dprintf(fd, 
"%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5128 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 5129 metric, tswap32(mask), mtu, window, irtt); 5130 } 5131 5132 free(line); 5133 fclose(fp); 5134 5135 return 0; 5136 } 5137 #endif 5138 5139 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode) 5140 { 5141 struct fake_open { 5142 const char *filename; 5143 int (*fill)(void *cpu_env, int fd); 5144 int (*cmp)(const char *s1, const char *s2); 5145 }; 5146 const struct fake_open *fake_open; 5147 static const struct fake_open fakes[] = { 5148 { "maps", open_self_maps, is_proc_myself }, 5149 { "stat", open_self_stat, is_proc_myself }, 5150 { "auxv", open_self_auxv, is_proc_myself }, 5151 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5152 { "/proc/net/route", open_net_route, is_proc }, 5153 #endif 5154 { NULL, NULL, NULL } 5155 }; 5156 5157 if (is_proc_myself(pathname, "exe")) { 5158 int execfd = qemu_getauxval(AT_EXECFD); 5159 return execfd ? execfd : get_errno(open(exec_path, flags, mode)); 5160 } 5161 5162 for (fake_open = fakes; fake_open->filename; fake_open++) { 5163 if (fake_open->cmp(pathname, fake_open->filename)) { 5164 break; 5165 } 5166 } 5167 5168 if (fake_open->filename) { 5169 const char *tmpdir; 5170 char filename[PATH_MAX]; 5171 int fd, r; 5172 5173 /* create temporary file to map stat to */ 5174 tmpdir = getenv("TMPDIR"); 5175 if (!tmpdir) 5176 tmpdir = "/tmp"; 5177 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5178 fd = mkstemp(filename); 5179 if (fd < 0) { 5180 return fd; 5181 } 5182 unlink(filename); 5183 5184 if ((r = fake_open->fill(cpu_env, fd))) { 5185 close(fd); 5186 return r; 5187 } 5188 lseek(fd, 0, SEEK_SET); 5189 5190 return fd; 5191 } 5192 5193 return get_errno(open(path(pathname), flags, mode)); 5194 } 5195 5196 /* do_syscall() should always have a single exit point at the end so 5197 that actions, such as logging of syscall results, can be performed. 
5198 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 5199 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5200 abi_long arg2, abi_long arg3, abi_long arg4, 5201 abi_long arg5, abi_long arg6, abi_long arg7, 5202 abi_long arg8) 5203 { 5204 CPUState *cpu = ENV_GET_CPU(cpu_env); 5205 abi_long ret; 5206 struct stat st; 5207 struct statfs stfs; 5208 void *p; 5209 5210 #ifdef DEBUG 5211 gemu_log("syscall %d", num); 5212 #endif 5213 if(do_strace) 5214 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5215 5216 switch(num) { 5217 case TARGET_NR_exit: 5218 /* In old applications this may be used to implement _exit(2). 5219 However in threaded applictions it is used for thread termination, 5220 and _exit_group is used for application termination. 5221 Do thread termination if we have more then one thread. */ 5222 /* FIXME: This probably breaks if a signal arrives. We should probably 5223 be disabling signals. */ 5224 if (CPU_NEXT(first_cpu)) { 5225 TaskState *ts; 5226 5227 cpu_list_lock(); 5228 /* Remove the CPU from the list. 
*/ 5229 QTAILQ_REMOVE(&cpus, cpu, node); 5230 cpu_list_unlock(); 5231 ts = cpu->opaque; 5232 if (ts->child_tidptr) { 5233 put_user_u32(0, ts->child_tidptr); 5234 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5235 NULL, NULL, 0); 5236 } 5237 thread_cpu = NULL; 5238 object_unref(OBJECT(cpu)); 5239 g_free(ts); 5240 pthread_exit(NULL); 5241 } 5242 #ifdef TARGET_GPROF 5243 _mcleanup(); 5244 #endif 5245 gdb_exit(cpu_env, arg1); 5246 _exit(arg1); 5247 ret = 0; /* avoid warning */ 5248 break; 5249 case TARGET_NR_read: 5250 if (arg3 == 0) 5251 ret = 0; 5252 else { 5253 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5254 goto efault; 5255 ret = get_errno(read(arg1, p, arg3)); 5256 unlock_user(p, arg2, ret); 5257 } 5258 break; 5259 case TARGET_NR_write: 5260 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5261 goto efault; 5262 ret = get_errno(write(arg1, p, arg3)); 5263 unlock_user(p, arg2, 0); 5264 break; 5265 case TARGET_NR_open: 5266 if (!(p = lock_user_string(arg1))) 5267 goto efault; 5268 ret = get_errno(do_open(cpu_env, p, 5269 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5270 arg3)); 5271 unlock_user(p, arg1, 0); 5272 break; 5273 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5274 case TARGET_NR_openat: 5275 if (!(p = lock_user_string(arg2))) 5276 goto efault; 5277 ret = get_errno(sys_openat(arg1, 5278 path(p), 5279 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5280 arg4)); 5281 unlock_user(p, arg2, 0); 5282 break; 5283 #endif 5284 case TARGET_NR_close: 5285 ret = get_errno(close(arg1)); 5286 break; 5287 case TARGET_NR_brk: 5288 ret = do_brk(arg1); 5289 break; 5290 case TARGET_NR_fork: 5291 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5292 break; 5293 #ifdef TARGET_NR_waitpid 5294 case TARGET_NR_waitpid: 5295 { 5296 int status; 5297 ret = get_errno(waitpid(arg1, &status, arg3)); 5298 if (!is_error(ret) && arg2 && ret 5299 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5300 goto efault; 5301 } 5302 break; 5303 #endif 5304 
#ifdef TARGET_NR_waitid 5305 case TARGET_NR_waitid: 5306 { 5307 siginfo_t info; 5308 info.si_pid = 0; 5309 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5310 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5311 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5312 goto efault; 5313 host_to_target_siginfo(p, &info); 5314 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5315 } 5316 } 5317 break; 5318 #endif 5319 #ifdef TARGET_NR_creat /* not on alpha */ 5320 case TARGET_NR_creat: 5321 if (!(p = lock_user_string(arg1))) 5322 goto efault; 5323 ret = get_errno(creat(p, arg2)); 5324 unlock_user(p, arg1, 0); 5325 break; 5326 #endif 5327 case TARGET_NR_link: 5328 { 5329 void * p2; 5330 p = lock_user_string(arg1); 5331 p2 = lock_user_string(arg2); 5332 if (!p || !p2) 5333 ret = -TARGET_EFAULT; 5334 else 5335 ret = get_errno(link(p, p2)); 5336 unlock_user(p2, arg2, 0); 5337 unlock_user(p, arg1, 0); 5338 } 5339 break; 5340 #if defined(TARGET_NR_linkat) 5341 case TARGET_NR_linkat: 5342 { 5343 void * p2 = NULL; 5344 if (!arg2 || !arg4) 5345 goto efault; 5346 p = lock_user_string(arg2); 5347 p2 = lock_user_string(arg4); 5348 if (!p || !p2) 5349 ret = -TARGET_EFAULT; 5350 else 5351 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5352 unlock_user(p, arg2, 0); 5353 unlock_user(p2, arg4, 0); 5354 } 5355 break; 5356 #endif 5357 case TARGET_NR_unlink: 5358 if (!(p = lock_user_string(arg1))) 5359 goto efault; 5360 ret = get_errno(unlink(p)); 5361 unlock_user(p, arg1, 0); 5362 break; 5363 #if defined(TARGET_NR_unlinkat) 5364 case TARGET_NR_unlinkat: 5365 if (!(p = lock_user_string(arg2))) 5366 goto efault; 5367 ret = get_errno(unlinkat(arg1, p, arg3)); 5368 unlock_user(p, arg2, 0); 5369 break; 5370 #endif 5371 case TARGET_NR_execve: 5372 { 5373 char **argp, **envp; 5374 int argc, envc; 5375 abi_ulong gp; 5376 abi_ulong guest_argp; 5377 abi_ulong guest_envp; 5378 abi_ulong addr; 5379 char **q; 5380 int total_size = 0; 5381 5382 argc = 0; 5383 guest_argp = 
arg2; 5384 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5385 if (get_user_ual(addr, gp)) 5386 goto efault; 5387 if (!addr) 5388 break; 5389 argc++; 5390 } 5391 envc = 0; 5392 guest_envp = arg3; 5393 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5394 if (get_user_ual(addr, gp)) 5395 goto efault; 5396 if (!addr) 5397 break; 5398 envc++; 5399 } 5400 5401 argp = alloca((argc + 1) * sizeof(void *)); 5402 envp = alloca((envc + 1) * sizeof(void *)); 5403 5404 for (gp = guest_argp, q = argp; gp; 5405 gp += sizeof(abi_ulong), q++) { 5406 if (get_user_ual(addr, gp)) 5407 goto execve_efault; 5408 if (!addr) 5409 break; 5410 if (!(*q = lock_user_string(addr))) 5411 goto execve_efault; 5412 total_size += strlen(*q) + 1; 5413 } 5414 *q = NULL; 5415 5416 for (gp = guest_envp, q = envp; gp; 5417 gp += sizeof(abi_ulong), q++) { 5418 if (get_user_ual(addr, gp)) 5419 goto execve_efault; 5420 if (!addr) 5421 break; 5422 if (!(*q = lock_user_string(addr))) 5423 goto execve_efault; 5424 total_size += strlen(*q) + 1; 5425 } 5426 *q = NULL; 5427 5428 /* This case will not be caught by the host's execve() if its 5429 page size is bigger than the target's. 
*/ 5430 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5431 ret = -TARGET_E2BIG; 5432 goto execve_end; 5433 } 5434 if (!(p = lock_user_string(arg1))) 5435 goto execve_efault; 5436 ret = get_errno(execve(p, argp, envp)); 5437 unlock_user(p, arg1, 0); 5438 5439 goto execve_end; 5440 5441 execve_efault: 5442 ret = -TARGET_EFAULT; 5443 5444 execve_end: 5445 for (gp = guest_argp, q = argp; *q; 5446 gp += sizeof(abi_ulong), q++) { 5447 if (get_user_ual(addr, gp) 5448 || !addr) 5449 break; 5450 unlock_user(*q, addr, 0); 5451 } 5452 for (gp = guest_envp, q = envp; *q; 5453 gp += sizeof(abi_ulong), q++) { 5454 if (get_user_ual(addr, gp) 5455 || !addr) 5456 break; 5457 unlock_user(*q, addr, 0); 5458 } 5459 } 5460 break; 5461 case TARGET_NR_chdir: 5462 if (!(p = lock_user_string(arg1))) 5463 goto efault; 5464 ret = get_errno(chdir(p)); 5465 unlock_user(p, arg1, 0); 5466 break; 5467 #ifdef TARGET_NR_time 5468 case TARGET_NR_time: 5469 { 5470 time_t host_time; 5471 ret = get_errno(time(&host_time)); 5472 if (!is_error(ret) 5473 && arg1 5474 && put_user_sal(host_time, arg1)) 5475 goto efault; 5476 } 5477 break; 5478 #endif 5479 case TARGET_NR_mknod: 5480 if (!(p = lock_user_string(arg1))) 5481 goto efault; 5482 ret = get_errno(mknod(p, arg2, arg3)); 5483 unlock_user(p, arg1, 0); 5484 break; 5485 #if defined(TARGET_NR_mknodat) 5486 case TARGET_NR_mknodat: 5487 if (!(p = lock_user_string(arg2))) 5488 goto efault; 5489 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5490 unlock_user(p, arg2, 0); 5491 break; 5492 #endif 5493 case TARGET_NR_chmod: 5494 if (!(p = lock_user_string(arg1))) 5495 goto efault; 5496 ret = get_errno(chmod(p, arg2)); 5497 unlock_user(p, arg1, 0); 5498 break; 5499 #ifdef TARGET_NR_break 5500 case TARGET_NR_break: 5501 goto unimplemented; 5502 #endif 5503 #ifdef TARGET_NR_oldstat 5504 case TARGET_NR_oldstat: 5505 goto unimplemented; 5506 #endif 5507 case TARGET_NR_lseek: 5508 ret = get_errno(lseek(arg1, arg2, arg3)); 5509 break; 5510 #if 
defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5511 /* Alpha specific */ 5512 case TARGET_NR_getxpid: 5513 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5514 ret = get_errno(getpid()); 5515 break; 5516 #endif 5517 #ifdef TARGET_NR_getpid 5518 case TARGET_NR_getpid: 5519 ret = get_errno(getpid()); 5520 break; 5521 #endif 5522 case TARGET_NR_mount: 5523 { 5524 /* need to look at the data field */ 5525 void *p2, *p3; 5526 p = lock_user_string(arg1); 5527 p2 = lock_user_string(arg2); 5528 p3 = lock_user_string(arg3); 5529 if (!p || !p2 || !p3) 5530 ret = -TARGET_EFAULT; 5531 else { 5532 /* FIXME - arg5 should be locked, but it isn't clear how to 5533 * do that since it's not guaranteed to be a NULL-terminated 5534 * string. 5535 */ 5536 if ( ! arg5 ) 5537 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5538 else 5539 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5540 } 5541 unlock_user(p, arg1, 0); 5542 unlock_user(p2, arg2, 0); 5543 unlock_user(p3, arg3, 0); 5544 break; 5545 } 5546 #ifdef TARGET_NR_umount 5547 case TARGET_NR_umount: 5548 if (!(p = lock_user_string(arg1))) 5549 goto efault; 5550 ret = get_errno(umount(p)); 5551 unlock_user(p, arg1, 0); 5552 break; 5553 #endif 5554 #ifdef TARGET_NR_stime /* not on alpha */ 5555 case TARGET_NR_stime: 5556 { 5557 time_t host_time; 5558 if (get_user_sal(host_time, arg1)) 5559 goto efault; 5560 ret = get_errno(stime(&host_time)); 5561 } 5562 break; 5563 #endif 5564 case TARGET_NR_ptrace: 5565 goto unimplemented; 5566 #ifdef TARGET_NR_alarm /* not on alpha */ 5567 case TARGET_NR_alarm: 5568 ret = alarm(arg1); 5569 break; 5570 #endif 5571 #ifdef TARGET_NR_oldfstat 5572 case TARGET_NR_oldfstat: 5573 goto unimplemented; 5574 #endif 5575 #ifdef TARGET_NR_pause /* not on alpha */ 5576 case TARGET_NR_pause: 5577 ret = get_errno(pause()); 5578 break; 5579 #endif 5580 #ifdef TARGET_NR_utime 5581 case TARGET_NR_utime: 5582 { 5583 struct utimbuf tbuf, *host_tbuf; 5584 struct target_utimbuf 
*target_tbuf; 5585 if (arg2) { 5586 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5587 goto efault; 5588 tbuf.actime = tswapal(target_tbuf->actime); 5589 tbuf.modtime = tswapal(target_tbuf->modtime); 5590 unlock_user_struct(target_tbuf, arg2, 0); 5591 host_tbuf = &tbuf; 5592 } else { 5593 host_tbuf = NULL; 5594 } 5595 if (!(p = lock_user_string(arg1))) 5596 goto efault; 5597 ret = get_errno(utime(p, host_tbuf)); 5598 unlock_user(p, arg1, 0); 5599 } 5600 break; 5601 #endif 5602 case TARGET_NR_utimes: 5603 { 5604 struct timeval *tvp, tv[2]; 5605 if (arg2) { 5606 if (copy_from_user_timeval(&tv[0], arg2) 5607 || copy_from_user_timeval(&tv[1], 5608 arg2 + sizeof(struct target_timeval))) 5609 goto efault; 5610 tvp = tv; 5611 } else { 5612 tvp = NULL; 5613 } 5614 if (!(p = lock_user_string(arg1))) 5615 goto efault; 5616 ret = get_errno(utimes(p, tvp)); 5617 unlock_user(p, arg1, 0); 5618 } 5619 break; 5620 #if defined(TARGET_NR_futimesat) 5621 case TARGET_NR_futimesat: 5622 { 5623 struct timeval *tvp, tv[2]; 5624 if (arg3) { 5625 if (copy_from_user_timeval(&tv[0], arg3) 5626 || copy_from_user_timeval(&tv[1], 5627 arg3 + sizeof(struct target_timeval))) 5628 goto efault; 5629 tvp = tv; 5630 } else { 5631 tvp = NULL; 5632 } 5633 if (!(p = lock_user_string(arg2))) 5634 goto efault; 5635 ret = get_errno(futimesat(arg1, path(p), tvp)); 5636 unlock_user(p, arg2, 0); 5637 } 5638 break; 5639 #endif 5640 #ifdef TARGET_NR_stty 5641 case TARGET_NR_stty: 5642 goto unimplemented; 5643 #endif 5644 #ifdef TARGET_NR_gtty 5645 case TARGET_NR_gtty: 5646 goto unimplemented; 5647 #endif 5648 case TARGET_NR_access: 5649 if (!(p = lock_user_string(arg1))) 5650 goto efault; 5651 ret = get_errno(access(path(p), arg2)); 5652 unlock_user(p, arg1, 0); 5653 break; 5654 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5655 case TARGET_NR_faccessat: 5656 if (!(p = lock_user_string(arg2))) 5657 goto efault; 5658 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5659 unlock_user(p, 
arg2, 0); 5660 break; 5661 #endif 5662 #ifdef TARGET_NR_nice /* not on alpha */ 5663 case TARGET_NR_nice: 5664 ret = get_errno(nice(arg1)); 5665 break; 5666 #endif 5667 #ifdef TARGET_NR_ftime 5668 case TARGET_NR_ftime: 5669 goto unimplemented; 5670 #endif 5671 case TARGET_NR_sync: 5672 sync(); 5673 ret = 0; 5674 break; 5675 case TARGET_NR_kill: 5676 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5677 break; 5678 case TARGET_NR_rename: 5679 { 5680 void *p2; 5681 p = lock_user_string(arg1); 5682 p2 = lock_user_string(arg2); 5683 if (!p || !p2) 5684 ret = -TARGET_EFAULT; 5685 else 5686 ret = get_errno(rename(p, p2)); 5687 unlock_user(p2, arg2, 0); 5688 unlock_user(p, arg1, 0); 5689 } 5690 break; 5691 #if defined(TARGET_NR_renameat) 5692 case TARGET_NR_renameat: 5693 { 5694 void *p2; 5695 p = lock_user_string(arg2); 5696 p2 = lock_user_string(arg4); 5697 if (!p || !p2) 5698 ret = -TARGET_EFAULT; 5699 else 5700 ret = get_errno(renameat(arg1, p, arg3, p2)); 5701 unlock_user(p2, arg4, 0); 5702 unlock_user(p, arg2, 0); 5703 } 5704 break; 5705 #endif 5706 case TARGET_NR_mkdir: 5707 if (!(p = lock_user_string(arg1))) 5708 goto efault; 5709 ret = get_errno(mkdir(p, arg2)); 5710 unlock_user(p, arg1, 0); 5711 break; 5712 #if defined(TARGET_NR_mkdirat) 5713 case TARGET_NR_mkdirat: 5714 if (!(p = lock_user_string(arg2))) 5715 goto efault; 5716 ret = get_errno(mkdirat(arg1, p, arg3)); 5717 unlock_user(p, arg2, 0); 5718 break; 5719 #endif 5720 case TARGET_NR_rmdir: 5721 if (!(p = lock_user_string(arg1))) 5722 goto efault; 5723 ret = get_errno(rmdir(p)); 5724 unlock_user(p, arg1, 0); 5725 break; 5726 case TARGET_NR_dup: 5727 ret = get_errno(dup(arg1)); 5728 break; 5729 case TARGET_NR_pipe: 5730 ret = do_pipe(cpu_env, arg1, 0, 0); 5731 break; 5732 #ifdef TARGET_NR_pipe2 5733 case TARGET_NR_pipe2: 5734 ret = do_pipe(cpu_env, arg1, 5735 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5736 break; 5737 #endif 5738 case TARGET_NR_times: 5739 { 5740 struct target_tms *tmsp; 
5741 struct tms tms; 5742 ret = get_errno(times(&tms)); 5743 if (arg1) { 5744 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5745 if (!tmsp) 5746 goto efault; 5747 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5748 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5749 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5750 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5751 } 5752 if (!is_error(ret)) 5753 ret = host_to_target_clock_t(ret); 5754 } 5755 break; 5756 #ifdef TARGET_NR_prof 5757 case TARGET_NR_prof: 5758 goto unimplemented; 5759 #endif 5760 #ifdef TARGET_NR_signal 5761 case TARGET_NR_signal: 5762 goto unimplemented; 5763 #endif 5764 case TARGET_NR_acct: 5765 if (arg1 == 0) { 5766 ret = get_errno(acct(NULL)); 5767 } else { 5768 if (!(p = lock_user_string(arg1))) 5769 goto efault; 5770 ret = get_errno(acct(path(p))); 5771 unlock_user(p, arg1, 0); 5772 } 5773 break; 5774 #ifdef TARGET_NR_umount2 5775 case TARGET_NR_umount2: 5776 if (!(p = lock_user_string(arg1))) 5777 goto efault; 5778 ret = get_errno(umount2(p, arg2)); 5779 unlock_user(p, arg1, 0); 5780 break; 5781 #endif 5782 #ifdef TARGET_NR_lock 5783 case TARGET_NR_lock: 5784 goto unimplemented; 5785 #endif 5786 case TARGET_NR_ioctl: 5787 ret = do_ioctl(arg1, arg2, arg3); 5788 break; 5789 case TARGET_NR_fcntl: 5790 ret = do_fcntl(arg1, arg2, arg3); 5791 break; 5792 #ifdef TARGET_NR_mpx 5793 case TARGET_NR_mpx: 5794 goto unimplemented; 5795 #endif 5796 case TARGET_NR_setpgid: 5797 ret = get_errno(setpgid(arg1, arg2)); 5798 break; 5799 #ifdef TARGET_NR_ulimit 5800 case TARGET_NR_ulimit: 5801 goto unimplemented; 5802 #endif 5803 #ifdef TARGET_NR_oldolduname 5804 case TARGET_NR_oldolduname: 5805 goto unimplemented; 5806 #endif 5807 case TARGET_NR_umask: 5808 ret = get_errno(umask(arg1)); 5809 break; 5810 case TARGET_NR_chroot: 5811 if (!(p = lock_user_string(arg1))) 5812 goto efault; 5813 ret = 
get_errno(chroot(p)); 5814 unlock_user(p, arg1, 0); 5815 break; 5816 case TARGET_NR_ustat: 5817 goto unimplemented; 5818 case TARGET_NR_dup2: 5819 ret = get_errno(dup2(arg1, arg2)); 5820 break; 5821 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5822 case TARGET_NR_dup3: 5823 ret = get_errno(dup3(arg1, arg2, arg3)); 5824 break; 5825 #endif 5826 #ifdef TARGET_NR_getppid /* not on alpha */ 5827 case TARGET_NR_getppid: 5828 ret = get_errno(getppid()); 5829 break; 5830 #endif 5831 case TARGET_NR_getpgrp: 5832 ret = get_errno(getpgrp()); 5833 break; 5834 case TARGET_NR_setsid: 5835 ret = get_errno(setsid()); 5836 break; 5837 #ifdef TARGET_NR_sigaction 5838 case TARGET_NR_sigaction: 5839 { 5840 #if defined(TARGET_ALPHA) 5841 struct target_sigaction act, oact, *pact = 0; 5842 struct target_old_sigaction *old_act; 5843 if (arg2) { 5844 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5845 goto efault; 5846 act._sa_handler = old_act->_sa_handler; 5847 target_siginitset(&act.sa_mask, old_act->sa_mask); 5848 act.sa_flags = old_act->sa_flags; 5849 act.sa_restorer = 0; 5850 unlock_user_struct(old_act, arg2, 0); 5851 pact = &act; 5852 } 5853 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5854 if (!is_error(ret) && arg3) { 5855 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5856 goto efault; 5857 old_act->_sa_handler = oact._sa_handler; 5858 old_act->sa_mask = oact.sa_mask.sig[0]; 5859 old_act->sa_flags = oact.sa_flags; 5860 unlock_user_struct(old_act, arg3, 1); 5861 } 5862 #elif defined(TARGET_MIPS) 5863 struct target_sigaction act, oact, *pact, *old_act; 5864 5865 if (arg2) { 5866 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5867 goto efault; 5868 act._sa_handler = old_act->_sa_handler; 5869 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5870 act.sa_flags = old_act->sa_flags; 5871 unlock_user_struct(old_act, arg2, 0); 5872 pact = &act; 5873 } else { 5874 pact = NULL; 5875 } 5876 5877 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5878 
5879 if (!is_error(ret) && arg3) { 5880 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5881 goto efault; 5882 old_act->_sa_handler = oact._sa_handler; 5883 old_act->sa_flags = oact.sa_flags; 5884 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 5885 old_act->sa_mask.sig[1] = 0; 5886 old_act->sa_mask.sig[2] = 0; 5887 old_act->sa_mask.sig[3] = 0; 5888 unlock_user_struct(old_act, arg3, 1); 5889 } 5890 #else 5891 struct target_old_sigaction *old_act; 5892 struct target_sigaction act, oact, *pact; 5893 if (arg2) { 5894 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5895 goto efault; 5896 act._sa_handler = old_act->_sa_handler; 5897 target_siginitset(&act.sa_mask, old_act->sa_mask); 5898 act.sa_flags = old_act->sa_flags; 5899 act.sa_restorer = old_act->sa_restorer; 5900 unlock_user_struct(old_act, arg2, 0); 5901 pact = &act; 5902 } else { 5903 pact = NULL; 5904 } 5905 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5906 if (!is_error(ret) && arg3) { 5907 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5908 goto efault; 5909 old_act->_sa_handler = oact._sa_handler; 5910 old_act->sa_mask = oact.sa_mask.sig[0]; 5911 old_act->sa_flags = oact.sa_flags; 5912 old_act->sa_restorer = oact.sa_restorer; 5913 unlock_user_struct(old_act, arg3, 1); 5914 } 5915 #endif 5916 } 5917 break; 5918 #endif 5919 case TARGET_NR_rt_sigaction: 5920 { 5921 #if defined(TARGET_ALPHA) 5922 struct target_sigaction act, oact, *pact = 0; 5923 struct target_rt_sigaction *rt_act; 5924 /* ??? arg4 == sizeof(sigset_t). 
*/ 5925 if (arg2) { 5926 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 5927 goto efault; 5928 act._sa_handler = rt_act->_sa_handler; 5929 act.sa_mask = rt_act->sa_mask; 5930 act.sa_flags = rt_act->sa_flags; 5931 act.sa_restorer = arg5; 5932 unlock_user_struct(rt_act, arg2, 0); 5933 pact = &act; 5934 } 5935 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5936 if (!is_error(ret) && arg3) { 5937 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 5938 goto efault; 5939 rt_act->_sa_handler = oact._sa_handler; 5940 rt_act->sa_mask = oact.sa_mask; 5941 rt_act->sa_flags = oact.sa_flags; 5942 unlock_user_struct(rt_act, arg3, 1); 5943 } 5944 #else 5945 struct target_sigaction *act; 5946 struct target_sigaction *oact; 5947 5948 if (arg2) { 5949 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 5950 goto efault; 5951 } else 5952 act = NULL; 5953 if (arg3) { 5954 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 5955 ret = -TARGET_EFAULT; 5956 goto rt_sigaction_fail; 5957 } 5958 } else 5959 oact = NULL; 5960 ret = get_errno(do_sigaction(arg1, act, oact)); 5961 rt_sigaction_fail: 5962 if (act) 5963 unlock_user_struct(act, arg2, 0); 5964 if (oact) 5965 unlock_user_struct(oact, arg3, 1); 5966 #endif 5967 } 5968 break; 5969 #ifdef TARGET_NR_sgetmask /* not on alpha */ 5970 case TARGET_NR_sgetmask: 5971 { 5972 sigset_t cur_set; 5973 abi_ulong target_set; 5974 do_sigprocmask(0, NULL, &cur_set); 5975 host_to_target_old_sigset(&target_set, &cur_set); 5976 ret = target_set; 5977 } 5978 break; 5979 #endif 5980 #ifdef TARGET_NR_ssetmask /* not on alpha */ 5981 case TARGET_NR_ssetmask: 5982 { 5983 sigset_t set, oset, cur_set; 5984 abi_ulong target_set = arg1; 5985 do_sigprocmask(0, NULL, &cur_set); 5986 target_to_host_old_sigset(&set, &target_set); 5987 sigorset(&set, &set, &cur_set); 5988 do_sigprocmask(SIG_SETMASK, &set, &oset); 5989 host_to_target_old_sigset(&target_set, &oset); 5990 ret = target_set; 5991 } 5992 break; 5993 #endif 5994 #ifdef TARGET_NR_sigprocmask 5995 
case TARGET_NR_sigprocmask: 5996 { 5997 #if defined(TARGET_ALPHA) 5998 sigset_t set, oldset; 5999 abi_ulong mask; 6000 int how; 6001 6002 switch (arg1) { 6003 case TARGET_SIG_BLOCK: 6004 how = SIG_BLOCK; 6005 break; 6006 case TARGET_SIG_UNBLOCK: 6007 how = SIG_UNBLOCK; 6008 break; 6009 case TARGET_SIG_SETMASK: 6010 how = SIG_SETMASK; 6011 break; 6012 default: 6013 ret = -TARGET_EINVAL; 6014 goto fail; 6015 } 6016 mask = arg2; 6017 target_to_host_old_sigset(&set, &mask); 6018 6019 ret = get_errno(do_sigprocmask(how, &set, &oldset)); 6020 if (!is_error(ret)) { 6021 host_to_target_old_sigset(&mask, &oldset); 6022 ret = mask; 6023 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6024 } 6025 #else 6026 sigset_t set, oldset, *set_ptr; 6027 int how; 6028 6029 if (arg2) { 6030 switch (arg1) { 6031 case TARGET_SIG_BLOCK: 6032 how = SIG_BLOCK; 6033 break; 6034 case TARGET_SIG_UNBLOCK: 6035 how = SIG_UNBLOCK; 6036 break; 6037 case TARGET_SIG_SETMASK: 6038 how = SIG_SETMASK; 6039 break; 6040 default: 6041 ret = -TARGET_EINVAL; 6042 goto fail; 6043 } 6044 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6045 goto efault; 6046 target_to_host_old_sigset(&set, p); 6047 unlock_user(p, arg2, 0); 6048 set_ptr = &set; 6049 } else { 6050 how = 0; 6051 set_ptr = NULL; 6052 } 6053 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6054 if (!is_error(ret) && arg3) { 6055 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6056 goto efault; 6057 host_to_target_old_sigset(p, &oldset); 6058 unlock_user(p, arg3, sizeof(target_sigset_t)); 6059 } 6060 #endif 6061 } 6062 break; 6063 #endif 6064 case TARGET_NR_rt_sigprocmask: 6065 { 6066 int how = arg1; 6067 sigset_t set, oldset, *set_ptr; 6068 6069 if (arg2) { 6070 switch(how) { 6071 case TARGET_SIG_BLOCK: 6072 how = SIG_BLOCK; 6073 break; 6074 case TARGET_SIG_UNBLOCK: 6075 how = SIG_UNBLOCK; 6076 break; 6077 case TARGET_SIG_SETMASK: 6078 how = SIG_SETMASK; 6079 break; 6080 default: 6081 
ret = -TARGET_EINVAL; 6082 goto fail; 6083 } 6084 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6085 goto efault; 6086 target_to_host_sigset(&set, p); 6087 unlock_user(p, arg2, 0); 6088 set_ptr = &set; 6089 } else { 6090 how = 0; 6091 set_ptr = NULL; 6092 } 6093 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6094 if (!is_error(ret) && arg3) { 6095 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6096 goto efault; 6097 host_to_target_sigset(p, &oldset); 6098 unlock_user(p, arg3, sizeof(target_sigset_t)); 6099 } 6100 } 6101 break; 6102 #ifdef TARGET_NR_sigpending 6103 case TARGET_NR_sigpending: 6104 { 6105 sigset_t set; 6106 ret = get_errno(sigpending(&set)); 6107 if (!is_error(ret)) { 6108 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6109 goto efault; 6110 host_to_target_old_sigset(p, &set); 6111 unlock_user(p, arg1, sizeof(target_sigset_t)); 6112 } 6113 } 6114 break; 6115 #endif 6116 case TARGET_NR_rt_sigpending: 6117 { 6118 sigset_t set; 6119 ret = get_errno(sigpending(&set)); 6120 if (!is_error(ret)) { 6121 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6122 goto efault; 6123 host_to_target_sigset(p, &set); 6124 unlock_user(p, arg1, sizeof(target_sigset_t)); 6125 } 6126 } 6127 break; 6128 #ifdef TARGET_NR_sigsuspend 6129 case TARGET_NR_sigsuspend: 6130 { 6131 sigset_t set; 6132 #if defined(TARGET_ALPHA) 6133 abi_ulong mask = arg1; 6134 target_to_host_old_sigset(&set, &mask); 6135 #else 6136 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6137 goto efault; 6138 target_to_host_old_sigset(&set, p); 6139 unlock_user(p, arg1, 0); 6140 #endif 6141 ret = get_errno(sigsuspend(&set)); 6142 } 6143 break; 6144 #endif 6145 case TARGET_NR_rt_sigsuspend: 6146 { 6147 sigset_t set; 6148 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6149 goto efault; 6150 target_to_host_sigset(&set, p); 6151 unlock_user(p, arg1, 0); 6152 ret = 
get_errno(sigsuspend(&set)); 6153 } 6154 break; 6155 case TARGET_NR_rt_sigtimedwait: 6156 { 6157 sigset_t set; 6158 struct timespec uts, *puts; 6159 siginfo_t uinfo; 6160 6161 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6162 goto efault; 6163 target_to_host_sigset(&set, p); 6164 unlock_user(p, arg1, 0); 6165 if (arg3) { 6166 puts = &uts; 6167 target_to_host_timespec(puts, arg3); 6168 } else { 6169 puts = NULL; 6170 } 6171 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6172 if (!is_error(ret)) { 6173 if (arg2) { 6174 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 6175 0); 6176 if (!p) { 6177 goto efault; 6178 } 6179 host_to_target_siginfo(p, &uinfo); 6180 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6181 } 6182 ret = host_to_target_signal(ret); 6183 } 6184 } 6185 break; 6186 case TARGET_NR_rt_sigqueueinfo: 6187 { 6188 siginfo_t uinfo; 6189 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6190 goto efault; 6191 target_to_host_siginfo(&uinfo, p); 6192 unlock_user(p, arg1, 0); 6193 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6194 } 6195 break; 6196 #ifdef TARGET_NR_sigreturn 6197 case TARGET_NR_sigreturn: 6198 /* NOTE: ret is eax, so not transcoding must be done */ 6199 ret = do_sigreturn(cpu_env); 6200 break; 6201 #endif 6202 case TARGET_NR_rt_sigreturn: 6203 /* NOTE: ret is eax, so not transcoding must be done */ 6204 ret = do_rt_sigreturn(cpu_env); 6205 break; 6206 case TARGET_NR_sethostname: 6207 if (!(p = lock_user_string(arg1))) 6208 goto efault; 6209 ret = get_errno(sethostname(p, arg2)); 6210 unlock_user(p, arg1, 0); 6211 break; 6212 case TARGET_NR_setrlimit: 6213 { 6214 int resource = target_to_host_resource(arg1); 6215 struct target_rlimit *target_rlim; 6216 struct rlimit rlim; 6217 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6218 goto efault; 6219 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6220 rlim.rlim_max = 
target_to_host_rlim(target_rlim->rlim_max); 6221 unlock_user_struct(target_rlim, arg2, 0); 6222 ret = get_errno(setrlimit(resource, &rlim)); 6223 } 6224 break; 6225 case TARGET_NR_getrlimit: 6226 { 6227 int resource = target_to_host_resource(arg1); 6228 struct target_rlimit *target_rlim; 6229 struct rlimit rlim; 6230 6231 ret = get_errno(getrlimit(resource, &rlim)); 6232 if (!is_error(ret)) { 6233 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6234 goto efault; 6235 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6236 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6237 unlock_user_struct(target_rlim, arg2, 1); 6238 } 6239 } 6240 break; 6241 case TARGET_NR_getrusage: 6242 { 6243 struct rusage rusage; 6244 ret = get_errno(getrusage(arg1, &rusage)); 6245 if (!is_error(ret)) { 6246 ret = host_to_target_rusage(arg2, &rusage); 6247 } 6248 } 6249 break; 6250 case TARGET_NR_gettimeofday: 6251 { 6252 struct timeval tv; 6253 ret = get_errno(gettimeofday(&tv, NULL)); 6254 if (!is_error(ret)) { 6255 if (copy_to_user_timeval(arg1, &tv)) 6256 goto efault; 6257 } 6258 } 6259 break; 6260 case TARGET_NR_settimeofday: 6261 { 6262 struct timeval tv; 6263 if (copy_from_user_timeval(&tv, arg1)) 6264 goto efault; 6265 ret = get_errno(settimeofday(&tv, NULL)); 6266 } 6267 break; 6268 #if defined(TARGET_NR_select) 6269 case TARGET_NR_select: 6270 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 6271 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6272 #else 6273 { 6274 struct target_sel_arg_struct *sel; 6275 abi_ulong inp, outp, exp, tvp; 6276 long nsel; 6277 6278 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6279 goto efault; 6280 nsel = tswapal(sel->n); 6281 inp = tswapal(sel->inp); 6282 outp = tswapal(sel->outp); 6283 exp = tswapal(sel->exp); 6284 tvp = tswapal(sel->tvp); 6285 unlock_user_struct(sel, arg1, 0); 6286 ret = do_select(nsel, inp, outp, exp, tvp); 6287 } 6288 #endif 6289 break; 6290 #endif 6291 #ifdef TARGET_NR_pselect6 6292 case 
TARGET_NR_pselect6: 6293 { 6294 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6295 fd_set rfds, wfds, efds; 6296 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6297 struct timespec ts, *ts_ptr; 6298 6299 /* 6300 * The 6th arg is actually two args smashed together, 6301 * so we cannot use the C library. 6302 */ 6303 sigset_t set; 6304 struct { 6305 sigset_t *set; 6306 size_t size; 6307 } sig, *sig_ptr; 6308 6309 abi_ulong arg_sigset, arg_sigsize, *arg7; 6310 target_sigset_t *target_sigset; 6311 6312 n = arg1; 6313 rfd_addr = arg2; 6314 wfd_addr = arg3; 6315 efd_addr = arg4; 6316 ts_addr = arg5; 6317 6318 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6319 if (ret) { 6320 goto fail; 6321 } 6322 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6323 if (ret) { 6324 goto fail; 6325 } 6326 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6327 if (ret) { 6328 goto fail; 6329 } 6330 6331 /* 6332 * This takes a timespec, and not a timeval, so we cannot 6333 * use the do_select() helper ... 
6334 */ 6335 if (ts_addr) { 6336 if (target_to_host_timespec(&ts, ts_addr)) { 6337 goto efault; 6338 } 6339 ts_ptr = &ts; 6340 } else { 6341 ts_ptr = NULL; 6342 } 6343 6344 /* Extract the two packed args for the sigset */ 6345 if (arg6) { 6346 sig_ptr = &sig; 6347 sig.size = _NSIG / 8; 6348 6349 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6350 if (!arg7) { 6351 goto efault; 6352 } 6353 arg_sigset = tswapal(arg7[0]); 6354 arg_sigsize = tswapal(arg7[1]); 6355 unlock_user(arg7, arg6, 0); 6356 6357 if (arg_sigset) { 6358 sig.set = &set; 6359 if (arg_sigsize != sizeof(*target_sigset)) { 6360 /* Like the kernel, we enforce correct size sigsets */ 6361 ret = -TARGET_EINVAL; 6362 goto fail; 6363 } 6364 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6365 sizeof(*target_sigset), 1); 6366 if (!target_sigset) { 6367 goto efault; 6368 } 6369 target_to_host_sigset(&set, target_sigset); 6370 unlock_user(target_sigset, arg_sigset, 0); 6371 } else { 6372 sig.set = NULL; 6373 } 6374 } else { 6375 sig_ptr = NULL; 6376 } 6377 6378 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6379 ts_ptr, sig_ptr)); 6380 6381 if (!is_error(ret)) { 6382 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6383 goto efault; 6384 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6385 goto efault; 6386 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6387 goto efault; 6388 6389 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6390 goto efault; 6391 } 6392 } 6393 break; 6394 #endif 6395 case TARGET_NR_symlink: 6396 { 6397 void *p2; 6398 p = lock_user_string(arg1); 6399 p2 = lock_user_string(arg2); 6400 if (!p || !p2) 6401 ret = -TARGET_EFAULT; 6402 else 6403 ret = get_errno(symlink(p, p2)); 6404 unlock_user(p2, arg2, 0); 6405 unlock_user(p, arg1, 0); 6406 } 6407 break; 6408 #if defined(TARGET_NR_symlinkat) 6409 case TARGET_NR_symlinkat: 6410 { 6411 void *p2; 6412 p = lock_user_string(arg1); 6413 p2 = lock_user_string(arg3); 6414 if (!p || !p2) 6415 
ret = -TARGET_EFAULT; 6416 else 6417 ret = get_errno(symlinkat(p, arg2, p2)); 6418 unlock_user(p2, arg3, 0); 6419 unlock_user(p, arg1, 0); 6420 } 6421 break; 6422 #endif 6423 #ifdef TARGET_NR_oldlstat 6424 case TARGET_NR_oldlstat: 6425 goto unimplemented; 6426 #endif 6427 case TARGET_NR_readlink: 6428 { 6429 void *p2; 6430 p = lock_user_string(arg1); 6431 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6432 if (!p || !p2) { 6433 ret = -TARGET_EFAULT; 6434 } else if (is_proc_myself((const char *)p, "exe")) { 6435 char real[PATH_MAX], *temp; 6436 temp = realpath(exec_path, real); 6437 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6438 snprintf((char *)p2, arg3, "%s", real); 6439 } else { 6440 ret = get_errno(readlink(path(p), p2, arg3)); 6441 } 6442 unlock_user(p2, arg2, ret); 6443 unlock_user(p, arg1, 0); 6444 } 6445 break; 6446 #if defined(TARGET_NR_readlinkat) 6447 case TARGET_NR_readlinkat: 6448 { 6449 void *p2; 6450 p = lock_user_string(arg2); 6451 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6452 if (!p || !p2) { 6453 ret = -TARGET_EFAULT; 6454 } else if (is_proc_myself((const char *)p, "exe")) { 6455 char real[PATH_MAX], *temp; 6456 temp = realpath(exec_path, real); 6457 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 6458 snprintf((char *)p2, arg4, "%s", real); 6459 } else { 6460 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 6461 } 6462 unlock_user(p2, arg3, ret); 6463 unlock_user(p, arg2, 0); 6464 } 6465 break; 6466 #endif 6467 #ifdef TARGET_NR_uselib 6468 case TARGET_NR_uselib: 6469 goto unimplemented; 6470 #endif 6471 #ifdef TARGET_NR_swapon 6472 case TARGET_NR_swapon: 6473 if (!(p = lock_user_string(arg1))) 6474 goto efault; 6475 ret = get_errno(swapon(p, arg2)); 6476 unlock_user(p, arg1, 0); 6477 break; 6478 #endif 6479 case TARGET_NR_reboot: 6480 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6481 /* arg4 must be ignored in all other cases */ 6482 p = lock_user_string(arg4); 6483 if (!p) { 6484 goto efault; 6485 } 6486 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6487 unlock_user(p, arg4, 0); 6488 } else { 6489 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6490 } 6491 break; 6492 #ifdef TARGET_NR_readdir 6493 case TARGET_NR_readdir: 6494 goto unimplemented; 6495 #endif 6496 #ifdef TARGET_NR_mmap 6497 case TARGET_NR_mmap: 6498 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6499 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 6500 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6501 || defined(TARGET_S390X) 6502 { 6503 abi_ulong *v; 6504 abi_ulong v1, v2, v3, v4, v5, v6; 6505 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 6506 goto efault; 6507 v1 = tswapal(v[0]); 6508 v2 = tswapal(v[1]); 6509 v3 = tswapal(v[2]); 6510 v4 = tswapal(v[3]); 6511 v5 = tswapal(v[4]); 6512 v6 = tswapal(v[5]); 6513 unlock_user(v, arg1, 0); 6514 ret = get_errno(target_mmap(v1, v2, v3, 6515 target_to_host_bitmask(v4, mmap_flags_tbl), 6516 v5, v6)); 6517 } 6518 #else 6519 ret = get_errno(target_mmap(arg1, arg2, arg3, 6520 target_to_host_bitmask(arg4, mmap_flags_tbl), 6521 arg5, 6522 arg6)); 6523 #endif 6524 break; 6525 #endif 6526 #ifdef TARGET_NR_mmap2 6527 case TARGET_NR_mmap2: 6528 #ifndef 
MMAP_SHIFT 6529 #define MMAP_SHIFT 12 6530 #endif 6531 ret = get_errno(target_mmap(arg1, arg2, arg3, 6532 target_to_host_bitmask(arg4, mmap_flags_tbl), 6533 arg5, 6534 arg6 << MMAP_SHIFT)); 6535 break; 6536 #endif 6537 case TARGET_NR_munmap: 6538 ret = get_errno(target_munmap(arg1, arg2)); 6539 break; 6540 case TARGET_NR_mprotect: 6541 { 6542 TaskState *ts = cpu->opaque; 6543 /* Special hack to detect libc making the stack executable. */ 6544 if ((arg3 & PROT_GROWSDOWN) 6545 && arg1 >= ts->info->stack_limit 6546 && arg1 <= ts->info->start_stack) { 6547 arg3 &= ~PROT_GROWSDOWN; 6548 arg2 = arg2 + arg1 - ts->info->stack_limit; 6549 arg1 = ts->info->stack_limit; 6550 } 6551 } 6552 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 6553 break; 6554 #ifdef TARGET_NR_mremap 6555 case TARGET_NR_mremap: 6556 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 6557 break; 6558 #endif 6559 /* ??? msync/mlock/munlock are broken for softmmu. */ 6560 #ifdef TARGET_NR_msync 6561 case TARGET_NR_msync: 6562 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 6563 break; 6564 #endif 6565 #ifdef TARGET_NR_mlock 6566 case TARGET_NR_mlock: 6567 ret = get_errno(mlock(g2h(arg1), arg2)); 6568 break; 6569 #endif 6570 #ifdef TARGET_NR_munlock 6571 case TARGET_NR_munlock: 6572 ret = get_errno(munlock(g2h(arg1), arg2)); 6573 break; 6574 #endif 6575 #ifdef TARGET_NR_mlockall 6576 case TARGET_NR_mlockall: 6577 ret = get_errno(mlockall(arg1)); 6578 break; 6579 #endif 6580 #ifdef TARGET_NR_munlockall 6581 case TARGET_NR_munlockall: 6582 ret = get_errno(munlockall()); 6583 break; 6584 #endif 6585 case TARGET_NR_truncate: 6586 if (!(p = lock_user_string(arg1))) 6587 goto efault; 6588 ret = get_errno(truncate(p, arg2)); 6589 unlock_user(p, arg1, 0); 6590 break; 6591 case TARGET_NR_ftruncate: 6592 ret = get_errno(ftruncate(arg1, arg2)); 6593 break; 6594 case TARGET_NR_fchmod: 6595 ret = get_errno(fchmod(arg1, arg2)); 6596 break; 6597 #if defined(TARGET_NR_fchmodat) 6598 case 
TARGET_NR_fchmodat: 6599 if (!(p = lock_user_string(arg2))) 6600 goto efault; 6601 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 6602 unlock_user(p, arg2, 0); 6603 break; 6604 #endif 6605 case TARGET_NR_getpriority: 6606 /* Note that negative values are valid for getpriority, so we must 6607 differentiate based on errno settings. */ 6608 errno = 0; 6609 ret = getpriority(arg1, arg2); 6610 if (ret == -1 && errno != 0) { 6611 ret = -host_to_target_errno(errno); 6612 break; 6613 } 6614 #ifdef TARGET_ALPHA 6615 /* Return value is the unbiased priority. Signal no error. */ 6616 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 6617 #else 6618 /* Return value is a biased priority to avoid negative numbers. */ 6619 ret = 20 - ret; 6620 #endif 6621 break; 6622 case TARGET_NR_setpriority: 6623 ret = get_errno(setpriority(arg1, arg2, arg3)); 6624 break; 6625 #ifdef TARGET_NR_profil 6626 case TARGET_NR_profil: 6627 goto unimplemented; 6628 #endif 6629 case TARGET_NR_statfs: 6630 if (!(p = lock_user_string(arg1))) 6631 goto efault; 6632 ret = get_errno(statfs(path(p), &stfs)); 6633 unlock_user(p, arg1, 0); 6634 convert_statfs: 6635 if (!is_error(ret)) { 6636 struct target_statfs *target_stfs; 6637 6638 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 6639 goto efault; 6640 __put_user(stfs.f_type, &target_stfs->f_type); 6641 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6642 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6643 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6644 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6645 __put_user(stfs.f_files, &target_stfs->f_files); 6646 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6647 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6648 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6649 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6650 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6651 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6652 
unlock_user_struct(target_stfs, arg2, 1); 6653 } 6654 break; 6655 case TARGET_NR_fstatfs: 6656 ret = get_errno(fstatfs(arg1, &stfs)); 6657 goto convert_statfs; 6658 #ifdef TARGET_NR_statfs64 6659 case TARGET_NR_statfs64: 6660 if (!(p = lock_user_string(arg1))) 6661 goto efault; 6662 ret = get_errno(statfs(path(p), &stfs)); 6663 unlock_user(p, arg1, 0); 6664 convert_statfs64: 6665 if (!is_error(ret)) { 6666 struct target_statfs64 *target_stfs; 6667 6668 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 6669 goto efault; 6670 __put_user(stfs.f_type, &target_stfs->f_type); 6671 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 6672 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 6673 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 6674 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 6675 __put_user(stfs.f_files, &target_stfs->f_files); 6676 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 6677 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 6678 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 6679 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 6680 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 6681 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 6682 unlock_user_struct(target_stfs, arg3, 1); 6683 } 6684 break; 6685 case TARGET_NR_fstatfs64: 6686 ret = get_errno(fstatfs(arg1, &stfs)); 6687 goto convert_statfs64; 6688 #endif 6689 #ifdef TARGET_NR_ioperm 6690 case TARGET_NR_ioperm: 6691 goto unimplemented; 6692 #endif 6693 #ifdef TARGET_NR_socketcall 6694 case TARGET_NR_socketcall: 6695 ret = do_socketcall(arg1, arg2); 6696 break; 6697 #endif 6698 #ifdef TARGET_NR_accept 6699 case TARGET_NR_accept: 6700 ret = do_accept4(arg1, arg2, arg3, 0); 6701 break; 6702 #endif 6703 #ifdef TARGET_NR_accept4 6704 case TARGET_NR_accept4: 6705 #ifdef CONFIG_ACCEPT4 6706 ret = do_accept4(arg1, arg2, arg3, arg4); 6707 #else 6708 goto unimplemented; 6709 #endif 6710 break; 6711 #endif 6712 #ifdef TARGET_NR_bind 6713 
case TARGET_NR_bind: 6714 ret = do_bind(arg1, arg2, arg3); 6715 break; 6716 #endif 6717 #ifdef TARGET_NR_connect 6718 case TARGET_NR_connect: 6719 ret = do_connect(arg1, arg2, arg3); 6720 break; 6721 #endif 6722 #ifdef TARGET_NR_getpeername 6723 case TARGET_NR_getpeername: 6724 ret = do_getpeername(arg1, arg2, arg3); 6725 break; 6726 #endif 6727 #ifdef TARGET_NR_getsockname 6728 case TARGET_NR_getsockname: 6729 ret = do_getsockname(arg1, arg2, arg3); 6730 break; 6731 #endif 6732 #ifdef TARGET_NR_getsockopt 6733 case TARGET_NR_getsockopt: 6734 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 6735 break; 6736 #endif 6737 #ifdef TARGET_NR_listen 6738 case TARGET_NR_listen: 6739 ret = get_errno(listen(arg1, arg2)); 6740 break; 6741 #endif 6742 #ifdef TARGET_NR_recv 6743 case TARGET_NR_recv: 6744 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 6745 break; 6746 #endif 6747 #ifdef TARGET_NR_recvfrom 6748 case TARGET_NR_recvfrom: 6749 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 6750 break; 6751 #endif 6752 #ifdef TARGET_NR_recvmsg 6753 case TARGET_NR_recvmsg: 6754 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 6755 break; 6756 #endif 6757 #ifdef TARGET_NR_send 6758 case TARGET_NR_send: 6759 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 6760 break; 6761 #endif 6762 #ifdef TARGET_NR_sendmsg 6763 case TARGET_NR_sendmsg: 6764 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 6765 break; 6766 #endif 6767 #ifdef TARGET_NR_sendmmsg 6768 case TARGET_NR_sendmmsg: 6769 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 6770 break; 6771 case TARGET_NR_recvmmsg: 6772 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 6773 break; 6774 #endif 6775 #ifdef TARGET_NR_sendto 6776 case TARGET_NR_sendto: 6777 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 6778 break; 6779 #endif 6780 #ifdef TARGET_NR_shutdown 6781 case TARGET_NR_shutdown: 6782 ret = get_errno(shutdown(arg1, arg2)); 6783 break; 6784 #endif 6785 #ifdef TARGET_NR_socket 6786 case TARGET_NR_socket: 6787 ret = 
do_socket(arg1, arg2, arg3); 6788 break; 6789 #endif 6790 #ifdef TARGET_NR_socketpair 6791 case TARGET_NR_socketpair: 6792 ret = do_socketpair(arg1, arg2, arg3, arg4); 6793 break; 6794 #endif 6795 #ifdef TARGET_NR_setsockopt 6796 case TARGET_NR_setsockopt: 6797 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 6798 break; 6799 #endif 6800 6801 case TARGET_NR_syslog: 6802 if (!(p = lock_user_string(arg2))) 6803 goto efault; 6804 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 6805 unlock_user(p, arg2, 0); 6806 break; 6807 6808 case TARGET_NR_setitimer: 6809 { 6810 struct itimerval value, ovalue, *pvalue; 6811 6812 if (arg2) { 6813 pvalue = &value; 6814 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 6815 || copy_from_user_timeval(&pvalue->it_value, 6816 arg2 + sizeof(struct target_timeval))) 6817 goto efault; 6818 } else { 6819 pvalue = NULL; 6820 } 6821 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 6822 if (!is_error(ret) && arg3) { 6823 if (copy_to_user_timeval(arg3, 6824 &ovalue.it_interval) 6825 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 6826 &ovalue.it_value)) 6827 goto efault; 6828 } 6829 } 6830 break; 6831 case TARGET_NR_getitimer: 6832 { 6833 struct itimerval value; 6834 6835 ret = get_errno(getitimer(arg1, &value)); 6836 if (!is_error(ret) && arg2) { 6837 if (copy_to_user_timeval(arg2, 6838 &value.it_interval) 6839 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 6840 &value.it_value)) 6841 goto efault; 6842 } 6843 } 6844 break; 6845 case TARGET_NR_stat: 6846 if (!(p = lock_user_string(arg1))) 6847 goto efault; 6848 ret = get_errno(stat(path(p), &st)); 6849 unlock_user(p, arg1, 0); 6850 goto do_stat; 6851 case TARGET_NR_lstat: 6852 if (!(p = lock_user_string(arg1))) 6853 goto efault; 6854 ret = get_errno(lstat(path(p), &st)); 6855 unlock_user(p, arg1, 0); 6856 goto do_stat; 6857 case TARGET_NR_fstat: 6858 { 6859 ret = get_errno(fstat(arg1, &st)); 6860 do_stat: 6861 if (!is_error(ret)) { 6862 
struct target_stat *target_st; 6863 6864 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 6865 goto efault; 6866 memset(target_st, 0, sizeof(*target_st)); 6867 __put_user(st.st_dev, &target_st->st_dev); 6868 __put_user(st.st_ino, &target_st->st_ino); 6869 __put_user(st.st_mode, &target_st->st_mode); 6870 __put_user(st.st_uid, &target_st->st_uid); 6871 __put_user(st.st_gid, &target_st->st_gid); 6872 __put_user(st.st_nlink, &target_st->st_nlink); 6873 __put_user(st.st_rdev, &target_st->st_rdev); 6874 __put_user(st.st_size, &target_st->st_size); 6875 __put_user(st.st_blksize, &target_st->st_blksize); 6876 __put_user(st.st_blocks, &target_st->st_blocks); 6877 __put_user(st.st_atime, &target_st->target_st_atime); 6878 __put_user(st.st_mtime, &target_st->target_st_mtime); 6879 __put_user(st.st_ctime, &target_st->target_st_ctime); 6880 unlock_user_struct(target_st, arg2, 1); 6881 } 6882 } 6883 break; 6884 #ifdef TARGET_NR_olduname 6885 case TARGET_NR_olduname: 6886 goto unimplemented; 6887 #endif 6888 #ifdef TARGET_NR_iopl 6889 case TARGET_NR_iopl: 6890 goto unimplemented; 6891 #endif 6892 case TARGET_NR_vhangup: 6893 ret = get_errno(vhangup()); 6894 break; 6895 #ifdef TARGET_NR_idle 6896 case TARGET_NR_idle: 6897 goto unimplemented; 6898 #endif 6899 #ifdef TARGET_NR_syscall 6900 case TARGET_NR_syscall: 6901 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 6902 arg6, arg7, arg8, 0); 6903 break; 6904 #endif 6905 case TARGET_NR_wait4: 6906 { 6907 int status; 6908 abi_long status_ptr = arg2; 6909 struct rusage rusage, *rusage_ptr; 6910 abi_ulong target_rusage = arg4; 6911 abi_long rusage_err; 6912 if (target_rusage) 6913 rusage_ptr = &rusage; 6914 else 6915 rusage_ptr = NULL; 6916 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 6917 if (!is_error(ret)) { 6918 if (status_ptr && ret) { 6919 status = host_to_target_waitstatus(status); 6920 if (put_user_s32(status, status_ptr)) 6921 goto efault; 6922 } 6923 if (target_rusage) { 6924 rusage_err = 
host_to_target_rusage(target_rusage, &rusage); 6925 if (rusage_err) { 6926 ret = rusage_err; 6927 } 6928 } 6929 } 6930 } 6931 break; 6932 #ifdef TARGET_NR_swapoff 6933 case TARGET_NR_swapoff: 6934 if (!(p = lock_user_string(arg1))) 6935 goto efault; 6936 ret = get_errno(swapoff(p)); 6937 unlock_user(p, arg1, 0); 6938 break; 6939 #endif 6940 case TARGET_NR_sysinfo: 6941 { 6942 struct target_sysinfo *target_value; 6943 struct sysinfo value; 6944 ret = get_errno(sysinfo(&value)); 6945 if (!is_error(ret) && arg1) 6946 { 6947 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 6948 goto efault; 6949 __put_user(value.uptime, &target_value->uptime); 6950 __put_user(value.loads[0], &target_value->loads[0]); 6951 __put_user(value.loads[1], &target_value->loads[1]); 6952 __put_user(value.loads[2], &target_value->loads[2]); 6953 __put_user(value.totalram, &target_value->totalram); 6954 __put_user(value.freeram, &target_value->freeram); 6955 __put_user(value.sharedram, &target_value->sharedram); 6956 __put_user(value.bufferram, &target_value->bufferram); 6957 __put_user(value.totalswap, &target_value->totalswap); 6958 __put_user(value.freeswap, &target_value->freeswap); 6959 __put_user(value.procs, &target_value->procs); 6960 __put_user(value.totalhigh, &target_value->totalhigh); 6961 __put_user(value.freehigh, &target_value->freehigh); 6962 __put_user(value.mem_unit, &target_value->mem_unit); 6963 unlock_user_struct(target_value, arg1, 1); 6964 } 6965 } 6966 break; 6967 #ifdef TARGET_NR_ipc 6968 case TARGET_NR_ipc: 6969 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 6970 break; 6971 #endif 6972 #ifdef TARGET_NR_semget 6973 case TARGET_NR_semget: 6974 ret = get_errno(semget(arg1, arg2, arg3)); 6975 break; 6976 #endif 6977 #ifdef TARGET_NR_semop 6978 case TARGET_NR_semop: 6979 ret = do_semop(arg1, arg2, arg3); 6980 break; 6981 #endif 6982 #ifdef TARGET_NR_semctl 6983 case TARGET_NR_semctl: 6984 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); 
6985 break; 6986 #endif 6987 #ifdef TARGET_NR_msgctl 6988 case TARGET_NR_msgctl: 6989 ret = do_msgctl(arg1, arg2, arg3); 6990 break; 6991 #endif 6992 #ifdef TARGET_NR_msgget 6993 case TARGET_NR_msgget: 6994 ret = get_errno(msgget(arg1, arg2)); 6995 break; 6996 #endif 6997 #ifdef TARGET_NR_msgrcv 6998 case TARGET_NR_msgrcv: 6999 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 7000 break; 7001 #endif 7002 #ifdef TARGET_NR_msgsnd 7003 case TARGET_NR_msgsnd: 7004 ret = do_msgsnd(arg1, arg2, arg3, arg4); 7005 break; 7006 #endif 7007 #ifdef TARGET_NR_shmget 7008 case TARGET_NR_shmget: 7009 ret = get_errno(shmget(arg1, arg2, arg3)); 7010 break; 7011 #endif 7012 #ifdef TARGET_NR_shmctl 7013 case TARGET_NR_shmctl: 7014 ret = do_shmctl(arg1, arg2, arg3); 7015 break; 7016 #endif 7017 #ifdef TARGET_NR_shmat 7018 case TARGET_NR_shmat: 7019 ret = do_shmat(arg1, arg2, arg3); 7020 break; 7021 #endif 7022 #ifdef TARGET_NR_shmdt 7023 case TARGET_NR_shmdt: 7024 ret = do_shmdt(arg1); 7025 break; 7026 #endif 7027 case TARGET_NR_fsync: 7028 ret = get_errno(fsync(arg1)); 7029 break; 7030 case TARGET_NR_clone: 7031 /* Linux manages to have three different orderings for its 7032 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 7033 * match the kernel's CONFIG_CLONE_* settings. 7034 * Microblaze is further special in that it uses a sixth 7035 * implicit argument to clone for the TLS pointer. 
7036 */ 7037 #if defined(TARGET_MICROBLAZE) 7038 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 7039 #elif defined(TARGET_CLONE_BACKWARDS) 7040 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 7041 #elif defined(TARGET_CLONE_BACKWARDS2) 7042 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 7043 #else 7044 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 7045 #endif 7046 break; 7047 #ifdef __NR_exit_group 7048 /* new thread calls */ 7049 case TARGET_NR_exit_group: 7050 #ifdef TARGET_GPROF 7051 _mcleanup(); 7052 #endif 7053 gdb_exit(cpu_env, arg1); 7054 ret = get_errno(exit_group(arg1)); 7055 break; 7056 #endif 7057 case TARGET_NR_setdomainname: 7058 if (!(p = lock_user_string(arg1))) 7059 goto efault; 7060 ret = get_errno(setdomainname(p, arg2)); 7061 unlock_user(p, arg1, 0); 7062 break; 7063 case TARGET_NR_uname: 7064 /* no need to transcode because we use the linux syscall */ 7065 { 7066 struct new_utsname * buf; 7067 7068 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 7069 goto efault; 7070 ret = get_errno(sys_uname(buf)); 7071 if (!is_error(ret)) { 7072 /* Overrite the native machine name with whatever is being 7073 emulated. */ 7074 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 7075 /* Allow the user to override the reported release. 
*/ 7076 if (qemu_uname_release && *qemu_uname_release) 7077 strcpy (buf->release, qemu_uname_release); 7078 } 7079 unlock_user_struct(buf, arg1, 1); 7080 } 7081 break; 7082 #ifdef TARGET_I386 7083 case TARGET_NR_modify_ldt: 7084 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7085 break; 7086 #if !defined(TARGET_X86_64) 7087 case TARGET_NR_vm86old: 7088 goto unimplemented; 7089 case TARGET_NR_vm86: 7090 ret = do_vm86(cpu_env, arg1, arg2); 7091 break; 7092 #endif 7093 #endif 7094 case TARGET_NR_adjtimex: 7095 goto unimplemented; 7096 #ifdef TARGET_NR_create_module 7097 case TARGET_NR_create_module: 7098 #endif 7099 case TARGET_NR_init_module: 7100 case TARGET_NR_delete_module: 7101 #ifdef TARGET_NR_get_kernel_syms 7102 case TARGET_NR_get_kernel_syms: 7103 #endif 7104 goto unimplemented; 7105 case TARGET_NR_quotactl: 7106 goto unimplemented; 7107 case TARGET_NR_getpgid: 7108 ret = get_errno(getpgid(arg1)); 7109 break; 7110 case TARGET_NR_fchdir: 7111 ret = get_errno(fchdir(arg1)); 7112 break; 7113 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7114 case TARGET_NR_bdflush: 7115 goto unimplemented; 7116 #endif 7117 #ifdef TARGET_NR_sysfs 7118 case TARGET_NR_sysfs: 7119 goto unimplemented; 7120 #endif 7121 case TARGET_NR_personality: 7122 ret = get_errno(personality(arg1)); 7123 break; 7124 #ifdef TARGET_NR_afs_syscall 7125 case TARGET_NR_afs_syscall: 7126 goto unimplemented; 7127 #endif 7128 #ifdef TARGET_NR__llseek /* Not on alpha */ 7129 case TARGET_NR__llseek: 7130 { 7131 int64_t res; 7132 #if !defined(__NR_llseek) 7133 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7134 if (res == -1) { 7135 ret = get_errno(res); 7136 } else { 7137 ret = 0; 7138 } 7139 #else 7140 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7141 #endif 7142 if ((ret == 0) && put_user_s64(res, arg4)) { 7143 goto efault; 7144 } 7145 } 7146 break; 7147 #endif 7148 case TARGET_NR_getdents: 7149 #ifdef __NR_getdents 7150 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7151 { 7152 
struct target_dirent *target_dirp; 7153 struct linux_dirent *dirp; 7154 abi_long count = arg3; 7155 7156 dirp = malloc(count); 7157 if (!dirp) { 7158 ret = -TARGET_ENOMEM; 7159 goto fail; 7160 } 7161 7162 ret = get_errno(sys_getdents(arg1, dirp, count)); 7163 if (!is_error(ret)) { 7164 struct linux_dirent *de; 7165 struct target_dirent *tde; 7166 int len = ret; 7167 int reclen, treclen; 7168 int count1, tnamelen; 7169 7170 count1 = 0; 7171 de = dirp; 7172 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7173 goto efault; 7174 tde = target_dirp; 7175 while (len > 0) { 7176 reclen = de->d_reclen; 7177 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7178 assert(tnamelen >= 0); 7179 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7180 assert(count1 + treclen <= count); 7181 tde->d_reclen = tswap16(treclen); 7182 tde->d_ino = tswapal(de->d_ino); 7183 tde->d_off = tswapal(de->d_off); 7184 memcpy(tde->d_name, de->d_name, tnamelen); 7185 de = (struct linux_dirent *)((char *)de + reclen); 7186 len -= reclen; 7187 tde = (struct target_dirent *)((char *)tde + treclen); 7188 count1 += treclen; 7189 } 7190 ret = count1; 7191 unlock_user(target_dirp, arg2, ret); 7192 } 7193 free(dirp); 7194 } 7195 #else 7196 { 7197 struct linux_dirent *dirp; 7198 abi_long count = arg3; 7199 7200 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7201 goto efault; 7202 ret = get_errno(sys_getdents(arg1, dirp, count)); 7203 if (!is_error(ret)) { 7204 struct linux_dirent *de; 7205 int len = ret; 7206 int reclen; 7207 de = dirp; 7208 while (len > 0) { 7209 reclen = de->d_reclen; 7210 if (reclen > len) 7211 break; 7212 de->d_reclen = tswap16(reclen); 7213 tswapls(&de->d_ino); 7214 tswapls(&de->d_off); 7215 de = (struct linux_dirent *)((char *)de + reclen); 7216 len -= reclen; 7217 } 7218 } 7219 unlock_user(dirp, arg2, ret); 7220 } 7221 #endif 7222 #else 7223 /* Implement getdents in terms of getdents64 */ 7224 { 7225 struct linux_dirent64 *dirp; 7226 abi_long 
count = arg3; 7227 7228 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 7229 if (!dirp) { 7230 goto efault; 7231 } 7232 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7233 if (!is_error(ret)) { 7234 /* Convert the dirent64 structs to target dirent. We do this 7235 * in-place, since we can guarantee that a target_dirent is no 7236 * larger than a dirent64; however this means we have to be 7237 * careful to read everything before writing in the new format. 7238 */ 7239 struct linux_dirent64 *de; 7240 struct target_dirent *tde; 7241 int len = ret; 7242 int tlen = 0; 7243 7244 de = dirp; 7245 tde = (struct target_dirent *)dirp; 7246 while (len > 0) { 7247 int namelen, treclen; 7248 int reclen = de->d_reclen; 7249 uint64_t ino = de->d_ino; 7250 int64_t off = de->d_off; 7251 uint8_t type = de->d_type; 7252 7253 namelen = strlen(de->d_name); 7254 treclen = offsetof(struct target_dirent, d_name) 7255 + namelen + 2; 7256 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 7257 7258 memmove(tde->d_name, de->d_name, namelen + 1); 7259 tde->d_ino = tswapal(ino); 7260 tde->d_off = tswapal(off); 7261 tde->d_reclen = tswap16(treclen); 7262 /* The target_dirent type is in what was formerly a padding 7263 * byte at the end of the structure: 7264 */ 7265 *(((char *)tde) + treclen - 1) = type; 7266 7267 de = (struct linux_dirent64 *)((char *)de + reclen); 7268 tde = (struct target_dirent *)((char *)tde + treclen); 7269 len -= reclen; 7270 tlen += treclen; 7271 } 7272 ret = tlen; 7273 } 7274 unlock_user(dirp, arg2, ret); 7275 } 7276 #endif 7277 break; 7278 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 7279 case TARGET_NR_getdents64: 7280 { 7281 struct linux_dirent64 *dirp; 7282 abi_long count = arg3; 7283 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7284 goto efault; 7285 ret = get_errno(sys_getdents64(arg1, dirp, count)); 7286 if (!is_error(ret)) { 7287 struct linux_dirent64 *de; 7288 int len = ret; 7289 int reclen; 7290 de = dirp; 7291 while (len > 0) 
{ 7292 reclen = de->d_reclen; 7293 if (reclen > len) 7294 break; 7295 de->d_reclen = tswap16(reclen); 7296 tswap64s((uint64_t *)&de->d_ino); 7297 tswap64s((uint64_t *)&de->d_off); 7298 de = (struct linux_dirent64 *)((char *)de + reclen); 7299 len -= reclen; 7300 } 7301 } 7302 unlock_user(dirp, arg2, ret); 7303 } 7304 break; 7305 #endif /* TARGET_NR_getdents64 */ 7306 #if defined(TARGET_NR__newselect) 7307 case TARGET_NR__newselect: 7308 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7309 break; 7310 #endif 7311 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7312 # ifdef TARGET_NR_poll 7313 case TARGET_NR_poll: 7314 # endif 7315 # ifdef TARGET_NR_ppoll 7316 case TARGET_NR_ppoll: 7317 # endif 7318 { 7319 struct target_pollfd *target_pfd; 7320 unsigned int nfds = arg2; 7321 int timeout = arg3; 7322 struct pollfd *pfd; 7323 unsigned int i; 7324 7325 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7326 if (!target_pfd) 7327 goto efault; 7328 7329 pfd = alloca(sizeof(struct pollfd) * nfds); 7330 for(i = 0; i < nfds; i++) { 7331 pfd[i].fd = tswap32(target_pfd[i].fd); 7332 pfd[i].events = tswap16(target_pfd[i].events); 7333 } 7334 7335 # ifdef TARGET_NR_ppoll 7336 if (num == TARGET_NR_ppoll) { 7337 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7338 target_sigset_t *target_set; 7339 sigset_t _set, *set = &_set; 7340 7341 if (arg3) { 7342 if (target_to_host_timespec(timeout_ts, arg3)) { 7343 unlock_user(target_pfd, arg1, 0); 7344 goto efault; 7345 } 7346 } else { 7347 timeout_ts = NULL; 7348 } 7349 7350 if (arg4) { 7351 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7352 if (!target_set) { 7353 unlock_user(target_pfd, arg1, 0); 7354 goto efault; 7355 } 7356 target_to_host_sigset(set, target_set); 7357 } else { 7358 set = NULL; 7359 } 7360 7361 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); 7362 7363 if (!is_error(ret) && arg3) { 7364 host_to_target_timespec(arg3, timeout_ts); 
7365 } 7366 if (arg4) { 7367 unlock_user(target_set, arg4, 0); 7368 } 7369 } else 7370 # endif 7371 ret = get_errno(poll(pfd, nfds, timeout)); 7372 7373 if (!is_error(ret)) { 7374 for(i = 0; i < nfds; i++) { 7375 target_pfd[i].revents = tswap16(pfd[i].revents); 7376 } 7377 } 7378 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7379 } 7380 break; 7381 #endif 7382 case TARGET_NR_flock: 7383 /* NOTE: the flock constant seems to be the same for every 7384 Linux platform */ 7385 ret = get_errno(flock(arg1, arg2)); 7386 break; 7387 case TARGET_NR_readv: 7388 { 7389 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7390 if (vec != NULL) { 7391 ret = get_errno(readv(arg1, vec, arg3)); 7392 unlock_iovec(vec, arg2, arg3, 1); 7393 } else { 7394 ret = -host_to_target_errno(errno); 7395 } 7396 } 7397 break; 7398 case TARGET_NR_writev: 7399 { 7400 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7401 if (vec != NULL) { 7402 ret = get_errno(writev(arg1, vec, arg3)); 7403 unlock_iovec(vec, arg2, arg3, 0); 7404 } else { 7405 ret = -host_to_target_errno(errno); 7406 } 7407 } 7408 break; 7409 case TARGET_NR_getsid: 7410 ret = get_errno(getsid(arg1)); 7411 break; 7412 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7413 case TARGET_NR_fdatasync: 7414 ret = get_errno(fdatasync(arg1)); 7415 break; 7416 #endif 7417 case TARGET_NR__sysctl: 7418 /* We don't implement this, but ENOTDIR is always a safe 7419 return value. */ 7420 ret = -TARGET_ENOTDIR; 7421 break; 7422 case TARGET_NR_sched_getaffinity: 7423 { 7424 unsigned int mask_size; 7425 unsigned long *mask; 7426 7427 /* 7428 * sched_getaffinity needs multiples of ulong, so need to take 7429 * care of mismatches between target ulong and host ulong sizes. 
7430 */ 7431 if (arg2 & (sizeof(abi_ulong) - 1)) { 7432 ret = -TARGET_EINVAL; 7433 break; 7434 } 7435 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7436 7437 mask = alloca(mask_size); 7438 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7439 7440 if (!is_error(ret)) { 7441 if (copy_to_user(arg3, mask, ret)) { 7442 goto efault; 7443 } 7444 } 7445 } 7446 break; 7447 case TARGET_NR_sched_setaffinity: 7448 { 7449 unsigned int mask_size; 7450 unsigned long *mask; 7451 7452 /* 7453 * sched_setaffinity needs multiples of ulong, so need to take 7454 * care of mismatches between target ulong and host ulong sizes. 7455 */ 7456 if (arg2 & (sizeof(abi_ulong) - 1)) { 7457 ret = -TARGET_EINVAL; 7458 break; 7459 } 7460 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7461 7462 mask = alloca(mask_size); 7463 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7464 goto efault; 7465 } 7466 memcpy(mask, p, arg2); 7467 unlock_user_struct(p, arg2, 0); 7468 7469 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7470 } 7471 break; 7472 case TARGET_NR_sched_setparam: 7473 { 7474 struct sched_param *target_schp; 7475 struct sched_param schp; 7476 7477 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7478 goto efault; 7479 schp.sched_priority = tswap32(target_schp->sched_priority); 7480 unlock_user_struct(target_schp, arg2, 0); 7481 ret = get_errno(sched_setparam(arg1, &schp)); 7482 } 7483 break; 7484 case TARGET_NR_sched_getparam: 7485 { 7486 struct sched_param *target_schp; 7487 struct sched_param schp; 7488 ret = get_errno(sched_getparam(arg1, &schp)); 7489 if (!is_error(ret)) { 7490 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7491 goto efault; 7492 target_schp->sched_priority = tswap32(schp.sched_priority); 7493 unlock_user_struct(target_schp, arg2, 1); 7494 } 7495 } 7496 break; 7497 case TARGET_NR_sched_setscheduler: 7498 { 7499 struct sched_param *target_schp; 7500 struct sched_param schp; 7501 if 
(!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7502 goto efault; 7503 schp.sched_priority = tswap32(target_schp->sched_priority); 7504 unlock_user_struct(target_schp, arg3, 0); 7505 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7506 } 7507 break; 7508 case TARGET_NR_sched_getscheduler: 7509 ret = get_errno(sched_getscheduler(arg1)); 7510 break; 7511 case TARGET_NR_sched_yield: 7512 ret = get_errno(sched_yield()); 7513 break; 7514 case TARGET_NR_sched_get_priority_max: 7515 ret = get_errno(sched_get_priority_max(arg1)); 7516 break; 7517 case TARGET_NR_sched_get_priority_min: 7518 ret = get_errno(sched_get_priority_min(arg1)); 7519 break; 7520 case TARGET_NR_sched_rr_get_interval: 7521 { 7522 struct timespec ts; 7523 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7524 if (!is_error(ret)) { 7525 host_to_target_timespec(arg2, &ts); 7526 } 7527 } 7528 break; 7529 case TARGET_NR_nanosleep: 7530 { 7531 struct timespec req, rem; 7532 target_to_host_timespec(&req, arg1); 7533 ret = get_errno(nanosleep(&req, &rem)); 7534 if (is_error(ret) && arg2) { 7535 host_to_target_timespec(arg2, &rem); 7536 } 7537 } 7538 break; 7539 #ifdef TARGET_NR_query_module 7540 case TARGET_NR_query_module: 7541 goto unimplemented; 7542 #endif 7543 #ifdef TARGET_NR_nfsservctl 7544 case TARGET_NR_nfsservctl: 7545 goto unimplemented; 7546 #endif 7547 case TARGET_NR_prctl: 7548 switch (arg1) { 7549 case PR_GET_PDEATHSIG: 7550 { 7551 int deathsig; 7552 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7553 if (!is_error(ret) && arg2 7554 && put_user_ual(deathsig, arg2)) { 7555 goto efault; 7556 } 7557 break; 7558 } 7559 #ifdef PR_GET_NAME 7560 case PR_GET_NAME: 7561 { 7562 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7563 if (!name) { 7564 goto efault; 7565 } 7566 ret = get_errno(prctl(arg1, (unsigned long)name, 7567 arg3, arg4, arg5)); 7568 unlock_user(name, arg2, 16); 7569 break; 7570 } 7571 case PR_SET_NAME: 7572 { 7573 void *name = lock_user(VERIFY_READ, arg2, 
16, 1); 7574 if (!name) { 7575 goto efault; 7576 } 7577 ret = get_errno(prctl(arg1, (unsigned long)name, 7578 arg3, arg4, arg5)); 7579 unlock_user(name, arg2, 0); 7580 break; 7581 } 7582 #endif 7583 default: 7584 /* Most prctl options have no pointer arguments */ 7585 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7586 break; 7587 } 7588 break; 7589 #ifdef TARGET_NR_arch_prctl 7590 case TARGET_NR_arch_prctl: 7591 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 7592 ret = do_arch_prctl(cpu_env, arg1, arg2); 7593 break; 7594 #else 7595 goto unimplemented; 7596 #endif 7597 #endif 7598 #ifdef TARGET_NR_pread64 7599 case TARGET_NR_pread64: 7600 if (regpairs_aligned(cpu_env)) { 7601 arg4 = arg5; 7602 arg5 = arg6; 7603 } 7604 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7605 goto efault; 7606 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7607 unlock_user(p, arg2, ret); 7608 break; 7609 case TARGET_NR_pwrite64: 7610 if (regpairs_aligned(cpu_env)) { 7611 arg4 = arg5; 7612 arg5 = arg6; 7613 } 7614 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7615 goto efault; 7616 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7617 unlock_user(p, arg2, 0); 7618 break; 7619 #endif 7620 case TARGET_NR_getcwd: 7621 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 7622 goto efault; 7623 ret = get_errno(sys_getcwd1(p, arg2)); 7624 unlock_user(p, arg1, ret); 7625 break; 7626 case TARGET_NR_capget: 7627 case TARGET_NR_capset: 7628 { 7629 struct target_user_cap_header *target_header; 7630 struct target_user_cap_data *target_data = NULL; 7631 struct __user_cap_header_struct header; 7632 struct __user_cap_data_struct data[2]; 7633 struct __user_cap_data_struct *dataptr = NULL; 7634 int i, target_datalen; 7635 int data_items = 1; 7636 7637 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 7638 goto efault; 7639 } 7640 header.version = tswap32(target_header->version); 7641 header.pid = tswap32(target_header->pid); 7642 
7643 if (header.version != _LINUX_CAPABILITY_VERSION) { 7644 /* Version 2 and up takes pointer to two user_data structs */ 7645 data_items = 2; 7646 } 7647 7648 target_datalen = sizeof(*target_data) * data_items; 7649 7650 if (arg2) { 7651 if (num == TARGET_NR_capget) { 7652 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 7653 } else { 7654 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 7655 } 7656 if (!target_data) { 7657 unlock_user_struct(target_header, arg1, 0); 7658 goto efault; 7659 } 7660 7661 if (num == TARGET_NR_capset) { 7662 for (i = 0; i < data_items; i++) { 7663 data[i].effective = tswap32(target_data[i].effective); 7664 data[i].permitted = tswap32(target_data[i].permitted); 7665 data[i].inheritable = tswap32(target_data[i].inheritable); 7666 } 7667 } 7668 7669 dataptr = data; 7670 } 7671 7672 if (num == TARGET_NR_capget) { 7673 ret = get_errno(capget(&header, dataptr)); 7674 } else { 7675 ret = get_errno(capset(&header, dataptr)); 7676 } 7677 7678 /* The kernel always updates version for both capget and capset */ 7679 target_header->version = tswap32(header.version); 7680 unlock_user_struct(target_header, arg1, 1); 7681 7682 if (arg2) { 7683 if (num == TARGET_NR_capget) { 7684 for (i = 0; i < data_items; i++) { 7685 target_data[i].effective = tswap32(data[i].effective); 7686 target_data[i].permitted = tswap32(data[i].permitted); 7687 target_data[i].inheritable = tswap32(data[i].inheritable); 7688 } 7689 unlock_user(target_data, arg2, target_datalen); 7690 } else { 7691 unlock_user(target_data, arg2, 0); 7692 } 7693 } 7694 break; 7695 } 7696 case TARGET_NR_sigaltstack: 7697 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 7698 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 7699 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 7700 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 7701 break; 7702 #else 7703 
goto unimplemented; 7704 #endif 7705 7706 #ifdef CONFIG_SENDFILE 7707 case TARGET_NR_sendfile: 7708 { 7709 off_t *offp = NULL; 7710 off_t off; 7711 if (arg3) { 7712 ret = get_user_sal(off, arg3); 7713 if (is_error(ret)) { 7714 break; 7715 } 7716 offp = &off; 7717 } 7718 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7719 if (!is_error(ret) && arg3) { 7720 abi_long ret2 = put_user_sal(off, arg3); 7721 if (is_error(ret2)) { 7722 ret = ret2; 7723 } 7724 } 7725 break; 7726 } 7727 #ifdef TARGET_NR_sendfile64 7728 case TARGET_NR_sendfile64: 7729 { 7730 off_t *offp = NULL; 7731 off_t off; 7732 if (arg3) { 7733 ret = get_user_s64(off, arg3); 7734 if (is_error(ret)) { 7735 break; 7736 } 7737 offp = &off; 7738 } 7739 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7740 if (!is_error(ret) && arg3) { 7741 abi_long ret2 = put_user_s64(off, arg3); 7742 if (is_error(ret2)) { 7743 ret = ret2; 7744 } 7745 } 7746 break; 7747 } 7748 #endif 7749 #else 7750 case TARGET_NR_sendfile: 7751 #ifdef TARGET_NR_sendfile64 7752 case TARGET_NR_sendfile64: 7753 #endif 7754 goto unimplemented; 7755 #endif 7756 7757 #ifdef TARGET_NR_getpmsg 7758 case TARGET_NR_getpmsg: 7759 goto unimplemented; 7760 #endif 7761 #ifdef TARGET_NR_putpmsg 7762 case TARGET_NR_putpmsg: 7763 goto unimplemented; 7764 #endif 7765 #ifdef TARGET_NR_vfork 7766 case TARGET_NR_vfork: 7767 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7768 0, 0, 0, 0)); 7769 break; 7770 #endif 7771 #ifdef TARGET_NR_ugetrlimit 7772 case TARGET_NR_ugetrlimit: 7773 { 7774 struct rlimit rlim; 7775 int resource = target_to_host_resource(arg1); 7776 ret = get_errno(getrlimit(resource, &rlim)); 7777 if (!is_error(ret)) { 7778 struct target_rlimit *target_rlim; 7779 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7780 goto efault; 7781 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7782 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7783 unlock_user_struct(target_rlim, arg2, 1); 7784 } 7785 
break; 7786 } 7787 #endif 7788 #ifdef TARGET_NR_truncate64 7789 case TARGET_NR_truncate64: 7790 if (!(p = lock_user_string(arg1))) 7791 goto efault; 7792 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7793 unlock_user(p, arg1, 0); 7794 break; 7795 #endif 7796 #ifdef TARGET_NR_ftruncate64 7797 case TARGET_NR_ftruncate64: 7798 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7799 break; 7800 #endif 7801 #ifdef TARGET_NR_stat64 7802 case TARGET_NR_stat64: 7803 if (!(p = lock_user_string(arg1))) 7804 goto efault; 7805 ret = get_errno(stat(path(p), &st)); 7806 unlock_user(p, arg1, 0); 7807 if (!is_error(ret)) 7808 ret = host_to_target_stat64(cpu_env, arg2, &st); 7809 break; 7810 #endif 7811 #ifdef TARGET_NR_lstat64 7812 case TARGET_NR_lstat64: 7813 if (!(p = lock_user_string(arg1))) 7814 goto efault; 7815 ret = get_errno(lstat(path(p), &st)); 7816 unlock_user(p, arg1, 0); 7817 if (!is_error(ret)) 7818 ret = host_to_target_stat64(cpu_env, arg2, &st); 7819 break; 7820 #endif 7821 #ifdef TARGET_NR_fstat64 7822 case TARGET_NR_fstat64: 7823 ret = get_errno(fstat(arg1, &st)); 7824 if (!is_error(ret)) 7825 ret = host_to_target_stat64(cpu_env, arg2, &st); 7826 break; 7827 #endif 7828 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 7829 #ifdef TARGET_NR_fstatat64 7830 case TARGET_NR_fstatat64: 7831 #endif 7832 #ifdef TARGET_NR_newfstatat 7833 case TARGET_NR_newfstatat: 7834 #endif 7835 if (!(p = lock_user_string(arg2))) 7836 goto efault; 7837 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 7838 if (!is_error(ret)) 7839 ret = host_to_target_stat64(cpu_env, arg3, &st); 7840 break; 7841 #endif 7842 case TARGET_NR_lchown: 7843 if (!(p = lock_user_string(arg1))) 7844 goto efault; 7845 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7846 unlock_user(p, arg1, 0); 7847 break; 7848 #ifdef TARGET_NR_getuid 7849 case TARGET_NR_getuid: 7850 ret = get_errno(high2lowuid(getuid())); 7851 break; 7852 #endif 7853 #ifdef TARGET_NR_getgid 
7854 case TARGET_NR_getgid: 7855 ret = get_errno(high2lowgid(getgid())); 7856 break; 7857 #endif 7858 #ifdef TARGET_NR_geteuid 7859 case TARGET_NR_geteuid: 7860 ret = get_errno(high2lowuid(geteuid())); 7861 break; 7862 #endif 7863 #ifdef TARGET_NR_getegid 7864 case TARGET_NR_getegid: 7865 ret = get_errno(high2lowgid(getegid())); 7866 break; 7867 #endif 7868 case TARGET_NR_setreuid: 7869 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7870 break; 7871 case TARGET_NR_setregid: 7872 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7873 break; 7874 case TARGET_NR_getgroups: 7875 { 7876 int gidsetsize = arg1; 7877 target_id *target_grouplist; 7878 gid_t *grouplist; 7879 int i; 7880 7881 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7882 ret = get_errno(getgroups(gidsetsize, grouplist)); 7883 if (gidsetsize == 0) 7884 break; 7885 if (!is_error(ret)) { 7886 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 7887 if (!target_grouplist) 7888 goto efault; 7889 for(i = 0;i < ret; i++) 7890 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7891 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 7892 } 7893 } 7894 break; 7895 case TARGET_NR_setgroups: 7896 { 7897 int gidsetsize = arg1; 7898 target_id *target_grouplist; 7899 gid_t *grouplist = NULL; 7900 int i; 7901 if (gidsetsize) { 7902 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7903 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 7904 if (!target_grouplist) { 7905 ret = -TARGET_EFAULT; 7906 goto fail; 7907 } 7908 for (i = 0; i < gidsetsize; i++) { 7909 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7910 } 7911 unlock_user(target_grouplist, arg2, 0); 7912 } 7913 ret = get_errno(setgroups(gidsetsize, grouplist)); 7914 } 7915 break; 7916 case TARGET_NR_fchown: 7917 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7918 break; 7919 #if defined(TARGET_NR_fchownat) 
7920 case TARGET_NR_fchownat: 7921 if (!(p = lock_user_string(arg2))) 7922 goto efault; 7923 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 7924 low2highgid(arg4), arg5)); 7925 unlock_user(p, arg2, 0); 7926 break; 7927 #endif 7928 #ifdef TARGET_NR_setresuid 7929 case TARGET_NR_setresuid: 7930 ret = get_errno(setresuid(low2highuid(arg1), 7931 low2highuid(arg2), 7932 low2highuid(arg3))); 7933 break; 7934 #endif 7935 #ifdef TARGET_NR_getresuid 7936 case TARGET_NR_getresuid: 7937 { 7938 uid_t ruid, euid, suid; 7939 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7940 if (!is_error(ret)) { 7941 if (put_user_id(high2lowuid(ruid), arg1) 7942 || put_user_id(high2lowuid(euid), arg2) 7943 || put_user_id(high2lowuid(suid), arg3)) 7944 goto efault; 7945 } 7946 } 7947 break; 7948 #endif 7949 #ifdef TARGET_NR_getresgid 7950 case TARGET_NR_setresgid: 7951 ret = get_errno(setresgid(low2highgid(arg1), 7952 low2highgid(arg2), 7953 low2highgid(arg3))); 7954 break; 7955 #endif 7956 #ifdef TARGET_NR_getresgid 7957 case TARGET_NR_getresgid: 7958 { 7959 gid_t rgid, egid, sgid; 7960 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7961 if (!is_error(ret)) { 7962 if (put_user_id(high2lowgid(rgid), arg1) 7963 || put_user_id(high2lowgid(egid), arg2) 7964 || put_user_id(high2lowgid(sgid), arg3)) 7965 goto efault; 7966 } 7967 } 7968 break; 7969 #endif 7970 case TARGET_NR_chown: 7971 if (!(p = lock_user_string(arg1))) 7972 goto efault; 7973 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 7974 unlock_user(p, arg1, 0); 7975 break; 7976 case TARGET_NR_setuid: 7977 ret = get_errno(setuid(low2highuid(arg1))); 7978 break; 7979 case TARGET_NR_setgid: 7980 ret = get_errno(setgid(low2highgid(arg1))); 7981 break; 7982 case TARGET_NR_setfsuid: 7983 ret = get_errno(setfsuid(arg1)); 7984 break; 7985 case TARGET_NR_setfsgid: 7986 ret = get_errno(setfsgid(arg1)); 7987 break; 7988 7989 #ifdef TARGET_NR_lchown32 7990 case TARGET_NR_lchown32: 7991 if (!(p = lock_user_string(arg1))) 7992 
goto efault; 7993 ret = get_errno(lchown(p, arg2, arg3)); 7994 unlock_user(p, arg1, 0); 7995 break; 7996 #endif 7997 #ifdef TARGET_NR_getuid32 7998 case TARGET_NR_getuid32: 7999 ret = get_errno(getuid()); 8000 break; 8001 #endif 8002 8003 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 8004 /* Alpha specific */ 8005 case TARGET_NR_getxuid: 8006 { 8007 uid_t euid; 8008 euid=geteuid(); 8009 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 8010 } 8011 ret = get_errno(getuid()); 8012 break; 8013 #endif 8014 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 8015 /* Alpha specific */ 8016 case TARGET_NR_getxgid: 8017 { 8018 uid_t egid; 8019 egid=getegid(); 8020 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 8021 } 8022 ret = get_errno(getgid()); 8023 break; 8024 #endif 8025 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 8026 /* Alpha specific */ 8027 case TARGET_NR_osf_getsysinfo: 8028 ret = -TARGET_EOPNOTSUPP; 8029 switch (arg1) { 8030 case TARGET_GSI_IEEE_FP_CONTROL: 8031 { 8032 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 8033 8034 /* Copied from linux ieee_fpcr_to_swcr. */ 8035 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 8036 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 8037 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 8038 | SWCR_TRAP_ENABLE_DZE 8039 | SWCR_TRAP_ENABLE_OVF); 8040 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 8041 | SWCR_TRAP_ENABLE_INE); 8042 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 8043 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 8044 8045 if (put_user_u64 (swcr, arg2)) 8046 goto efault; 8047 ret = 0; 8048 } 8049 break; 8050 8051 /* case GSI_IEEE_STATE_AT_SIGNAL: 8052 -- Not implemented in linux kernel. 8053 case GSI_UACPROC: 8054 -- Retrieves current unaligned access state; not much used. 8055 case GSI_PROC_TYPE: 8056 -- Retrieves implver information; surely not used. 8057 case GSI_GET_HWRPB: 8058 -- Grabs a copy of the HWRPB; surely not used. 
8059 */ 8060 } 8061 break; 8062 #endif 8063 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 8064 /* Alpha specific */ 8065 case TARGET_NR_osf_setsysinfo: 8066 ret = -TARGET_EOPNOTSUPP; 8067 switch (arg1) { 8068 case TARGET_SSI_IEEE_FP_CONTROL: 8069 { 8070 uint64_t swcr, fpcr, orig_fpcr; 8071 8072 if (get_user_u64 (swcr, arg2)) { 8073 goto efault; 8074 } 8075 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8076 fpcr = orig_fpcr & FPCR_DYN_MASK; 8077 8078 /* Copied from linux ieee_swcr_to_fpcr. */ 8079 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 8080 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 8081 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 8082 | SWCR_TRAP_ENABLE_DZE 8083 | SWCR_TRAP_ENABLE_OVF)) << 48; 8084 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 8085 | SWCR_TRAP_ENABLE_INE)) << 57; 8086 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 8087 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 8088 8089 cpu_alpha_store_fpcr(cpu_env, fpcr); 8090 ret = 0; 8091 } 8092 break; 8093 8094 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 8095 { 8096 uint64_t exc, fpcr, orig_fpcr; 8097 int si_code; 8098 8099 if (get_user_u64(exc, arg2)) { 8100 goto efault; 8101 } 8102 8103 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8104 8105 /* We only add to the exception status here. */ 8106 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 8107 8108 cpu_alpha_store_fpcr(cpu_env, fpcr); 8109 ret = 0; 8110 8111 /* Old exceptions are not signaled. */ 8112 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 8113 8114 /* If any exceptions set by this call, 8115 and are unmasked, send a signal. 
*/ 8116 si_code = 0; 8117 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 8118 si_code = TARGET_FPE_FLTRES; 8119 } 8120 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 8121 si_code = TARGET_FPE_FLTUND; 8122 } 8123 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 8124 si_code = TARGET_FPE_FLTOVF; 8125 } 8126 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 8127 si_code = TARGET_FPE_FLTDIV; 8128 } 8129 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 8130 si_code = TARGET_FPE_FLTINV; 8131 } 8132 if (si_code != 0) { 8133 target_siginfo_t info; 8134 info.si_signo = SIGFPE; 8135 info.si_errno = 0; 8136 info.si_code = si_code; 8137 info._sifields._sigfault._addr 8138 = ((CPUArchState *)cpu_env)->pc; 8139 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 8140 } 8141 } 8142 break; 8143 8144 /* case SSI_NVPAIRS: 8145 -- Used with SSIN_UACPROC to enable unaligned accesses. 8146 case SSI_IEEE_STATE_AT_SIGNAL: 8147 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8148 -- Not implemented in linux kernel 8149 */ 8150 } 8151 break; 8152 #endif 8153 #ifdef TARGET_NR_osf_sigprocmask 8154 /* Alpha specific. 
*/ 8155 case TARGET_NR_osf_sigprocmask: 8156 { 8157 abi_ulong mask; 8158 int how; 8159 sigset_t set, oldset; 8160 8161 switch(arg1) { 8162 case TARGET_SIG_BLOCK: 8163 how = SIG_BLOCK; 8164 break; 8165 case TARGET_SIG_UNBLOCK: 8166 how = SIG_UNBLOCK; 8167 break; 8168 case TARGET_SIG_SETMASK: 8169 how = SIG_SETMASK; 8170 break; 8171 default: 8172 ret = -TARGET_EINVAL; 8173 goto fail; 8174 } 8175 mask = arg2; 8176 target_to_host_old_sigset(&set, &mask); 8177 do_sigprocmask(how, &set, &oldset); 8178 host_to_target_old_sigset(&mask, &oldset); 8179 ret = mask; 8180 } 8181 break; 8182 #endif 8183 8184 #ifdef TARGET_NR_getgid32 8185 case TARGET_NR_getgid32: 8186 ret = get_errno(getgid()); 8187 break; 8188 #endif 8189 #ifdef TARGET_NR_geteuid32 8190 case TARGET_NR_geteuid32: 8191 ret = get_errno(geteuid()); 8192 break; 8193 #endif 8194 #ifdef TARGET_NR_getegid32 8195 case TARGET_NR_getegid32: 8196 ret = get_errno(getegid()); 8197 break; 8198 #endif 8199 #ifdef TARGET_NR_setreuid32 8200 case TARGET_NR_setreuid32: 8201 ret = get_errno(setreuid(arg1, arg2)); 8202 break; 8203 #endif 8204 #ifdef TARGET_NR_setregid32 8205 case TARGET_NR_setregid32: 8206 ret = get_errno(setregid(arg1, arg2)); 8207 break; 8208 #endif 8209 #ifdef TARGET_NR_getgroups32 8210 case TARGET_NR_getgroups32: 8211 { 8212 int gidsetsize = arg1; 8213 uint32_t *target_grouplist; 8214 gid_t *grouplist; 8215 int i; 8216 8217 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8218 ret = get_errno(getgroups(gidsetsize, grouplist)); 8219 if (gidsetsize == 0) 8220 break; 8221 if (!is_error(ret)) { 8222 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8223 if (!target_grouplist) { 8224 ret = -TARGET_EFAULT; 8225 goto fail; 8226 } 8227 for(i = 0;i < ret; i++) 8228 target_grouplist[i] = tswap32(grouplist[i]); 8229 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8230 } 8231 } 8232 break; 8233 #endif 8234 #ifdef TARGET_NR_setgroups32 8235 case TARGET_NR_setgroups32: 8236 { 8237 int gidsetsize = arg1; 
8238 uint32_t *target_grouplist; 8239 gid_t *grouplist; 8240 int i; 8241 8242 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8243 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8244 if (!target_grouplist) { 8245 ret = -TARGET_EFAULT; 8246 goto fail; 8247 } 8248 for(i = 0;i < gidsetsize; i++) 8249 grouplist[i] = tswap32(target_grouplist[i]); 8250 unlock_user(target_grouplist, arg2, 0); 8251 ret = get_errno(setgroups(gidsetsize, grouplist)); 8252 } 8253 break; 8254 #endif 8255 #ifdef TARGET_NR_fchown32 8256 case TARGET_NR_fchown32: 8257 ret = get_errno(fchown(arg1, arg2, arg3)); 8258 break; 8259 #endif 8260 #ifdef TARGET_NR_setresuid32 8261 case TARGET_NR_setresuid32: 8262 ret = get_errno(setresuid(arg1, arg2, arg3)); 8263 break; 8264 #endif 8265 #ifdef TARGET_NR_getresuid32 8266 case TARGET_NR_getresuid32: 8267 { 8268 uid_t ruid, euid, suid; 8269 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8270 if (!is_error(ret)) { 8271 if (put_user_u32(ruid, arg1) 8272 || put_user_u32(euid, arg2) 8273 || put_user_u32(suid, arg3)) 8274 goto efault; 8275 } 8276 } 8277 break; 8278 #endif 8279 #ifdef TARGET_NR_setresgid32 8280 case TARGET_NR_setresgid32: 8281 ret = get_errno(setresgid(arg1, arg2, arg3)); 8282 break; 8283 #endif 8284 #ifdef TARGET_NR_getresgid32 8285 case TARGET_NR_getresgid32: 8286 { 8287 gid_t rgid, egid, sgid; 8288 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8289 if (!is_error(ret)) { 8290 if (put_user_u32(rgid, arg1) 8291 || put_user_u32(egid, arg2) 8292 || put_user_u32(sgid, arg3)) 8293 goto efault; 8294 } 8295 } 8296 break; 8297 #endif 8298 #ifdef TARGET_NR_chown32 8299 case TARGET_NR_chown32: 8300 if (!(p = lock_user_string(arg1))) 8301 goto efault; 8302 ret = get_errno(chown(p, arg2, arg3)); 8303 unlock_user(p, arg1, 0); 8304 break; 8305 #endif 8306 #ifdef TARGET_NR_setuid32 8307 case TARGET_NR_setuid32: 8308 ret = get_errno(setuid(arg1)); 8309 break; 8310 #endif 8311 #ifdef TARGET_NR_setgid32 8312 case TARGET_NR_setgid32: 8313 
ret = get_errno(setgid(arg1)); 8314 break; 8315 #endif 8316 #ifdef TARGET_NR_setfsuid32 8317 case TARGET_NR_setfsuid32: 8318 ret = get_errno(setfsuid(arg1)); 8319 break; 8320 #endif 8321 #ifdef TARGET_NR_setfsgid32 8322 case TARGET_NR_setfsgid32: 8323 ret = get_errno(setfsgid(arg1)); 8324 break; 8325 #endif 8326 8327 case TARGET_NR_pivot_root: 8328 goto unimplemented; 8329 #ifdef TARGET_NR_mincore 8330 case TARGET_NR_mincore: 8331 { 8332 void *a; 8333 ret = -TARGET_EFAULT; 8334 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 8335 goto efault; 8336 if (!(p = lock_user_string(arg3))) 8337 goto mincore_fail; 8338 ret = get_errno(mincore(a, arg2, p)); 8339 unlock_user(p, arg3, ret); 8340 mincore_fail: 8341 unlock_user(a, arg1, 0); 8342 } 8343 break; 8344 #endif 8345 #ifdef TARGET_NR_arm_fadvise64_64 8346 case TARGET_NR_arm_fadvise64_64: 8347 { 8348 /* 8349 * arm_fadvise64_64 looks like fadvise64_64 but 8350 * with different argument order 8351 */ 8352 abi_long temp; 8353 temp = arg3; 8354 arg3 = arg4; 8355 arg4 = temp; 8356 } 8357 #endif 8358 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 8359 #ifdef TARGET_NR_fadvise64_64 8360 case TARGET_NR_fadvise64_64: 8361 #endif 8362 #ifdef TARGET_NR_fadvise64 8363 case TARGET_NR_fadvise64: 8364 #endif 8365 #ifdef TARGET_S390X 8366 switch (arg4) { 8367 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 8368 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 8369 case 6: arg4 = POSIX_FADV_DONTNEED; break; 8370 case 7: arg4 = POSIX_FADV_NOREUSE; break; 8371 default: break; 8372 } 8373 #endif 8374 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 8375 break; 8376 #endif 8377 #ifdef TARGET_NR_madvise 8378 case TARGET_NR_madvise: 8379 /* A straight passthrough may not be safe because qemu sometimes 8380 turns private file-backed mappings into anonymous mappings. 8381 This will break MADV_DONTNEED. 
8382 This is a hint, so ignoring and returning success is ok. */ 8383 ret = get_errno(0); 8384 break; 8385 #endif 8386 #if TARGET_ABI_BITS == 32 8387 case TARGET_NR_fcntl64: 8388 { 8389 int cmd; 8390 struct flock64 fl; 8391 struct target_flock64 *target_fl; 8392 #ifdef TARGET_ARM 8393 struct target_eabi_flock64 *target_efl; 8394 #endif 8395 8396 cmd = target_to_host_fcntl_cmd(arg2); 8397 if (cmd == -TARGET_EINVAL) { 8398 ret = cmd; 8399 break; 8400 } 8401 8402 switch(arg2) { 8403 case TARGET_F_GETLK64: 8404 #ifdef TARGET_ARM 8405 if (((CPUARMState *)cpu_env)->eabi) { 8406 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8407 goto efault; 8408 fl.l_type = tswap16(target_efl->l_type); 8409 fl.l_whence = tswap16(target_efl->l_whence); 8410 fl.l_start = tswap64(target_efl->l_start); 8411 fl.l_len = tswap64(target_efl->l_len); 8412 fl.l_pid = tswap32(target_efl->l_pid); 8413 unlock_user_struct(target_efl, arg3, 0); 8414 } else 8415 #endif 8416 { 8417 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8418 goto efault; 8419 fl.l_type = tswap16(target_fl->l_type); 8420 fl.l_whence = tswap16(target_fl->l_whence); 8421 fl.l_start = tswap64(target_fl->l_start); 8422 fl.l_len = tswap64(target_fl->l_len); 8423 fl.l_pid = tswap32(target_fl->l_pid); 8424 unlock_user_struct(target_fl, arg3, 0); 8425 } 8426 ret = get_errno(fcntl(arg1, cmd, &fl)); 8427 if (ret == 0) { 8428 #ifdef TARGET_ARM 8429 if (((CPUARMState *)cpu_env)->eabi) { 8430 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8431 goto efault; 8432 target_efl->l_type = tswap16(fl.l_type); 8433 target_efl->l_whence = tswap16(fl.l_whence); 8434 target_efl->l_start = tswap64(fl.l_start); 8435 target_efl->l_len = tswap64(fl.l_len); 8436 target_efl->l_pid = tswap32(fl.l_pid); 8437 unlock_user_struct(target_efl, arg3, 1); 8438 } else 8439 #endif 8440 { 8441 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8442 goto efault; 8443 target_fl->l_type = tswap16(fl.l_type); 8444 target_fl->l_whence = 
tswap16(fl.l_whence); 8445 target_fl->l_start = tswap64(fl.l_start); 8446 target_fl->l_len = tswap64(fl.l_len); 8447 target_fl->l_pid = tswap32(fl.l_pid); 8448 unlock_user_struct(target_fl, arg3, 1); 8449 } 8450 } 8451 break; 8452 8453 case TARGET_F_SETLK64: 8454 case TARGET_F_SETLKW64: 8455 #ifdef TARGET_ARM 8456 if (((CPUARMState *)cpu_env)->eabi) { 8457 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8458 goto efault; 8459 fl.l_type = tswap16(target_efl->l_type); 8460 fl.l_whence = tswap16(target_efl->l_whence); 8461 fl.l_start = tswap64(target_efl->l_start); 8462 fl.l_len = tswap64(target_efl->l_len); 8463 fl.l_pid = tswap32(target_efl->l_pid); 8464 unlock_user_struct(target_efl, arg3, 0); 8465 } else 8466 #endif 8467 { 8468 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8469 goto efault; 8470 fl.l_type = tswap16(target_fl->l_type); 8471 fl.l_whence = tswap16(target_fl->l_whence); 8472 fl.l_start = tswap64(target_fl->l_start); 8473 fl.l_len = tswap64(target_fl->l_len); 8474 fl.l_pid = tswap32(target_fl->l_pid); 8475 unlock_user_struct(target_fl, arg3, 0); 8476 } 8477 ret = get_errno(fcntl(arg1, cmd, &fl)); 8478 break; 8479 default: 8480 ret = do_fcntl(arg1, arg2, arg3); 8481 break; 8482 } 8483 break; 8484 } 8485 #endif 8486 #ifdef TARGET_NR_cacheflush 8487 case TARGET_NR_cacheflush: 8488 /* self-modifying code is handled automatically, so nothing needed */ 8489 ret = 0; 8490 break; 8491 #endif 8492 #ifdef TARGET_NR_security 8493 case TARGET_NR_security: 8494 goto unimplemented; 8495 #endif 8496 #ifdef TARGET_NR_getpagesize 8497 case TARGET_NR_getpagesize: 8498 ret = TARGET_PAGE_SIZE; 8499 break; 8500 #endif 8501 case TARGET_NR_gettid: 8502 ret = get_errno(gettid()); 8503 break; 8504 #ifdef TARGET_NR_readahead 8505 case TARGET_NR_readahead: 8506 #if TARGET_ABI_BITS == 32 8507 if (regpairs_aligned(cpu_env)) { 8508 arg2 = arg3; 8509 arg3 = arg4; 8510 arg4 = arg5; 8511 } 8512 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 
8513 #else 8514 ret = get_errno(readahead(arg1, arg2, arg3)); 8515 #endif 8516 break; 8517 #endif 8518 #ifdef CONFIG_ATTR 8519 #ifdef TARGET_NR_setxattr 8520 case TARGET_NR_listxattr: 8521 case TARGET_NR_llistxattr: 8522 { 8523 void *p, *b = 0; 8524 if (arg2) { 8525 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8526 if (!b) { 8527 ret = -TARGET_EFAULT; 8528 break; 8529 } 8530 } 8531 p = lock_user_string(arg1); 8532 if (p) { 8533 if (num == TARGET_NR_listxattr) { 8534 ret = get_errno(listxattr(p, b, arg3)); 8535 } else { 8536 ret = get_errno(llistxattr(p, b, arg3)); 8537 } 8538 } else { 8539 ret = -TARGET_EFAULT; 8540 } 8541 unlock_user(p, arg1, 0); 8542 unlock_user(b, arg2, arg3); 8543 break; 8544 } 8545 case TARGET_NR_flistxattr: 8546 { 8547 void *b = 0; 8548 if (arg2) { 8549 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8550 if (!b) { 8551 ret = -TARGET_EFAULT; 8552 break; 8553 } 8554 } 8555 ret = get_errno(flistxattr(arg1, b, arg3)); 8556 unlock_user(b, arg2, arg3); 8557 break; 8558 } 8559 case TARGET_NR_setxattr: 8560 case TARGET_NR_lsetxattr: 8561 { 8562 void *p, *n, *v = 0; 8563 if (arg3) { 8564 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8565 if (!v) { 8566 ret = -TARGET_EFAULT; 8567 break; 8568 } 8569 } 8570 p = lock_user_string(arg1); 8571 n = lock_user_string(arg2); 8572 if (p && n) { 8573 if (num == TARGET_NR_setxattr) { 8574 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8575 } else { 8576 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8577 } 8578 } else { 8579 ret = -TARGET_EFAULT; 8580 } 8581 unlock_user(p, arg1, 0); 8582 unlock_user(n, arg2, 0); 8583 unlock_user(v, arg3, 0); 8584 } 8585 break; 8586 case TARGET_NR_fsetxattr: 8587 { 8588 void *n, *v = 0; 8589 if (arg3) { 8590 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8591 if (!v) { 8592 ret = -TARGET_EFAULT; 8593 break; 8594 } 8595 } 8596 n = lock_user_string(arg2); 8597 if (n) { 8598 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8599 } else { 8600 ret = -TARGET_EFAULT; 8601 } 8602 unlock_user(n, 
arg2, 0); 8603 unlock_user(v, arg3, 0); 8604 } 8605 break; 8606 case TARGET_NR_getxattr: 8607 case TARGET_NR_lgetxattr: 8608 { 8609 void *p, *n, *v = 0; 8610 if (arg3) { 8611 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8612 if (!v) { 8613 ret = -TARGET_EFAULT; 8614 break; 8615 } 8616 } 8617 p = lock_user_string(arg1); 8618 n = lock_user_string(arg2); 8619 if (p && n) { 8620 if (num == TARGET_NR_getxattr) { 8621 ret = get_errno(getxattr(p, n, v, arg4)); 8622 } else { 8623 ret = get_errno(lgetxattr(p, n, v, arg4)); 8624 } 8625 } else { 8626 ret = -TARGET_EFAULT; 8627 } 8628 unlock_user(p, arg1, 0); 8629 unlock_user(n, arg2, 0); 8630 unlock_user(v, arg3, arg4); 8631 } 8632 break; 8633 case TARGET_NR_fgetxattr: 8634 { 8635 void *n, *v = 0; 8636 if (arg3) { 8637 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8638 if (!v) { 8639 ret = -TARGET_EFAULT; 8640 break; 8641 } 8642 } 8643 n = lock_user_string(arg2); 8644 if (n) { 8645 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8646 } else { 8647 ret = -TARGET_EFAULT; 8648 } 8649 unlock_user(n, arg2, 0); 8650 unlock_user(v, arg3, arg4); 8651 } 8652 break; 8653 case TARGET_NR_removexattr: 8654 case TARGET_NR_lremovexattr: 8655 { 8656 void *p, *n; 8657 p = lock_user_string(arg1); 8658 n = lock_user_string(arg2); 8659 if (p && n) { 8660 if (num == TARGET_NR_removexattr) { 8661 ret = get_errno(removexattr(p, n)); 8662 } else { 8663 ret = get_errno(lremovexattr(p, n)); 8664 } 8665 } else { 8666 ret = -TARGET_EFAULT; 8667 } 8668 unlock_user(p, arg1, 0); 8669 unlock_user(n, arg2, 0); 8670 } 8671 break; 8672 case TARGET_NR_fremovexattr: 8673 { 8674 void *n; 8675 n = lock_user_string(arg2); 8676 if (n) { 8677 ret = get_errno(fremovexattr(arg1, n)); 8678 } else { 8679 ret = -TARGET_EFAULT; 8680 } 8681 unlock_user(n, arg2, 0); 8682 } 8683 break; 8684 #endif 8685 #endif /* CONFIG_ATTR */ 8686 #ifdef TARGET_NR_set_thread_area 8687 case TARGET_NR_set_thread_area: 8688 #if defined(TARGET_MIPS) 8689 ((CPUMIPSState *) cpu_env)->tls_value = arg1; 
8690 ret = 0; 8691 break; 8692 #elif defined(TARGET_CRIS) 8693 if (arg1 & 0xff) 8694 ret = -TARGET_EINVAL; 8695 else { 8696 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 8697 ret = 0; 8698 } 8699 break; 8700 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 8701 ret = do_set_thread_area(cpu_env, arg1); 8702 break; 8703 #elif defined(TARGET_M68K) 8704 { 8705 TaskState *ts = cpu->opaque; 8706 ts->tp_value = arg1; 8707 ret = 0; 8708 break; 8709 } 8710 #else 8711 goto unimplemented_nowarn; 8712 #endif 8713 #endif 8714 #ifdef TARGET_NR_get_thread_area 8715 case TARGET_NR_get_thread_area: 8716 #if defined(TARGET_I386) && defined(TARGET_ABI32) 8717 ret = do_get_thread_area(cpu_env, arg1); 8718 break; 8719 #elif defined(TARGET_M68K) 8720 { 8721 TaskState *ts = cpu->opaque; 8722 ret = ts->tp_value; 8723 break; 8724 } 8725 #else 8726 goto unimplemented_nowarn; 8727 #endif 8728 #endif 8729 #ifdef TARGET_NR_getdomainname 8730 case TARGET_NR_getdomainname: 8731 goto unimplemented_nowarn; 8732 #endif 8733 8734 #ifdef TARGET_NR_clock_gettime 8735 case TARGET_NR_clock_gettime: 8736 { 8737 struct timespec ts; 8738 ret = get_errno(clock_gettime(arg1, &ts)); 8739 if (!is_error(ret)) { 8740 host_to_target_timespec(arg2, &ts); 8741 } 8742 break; 8743 } 8744 #endif 8745 #ifdef TARGET_NR_clock_getres 8746 case TARGET_NR_clock_getres: 8747 { 8748 struct timespec ts; 8749 ret = get_errno(clock_getres(arg1, &ts)); 8750 if (!is_error(ret)) { 8751 host_to_target_timespec(arg2, &ts); 8752 } 8753 break; 8754 } 8755 #endif 8756 #ifdef TARGET_NR_clock_nanosleep 8757 case TARGET_NR_clock_nanosleep: 8758 { 8759 struct timespec ts; 8760 target_to_host_timespec(&ts, arg3); 8761 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); 8762 if (arg4) 8763 host_to_target_timespec(arg4, &ts); 8764 break; 8765 } 8766 #endif 8767 8768 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 8769 case TARGET_NR_set_tid_address: 8770 ret = get_errno(set_tid_address((int *)g2h(arg1))); 8771 break; 8772 #endif 8773 8774 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 8775 case TARGET_NR_tkill: 8776 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 8777 break; 8778 #endif 8779 8780 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 8781 case TARGET_NR_tgkill: 8782 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 8783 target_to_host_signal(arg3))); 8784 break; 8785 #endif 8786 8787 #ifdef TARGET_NR_set_robust_list 8788 case TARGET_NR_set_robust_list: 8789 case TARGET_NR_get_robust_list: 8790 /* The ABI for supporting robust futexes has userspace pass 8791 * the kernel a pointer to a linked list which is updated by 8792 * userspace after the syscall; the list is walked by the kernel 8793 * when the thread exits. Since the linked list in QEMU guest 8794 * memory isn't a valid linked list for the host and we have 8795 * no way to reliably intercept the thread-death event, we can't 8796 * support these. Silently return ENOSYS so that guest userspace 8797 * falls back to a non-robust futex implementation (which should 8798 * be OK except in the corner case of the guest crashing while 8799 * holding a mutex that is shared with another process via 8800 * shared memory). 
8801 */ 8802 goto unimplemented_nowarn; 8803 #endif 8804 8805 #if defined(TARGET_NR_utimensat) 8806 case TARGET_NR_utimensat: 8807 { 8808 struct timespec *tsp, ts[2]; 8809 if (!arg3) { 8810 tsp = NULL; 8811 } else { 8812 target_to_host_timespec(ts, arg3); 8813 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 8814 tsp = ts; 8815 } 8816 if (!arg2) 8817 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 8818 else { 8819 if (!(p = lock_user_string(arg2))) { 8820 ret = -TARGET_EFAULT; 8821 goto fail; 8822 } 8823 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 8824 unlock_user(p, arg2, 0); 8825 } 8826 } 8827 break; 8828 #endif 8829 case TARGET_NR_futex: 8830 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 8831 break; 8832 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 8833 case TARGET_NR_inotify_init: 8834 ret = get_errno(sys_inotify_init()); 8835 break; 8836 #endif 8837 #ifdef CONFIG_INOTIFY1 8838 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 8839 case TARGET_NR_inotify_init1: 8840 ret = get_errno(sys_inotify_init1(arg1)); 8841 break; 8842 #endif 8843 #endif 8844 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 8845 case TARGET_NR_inotify_add_watch: 8846 p = lock_user_string(arg2); 8847 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 8848 unlock_user(p, arg2, 0); 8849 break; 8850 #endif 8851 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 8852 case TARGET_NR_inotify_rm_watch: 8853 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 8854 break; 8855 #endif 8856 8857 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 8858 case TARGET_NR_mq_open: 8859 { 8860 struct mq_attr posix_mq_attr; 8861 8862 p = lock_user_string(arg1 - 1); 8863 if (arg4 != 0) 8864 copy_from_user_mq_attr (&posix_mq_attr, arg4); 8865 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 8866 unlock_user (p, arg1, 0); 8867 } 8868 break; 8869 8870 case 
TARGET_NR_mq_unlink: 8871 p = lock_user_string(arg1 - 1); 8872 ret = get_errno(mq_unlink(p)); 8873 unlock_user (p, arg1, 0); 8874 break; 8875 8876 case TARGET_NR_mq_timedsend: 8877 { 8878 struct timespec ts; 8879 8880 p = lock_user (VERIFY_READ, arg2, arg3, 1); 8881 if (arg5 != 0) { 8882 target_to_host_timespec(&ts, arg5); 8883 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 8884 host_to_target_timespec(arg5, &ts); 8885 } 8886 else 8887 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 8888 unlock_user (p, arg2, arg3); 8889 } 8890 break; 8891 8892 case TARGET_NR_mq_timedreceive: 8893 { 8894 struct timespec ts; 8895 unsigned int prio; 8896 8897 p = lock_user (VERIFY_READ, arg2, arg3, 1); 8898 if (arg5 != 0) { 8899 target_to_host_timespec(&ts, arg5); 8900 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); 8901 host_to_target_timespec(arg5, &ts); 8902 } 8903 else 8904 ret = get_errno(mq_receive(arg1, p, arg3, &prio)); 8905 unlock_user (p, arg2, arg3); 8906 if (arg4 != 0) 8907 put_user_u32(prio, arg4); 8908 } 8909 break; 8910 8911 /* Not implemented for now... 
*/ 8912 /* case TARGET_NR_mq_notify: */ 8913 /* break; */ 8914 8915 case TARGET_NR_mq_getsetattr: 8916 { 8917 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 8918 ret = 0; 8919 if (arg3 != 0) { 8920 ret = mq_getattr(arg1, &posix_mq_attr_out); 8921 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 8922 } 8923 if (arg2 != 0) { 8924 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 8925 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 8926 } 8927 8928 } 8929 break; 8930 #endif 8931 8932 #ifdef CONFIG_SPLICE 8933 #ifdef TARGET_NR_tee 8934 case TARGET_NR_tee: 8935 { 8936 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 8937 } 8938 break; 8939 #endif 8940 #ifdef TARGET_NR_splice 8941 case TARGET_NR_splice: 8942 { 8943 loff_t loff_in, loff_out; 8944 loff_t *ploff_in = NULL, *ploff_out = NULL; 8945 if(arg2) { 8946 get_user_u64(loff_in, arg2); 8947 ploff_in = &loff_in; 8948 } 8949 if(arg4) { 8950 get_user_u64(loff_out, arg2); 8951 ploff_out = &loff_out; 8952 } 8953 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 8954 } 8955 break; 8956 #endif 8957 #ifdef TARGET_NR_vmsplice 8958 case TARGET_NR_vmsplice: 8959 { 8960 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 8961 if (vec != NULL) { 8962 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 8963 unlock_iovec(vec, arg2, arg3, 0); 8964 } else { 8965 ret = -host_to_target_errno(errno); 8966 } 8967 } 8968 break; 8969 #endif 8970 #endif /* CONFIG_SPLICE */ 8971 #ifdef CONFIG_EVENTFD 8972 #if defined(TARGET_NR_eventfd) 8973 case TARGET_NR_eventfd: 8974 ret = get_errno(eventfd(arg1, 0)); 8975 break; 8976 #endif 8977 #if defined(TARGET_NR_eventfd2) 8978 case TARGET_NR_eventfd2: 8979 { 8980 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 8981 if (arg2 & TARGET_O_NONBLOCK) { 8982 host_flags |= O_NONBLOCK; 8983 } 8984 if (arg2 & TARGET_O_CLOEXEC) { 8985 host_flags |= O_CLOEXEC; 8986 } 8987 ret = get_errno(eventfd(arg1, host_flags)); 8988 break; 8989 } 8990 #endif 8991 #endif 
 /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        /* On 32-bit ABIs the 64-bit offset and length each arrive split
         * across two registers; reassemble them with target_offset64(). */
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* On MIPS the 64-bit pairs start at arg3 (presumably arg2 is ABI
         * alignment padding — confirm against the MIPS syscall ABI). */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        /* Translate the guest epoll_event (byte order only; the layout is
         * assumed compatible) before handing it to the host. */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation for epoll_wait and epoll_pwait: collect
         * events into a host-side bounce buffer, then translate each one
         * back into the guest's event array. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        /* Host bounce buffer; maxevents is guest-controlled, so a huge
         * value would grow the stack here (alloca). */
        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            /* epoll_pwait additionally takes a signal mask (arg5, optional)
             * that must be converted to the host layout. */
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; copy each one out. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        /* Only report the old limits back if the call succeeded and the
         * guest asked for them. */
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Guest address faulted: raise SIGSEGV at that address. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;

        }
        /* NOTE(review): on the fault path above, execution still falls
         * through here, reading mem_value uninitialized and overwriting
         * the 0xdeadbeef marker — confirm whether a break was intended. */
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
        struct target_sigevent *ptarget_sevp;
        struct target_timer_t *ptarget_timer;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            /* All slots in g_posix_timers are in use. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
                    goto efault;
                }

                host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
                host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);

                phost_sevp = &host_sevp;
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest a tagged handle encoding the slot index;
                 * the 0xcafe0000 tag is masked off again (arg1 & 0xffff)
                 * by the other timer_* cases below. */
                if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
                    goto efault;
                }
                ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
                unlock_user_struct(ptarget_timer, arg3, 1);
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        arg1 &= 0xffff;
        if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[arg1];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 9263 host_to_target_itimerspec(arg2, &hspec_old); 9264 } 9265 break; 9266 } 9267 #endif 9268 9269 #ifdef TARGET_NR_timer_gettime 9270 case TARGET_NR_timer_gettime: 9271 { 9272 /* args: timer_t timerid, struct itimerspec *curr_value */ 9273 arg1 &= 0xffff; 9274 if (!arg2) { 9275 return -TARGET_EFAULT; 9276 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9277 ret = -TARGET_EINVAL; 9278 } else { 9279 timer_t htimer = g_posix_timers[arg1]; 9280 struct itimerspec hspec; 9281 ret = get_errno(timer_gettime(htimer, &hspec)); 9282 9283 if (host_to_target_itimerspec(arg2, &hspec)) { 9284 ret = -TARGET_EFAULT; 9285 } 9286 } 9287 break; 9288 } 9289 #endif 9290 9291 #ifdef TARGET_NR_timer_getoverrun 9292 case TARGET_NR_timer_getoverrun: 9293 { 9294 /* args: timer_t timerid */ 9295 arg1 &= 0xffff; 9296 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9297 ret = -TARGET_EINVAL; 9298 } else { 9299 timer_t htimer = g_posix_timers[arg1]; 9300 ret = get_errno(timer_getoverrun(htimer)); 9301 } 9302 break; 9303 } 9304 #endif 9305 9306 #ifdef TARGET_NR_timer_delete 9307 case TARGET_NR_timer_delete: 9308 { 9309 /* args: timer_t timerid */ 9310 arg1 &= 0xffff; 9311 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9312 ret = -TARGET_EINVAL; 9313 } else { 9314 timer_t htimer = g_posix_timers[arg1]; 9315 ret = get_errno(timer_delete(htimer)); 9316 g_posix_timers[arg1] = 0; 9317 } 9318 break; 9319 } 9320 #endif 9321 9322 default: 9323 unimplemented: 9324 gemu_log("qemu: Unsupported syscall: %d\n", num); 9325 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 9326 unimplemented_nowarn: 9327 #endif 9328 ret = -TARGET_ENOSYS; 9329 break; 9330 } 9331 fail: 9332 #ifdef DEBUG 9333 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 9334 #endif 9335 if(do_strace) 9336 print_syscall_ret(num, ret); 9337 return ret; 9338 efault: 9339 ret = 
-TARGET_EFAULT; 9340 goto fail; 9341 } 9342