1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/path.h" 23 #include <elf.h> 24 #include <endian.h> 25 #include <grp.h> 26 #include <sys/ipc.h> 27 #include <sys/msg.h> 28 #include <sys/wait.h> 29 #include <sys/mount.h> 30 #include <sys/file.h> 31 #include <sys/fsuid.h> 32 #include <sys/personality.h> 33 #include <sys/prctl.h> 34 #include <sys/resource.h> 35 #include <sys/swap.h> 36 #include <linux/capability.h> 37 #include <sched.h> 38 #include <sys/timex.h> 39 #ifdef __ia64__ 40 int __clone2(int (*fn)(void *), void *child_stack_base, 41 size_t stack_size, int flags, void *arg, ...); 42 #endif 43 #include <sys/socket.h> 44 #include <sys/un.h> 45 #include <sys/uio.h> 46 #include <poll.h> 47 #include <sys/times.h> 48 #include <sys/shm.h> 49 #include <sys/sem.h> 50 #include <sys/statfs.h> 51 #include <time.h> 52 #include <utime.h> 53 #include <sys/sysinfo.h> 54 #include <sys/signalfd.h> 55 //#include <sys/user.h> 56 #include <netinet/ip.h> 57 #include <netinet/tcp.h> 58 #include <linux/wireless.h> 59 #include <linux/icmp.h> 60 #include <linux/icmpv6.h> 61 #include <linux/errqueue.h> 62 #include <linux/random.h> 63 #include "qemu-common.h" 64 #ifdef CONFIG_TIMERFD 65 #include <sys/timerfd.h> 
66 #endif 67 #ifdef TARGET_GPROF 68 #include <sys/gmon.h> 69 #endif 70 #ifdef CONFIG_EVENTFD 71 #include <sys/eventfd.h> 72 #endif 73 #ifdef CONFIG_EPOLL 74 #include <sys/epoll.h> 75 #endif 76 #ifdef CONFIG_ATTR 77 #include "qemu/xattr.h" 78 #endif 79 #ifdef CONFIG_SENDFILE 80 #include <sys/sendfile.h> 81 #endif 82 83 #define termios host_termios 84 #define winsize host_winsize 85 #define termio host_termio 86 #define sgttyb host_sgttyb /* same as target */ 87 #define tchars host_tchars /* same as target */ 88 #define ltchars host_ltchars /* same as target */ 89 90 #include <linux/termios.h> 91 #include <linux/unistd.h> 92 #include <linux/cdrom.h> 93 #include <linux/hdreg.h> 94 #include <linux/soundcard.h> 95 #include <linux/kd.h> 96 #include <linux/mtio.h> 97 #include <linux/fs.h> 98 #if defined(CONFIG_FIEMAP) 99 #include <linux/fiemap.h> 100 #endif 101 #include <linux/fb.h> 102 #include <linux/vt.h> 103 #include <linux/dm-ioctl.h> 104 #include <linux/reboot.h> 105 #include <linux/route.h> 106 #include <linux/filter.h> 107 #include <linux/blkpg.h> 108 #include <netpacket/packet.h> 109 #include <linux/netlink.h> 110 #ifdef CONFIG_RTNETLINK 111 #include <linux/rtnetlink.h> 112 #include <linux/if_bridge.h> 113 #endif 114 #include <linux/audit.h> 115 #include "linux_loop.h" 116 #include "uname.h" 117 118 #include "qemu.h" 119 120 #ifndef CLONE_IO 121 #define CLONE_IO 0x80000000 /* Clone io context */ 122 #endif 123 124 /* We can't directly call the host clone syscall, because this will 125 * badly confuse libc (breaking mutexes, for example). So we must 126 * divide clone flags into: 127 * * flag combinations that look like pthread_create() 128 * * flag combinations that look like fork() 129 * * flags we can implement within QEMU itself 130 * * flags we can't support and will return an error for 131 */ 132 /* For thread creation, all these flags must be present; for 133 * fork, none must be present. 
134 */ 135 #define CLONE_THREAD_FLAGS \ 136 (CLONE_VM | CLONE_FS | CLONE_FILES | \ 137 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM) 138 139 /* These flags are ignored: 140 * CLONE_DETACHED is now ignored by the kernel; 141 * CLONE_IO is just an optimisation hint to the I/O scheduler 142 */ 143 #define CLONE_IGNORED_FLAGS \ 144 (CLONE_DETACHED | CLONE_IO) 145 146 /* Flags for fork which we can implement within QEMU itself */ 147 #define CLONE_OPTIONAL_FORK_FLAGS \ 148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) 150 151 /* Flags for thread creation which we can implement within QEMU itself */ 152 #define CLONE_OPTIONAL_THREAD_FLAGS \ 153 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 154 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT) 155 156 #define CLONE_INVALID_FORK_FLAGS \ 157 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS)) 158 159 #define CLONE_INVALID_THREAD_FLAGS \ 160 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \ 161 CLONE_IGNORED_FLAGS)) 162 163 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits 164 * have almost all been allocated. We cannot support any of 165 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC, 166 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED. 167 * The checks against the invalid thread masks above will catch these. 168 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.) 169 */ 170 171 //#define DEBUG 172 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted 173 * once. This exercises the codepaths for restart. 
174 */ 175 //#define DEBUG_ERESTARTSYS 176 177 //#include <linux/msdos_fs.h> 178 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 179 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 180 181 #undef _syscall0 182 #undef _syscall1 183 #undef _syscall2 184 #undef _syscall3 185 #undef _syscall4 186 #undef _syscall5 187 #undef _syscall6 188 189 #define _syscall0(type,name) \ 190 static type name (void) \ 191 { \ 192 return syscall(__NR_##name); \ 193 } 194 195 #define _syscall1(type,name,type1,arg1) \ 196 static type name (type1 arg1) \ 197 { \ 198 return syscall(__NR_##name, arg1); \ 199 } 200 201 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 202 static type name (type1 arg1,type2 arg2) \ 203 { \ 204 return syscall(__NR_##name, arg1, arg2); \ 205 } 206 207 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 208 static type name (type1 arg1,type2 arg2,type3 arg3) \ 209 { \ 210 return syscall(__NR_##name, arg1, arg2, arg3); \ 211 } 212 213 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 215 { \ 216 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 217 } 218 219 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 220 type5,arg5) \ 221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 222 { \ 223 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 224 } 225 226 227 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 228 type5,arg5,type6,arg6) \ 229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 230 type6 arg6) \ 231 { \ 232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 233 } 234 235 236 #define __NR_sys_uname __NR_uname 237 #define __NR_sys_getcwd1 __NR_getcwd 238 #define __NR_sys_getdents __NR_getdents 239 #define __NR_sys_getdents64 __NR_getdents64 240 #define __NR_sys_getpriority 
__NR_getpriority 241 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 242 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo 243 #define __NR_sys_syslog __NR_syslog 244 #define __NR_sys_futex __NR_futex 245 #define __NR_sys_inotify_init __NR_inotify_init 246 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 247 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 248 249 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 250 defined(__s390x__) 251 #define __NR__llseek __NR_lseek 252 #endif 253 254 /* Newer kernel ports have llseek() instead of _llseek() */ 255 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 256 #define TARGET_NR__llseek TARGET_NR_llseek 257 #endif 258 259 #ifdef __NR_gettid 260 _syscall0(int, gettid) 261 #else 262 /* This is a replacement for the host gettid() and must return a host 263 errno. */ 264 static int gettid(void) { 265 return -ENOSYS; 266 } 267 #endif 268 #if defined(TARGET_NR_getdents) && defined(__NR_getdents) 269 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 270 #endif 271 #if !defined(__NR_getdents) || \ 272 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 273 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 274 #endif 275 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 276 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 277 loff_t *, res, uint, wh); 278 #endif 279 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo) 280 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig, 281 siginfo_t *, uinfo) 282 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 283 #ifdef __NR_exit_group 284 _syscall1(int,exit_group,int,error_code) 285 #endif 286 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 287 _syscall1(int,set_tid_address,int *,tidptr) 288 #endif 289 #if defined(TARGET_NR_futex) && defined(__NR_futex) 290 
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 291 const struct timespec *,timeout,int *,uaddr2,int,val3) 292 #endif 293 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 294 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 295 unsigned long *, user_mask_ptr); 296 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 297 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 298 unsigned long *, user_mask_ptr); 299 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 300 void *, arg); 301 _syscall2(int, capget, struct __user_cap_header_struct *, header, 302 struct __user_cap_data_struct *, data); 303 _syscall2(int, capset, struct __user_cap_header_struct *, header, 304 struct __user_cap_data_struct *, data); 305 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 306 _syscall2(int, ioprio_get, int, which, int, who) 307 #endif 308 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 309 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio) 310 #endif 311 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 312 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags) 313 #endif 314 315 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 316 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type, 317 unsigned long, idx1, unsigned long, idx2) 318 #endif 319 320 static bitmask_transtbl fcntl_flags_tbl[] = { 321 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 322 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 323 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 324 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 325 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 326 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 327 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 328 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 329 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 330 
{ TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 331 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 332 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 333 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 334 #if defined(O_DIRECT) 335 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 336 #endif 337 #if defined(O_NOATIME) 338 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 339 #endif 340 #if defined(O_CLOEXEC) 341 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 342 #endif 343 #if defined(O_PATH) 344 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 345 #endif 346 #if defined(O_TMPFILE) 347 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE }, 348 #endif 349 /* Don't terminate the list prematurely on 64-bit host+guest. */ 350 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 351 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 352 #endif 353 { 0, 0, 0, 0 } 354 }; 355 356 enum { 357 QEMU_IFLA_BR_UNSPEC, 358 QEMU_IFLA_BR_FORWARD_DELAY, 359 QEMU_IFLA_BR_HELLO_TIME, 360 QEMU_IFLA_BR_MAX_AGE, 361 QEMU_IFLA_BR_AGEING_TIME, 362 QEMU_IFLA_BR_STP_STATE, 363 QEMU_IFLA_BR_PRIORITY, 364 QEMU_IFLA_BR_VLAN_FILTERING, 365 QEMU_IFLA_BR_VLAN_PROTOCOL, 366 QEMU_IFLA_BR_GROUP_FWD_MASK, 367 QEMU_IFLA_BR_ROOT_ID, 368 QEMU_IFLA_BR_BRIDGE_ID, 369 QEMU_IFLA_BR_ROOT_PORT, 370 QEMU_IFLA_BR_ROOT_PATH_COST, 371 QEMU_IFLA_BR_TOPOLOGY_CHANGE, 372 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED, 373 QEMU_IFLA_BR_HELLO_TIMER, 374 QEMU_IFLA_BR_TCN_TIMER, 375 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER, 376 QEMU_IFLA_BR_GC_TIMER, 377 QEMU_IFLA_BR_GROUP_ADDR, 378 QEMU_IFLA_BR_FDB_FLUSH, 379 QEMU_IFLA_BR_MCAST_ROUTER, 380 QEMU_IFLA_BR_MCAST_SNOOPING, 381 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR, 382 QEMU_IFLA_BR_MCAST_QUERIER, 383 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY, 384 QEMU_IFLA_BR_MCAST_HASH_MAX, 385 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT, 386 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT, 387 
QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL, 388 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL, 389 QEMU_IFLA_BR_MCAST_QUERIER_INTVL, 390 QEMU_IFLA_BR_MCAST_QUERY_INTVL, 391 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, 392 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL, 393 QEMU_IFLA_BR_NF_CALL_IPTABLES, 394 QEMU_IFLA_BR_NF_CALL_IP6TABLES, 395 QEMU_IFLA_BR_NF_CALL_ARPTABLES, 396 QEMU_IFLA_BR_VLAN_DEFAULT_PVID, 397 QEMU_IFLA_BR_PAD, 398 QEMU_IFLA_BR_VLAN_STATS_ENABLED, 399 QEMU_IFLA_BR_MCAST_STATS_ENABLED, 400 QEMU___IFLA_BR_MAX, 401 }; 402 403 enum { 404 QEMU_IFLA_UNSPEC, 405 QEMU_IFLA_ADDRESS, 406 QEMU_IFLA_BROADCAST, 407 QEMU_IFLA_IFNAME, 408 QEMU_IFLA_MTU, 409 QEMU_IFLA_LINK, 410 QEMU_IFLA_QDISC, 411 QEMU_IFLA_STATS, 412 QEMU_IFLA_COST, 413 QEMU_IFLA_PRIORITY, 414 QEMU_IFLA_MASTER, 415 QEMU_IFLA_WIRELESS, 416 QEMU_IFLA_PROTINFO, 417 QEMU_IFLA_TXQLEN, 418 QEMU_IFLA_MAP, 419 QEMU_IFLA_WEIGHT, 420 QEMU_IFLA_OPERSTATE, 421 QEMU_IFLA_LINKMODE, 422 QEMU_IFLA_LINKINFO, 423 QEMU_IFLA_NET_NS_PID, 424 QEMU_IFLA_IFALIAS, 425 QEMU_IFLA_NUM_VF, 426 QEMU_IFLA_VFINFO_LIST, 427 QEMU_IFLA_STATS64, 428 QEMU_IFLA_VF_PORTS, 429 QEMU_IFLA_PORT_SELF, 430 QEMU_IFLA_AF_SPEC, 431 QEMU_IFLA_GROUP, 432 QEMU_IFLA_NET_NS_FD, 433 QEMU_IFLA_EXT_MASK, 434 QEMU_IFLA_PROMISCUITY, 435 QEMU_IFLA_NUM_TX_QUEUES, 436 QEMU_IFLA_NUM_RX_QUEUES, 437 QEMU_IFLA_CARRIER, 438 QEMU_IFLA_PHYS_PORT_ID, 439 QEMU_IFLA_CARRIER_CHANGES, 440 QEMU_IFLA_PHYS_SWITCH_ID, 441 QEMU_IFLA_LINK_NETNSID, 442 QEMU_IFLA_PHYS_PORT_NAME, 443 QEMU_IFLA_PROTO_DOWN, 444 QEMU_IFLA_GSO_MAX_SEGS, 445 QEMU_IFLA_GSO_MAX_SIZE, 446 QEMU_IFLA_PAD, 447 QEMU_IFLA_XDP, 448 QEMU___IFLA_MAX 449 }; 450 451 enum { 452 QEMU_IFLA_BRPORT_UNSPEC, 453 QEMU_IFLA_BRPORT_STATE, 454 QEMU_IFLA_BRPORT_PRIORITY, 455 QEMU_IFLA_BRPORT_COST, 456 QEMU_IFLA_BRPORT_MODE, 457 QEMU_IFLA_BRPORT_GUARD, 458 QEMU_IFLA_BRPORT_PROTECT, 459 QEMU_IFLA_BRPORT_FAST_LEAVE, 460 QEMU_IFLA_BRPORT_LEARNING, 461 QEMU_IFLA_BRPORT_UNICAST_FLOOD, 462 QEMU_IFLA_BRPORT_PROXYARP, 463 QEMU_IFLA_BRPORT_LEARNING_SYNC, 
464 QEMU_IFLA_BRPORT_PROXYARP_WIFI, 465 QEMU_IFLA_BRPORT_ROOT_ID, 466 QEMU_IFLA_BRPORT_BRIDGE_ID, 467 QEMU_IFLA_BRPORT_DESIGNATED_PORT, 468 QEMU_IFLA_BRPORT_DESIGNATED_COST, 469 QEMU_IFLA_BRPORT_ID, 470 QEMU_IFLA_BRPORT_NO, 471 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, 472 QEMU_IFLA_BRPORT_CONFIG_PENDING, 473 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER, 474 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER, 475 QEMU_IFLA_BRPORT_HOLD_TIMER, 476 QEMU_IFLA_BRPORT_FLUSH, 477 QEMU_IFLA_BRPORT_MULTICAST_ROUTER, 478 QEMU_IFLA_BRPORT_PAD, 479 QEMU___IFLA_BRPORT_MAX 480 }; 481 482 enum { 483 QEMU_IFLA_INFO_UNSPEC, 484 QEMU_IFLA_INFO_KIND, 485 QEMU_IFLA_INFO_DATA, 486 QEMU_IFLA_INFO_XSTATS, 487 QEMU_IFLA_INFO_SLAVE_KIND, 488 QEMU_IFLA_INFO_SLAVE_DATA, 489 QEMU___IFLA_INFO_MAX, 490 }; 491 492 enum { 493 QEMU_IFLA_INET_UNSPEC, 494 QEMU_IFLA_INET_CONF, 495 QEMU___IFLA_INET_MAX, 496 }; 497 498 enum { 499 QEMU_IFLA_INET6_UNSPEC, 500 QEMU_IFLA_INET6_FLAGS, 501 QEMU_IFLA_INET6_CONF, 502 QEMU_IFLA_INET6_STATS, 503 QEMU_IFLA_INET6_MCAST, 504 QEMU_IFLA_INET6_CACHEINFO, 505 QEMU_IFLA_INET6_ICMP6STATS, 506 QEMU_IFLA_INET6_TOKEN, 507 QEMU_IFLA_INET6_ADDR_GEN_MODE, 508 QEMU___IFLA_INET6_MAX 509 }; 510 511 typedef abi_long (*TargetFdDataFunc)(void *, size_t); 512 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t); 513 typedef struct TargetFdTrans { 514 TargetFdDataFunc host_to_target_data; 515 TargetFdDataFunc target_to_host_data; 516 TargetFdAddrFunc target_to_host_addr; 517 } TargetFdTrans; 518 519 static TargetFdTrans **target_fd_trans; 520 521 static unsigned int target_fd_max; 522 523 static TargetFdDataFunc fd_trans_target_to_host_data(int fd) 524 { 525 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 526 return target_fd_trans[fd]->target_to_host_data; 527 } 528 return NULL; 529 } 530 531 static TargetFdDataFunc fd_trans_host_to_target_data(int fd) 532 { 533 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 534 return target_fd_trans[fd]->host_to_target_data; 535 } 536 
return NULL; 537 } 538 539 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd) 540 { 541 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 542 return target_fd_trans[fd]->target_to_host_addr; 543 } 544 return NULL; 545 } 546 547 static void fd_trans_register(int fd, TargetFdTrans *trans) 548 { 549 unsigned int oldmax; 550 551 if (fd >= target_fd_max) { 552 oldmax = target_fd_max; 553 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */ 554 target_fd_trans = g_renew(TargetFdTrans *, 555 target_fd_trans, target_fd_max); 556 memset((void *)(target_fd_trans + oldmax), 0, 557 (target_fd_max - oldmax) * sizeof(TargetFdTrans *)); 558 } 559 target_fd_trans[fd] = trans; 560 } 561 562 static void fd_trans_unregister(int fd) 563 { 564 if (fd >= 0 && fd < target_fd_max) { 565 target_fd_trans[fd] = NULL; 566 } 567 } 568 569 static void fd_trans_dup(int oldfd, int newfd) 570 { 571 fd_trans_unregister(newfd); 572 if (oldfd < target_fd_max && target_fd_trans[oldfd]) { 573 fd_trans_register(newfd, target_fd_trans[oldfd]); 574 } 575 } 576 577 static int sys_getcwd1(char *buf, size_t size) 578 { 579 if (getcwd(buf, size) == NULL) { 580 /* getcwd() sets errno */ 581 return (-1); 582 } 583 return strlen(buf)+1; 584 } 585 586 #ifdef TARGET_NR_utimensat 587 #if defined(__NR_utimensat) 588 #define __NR_sys_utimensat __NR_utimensat 589 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 590 const struct timespec *,tsp,int,flags) 591 #else 592 static int sys_utimensat(int dirfd, const char *pathname, 593 const struct timespec times[2], int flags) 594 { 595 errno = ENOSYS; 596 return -1; 597 } 598 #endif 599 #endif /* TARGET_NR_utimensat */ 600 601 #ifdef CONFIG_INOTIFY 602 #include <sys/inotify.h> 603 604 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 605 static int sys_inotify_init(void) 606 { 607 return (inotify_init()); 608 } 609 #endif 610 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 611 static 
int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.
 * A zero entry means the slot is free; a slot is claimed by writing a
 * placeholder value of 1 before the real timer_t is stored.
 */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of the first free timer slot, or -1 if all
 * 32 slots are in use.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    /* Only the EABI variant aligns 64-bit arguments to even register pairs */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif

/* Size of the host<->target errno translation tables; indices are raw
 * errno values, so this must exceed the largest errno either side uses. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
712 */ 713 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 714 [EAGAIN] = TARGET_EAGAIN, 715 [EIDRM] = TARGET_EIDRM, 716 [ECHRNG] = TARGET_ECHRNG, 717 [EL2NSYNC] = TARGET_EL2NSYNC, 718 [EL3HLT] = TARGET_EL3HLT, 719 [EL3RST] = TARGET_EL3RST, 720 [ELNRNG] = TARGET_ELNRNG, 721 [EUNATCH] = TARGET_EUNATCH, 722 [ENOCSI] = TARGET_ENOCSI, 723 [EL2HLT] = TARGET_EL2HLT, 724 [EDEADLK] = TARGET_EDEADLK, 725 [ENOLCK] = TARGET_ENOLCK, 726 [EBADE] = TARGET_EBADE, 727 [EBADR] = TARGET_EBADR, 728 [EXFULL] = TARGET_EXFULL, 729 [ENOANO] = TARGET_ENOANO, 730 [EBADRQC] = TARGET_EBADRQC, 731 [EBADSLT] = TARGET_EBADSLT, 732 [EBFONT] = TARGET_EBFONT, 733 [ENOSTR] = TARGET_ENOSTR, 734 [ENODATA] = TARGET_ENODATA, 735 [ETIME] = TARGET_ETIME, 736 [ENOSR] = TARGET_ENOSR, 737 [ENONET] = TARGET_ENONET, 738 [ENOPKG] = TARGET_ENOPKG, 739 [EREMOTE] = TARGET_EREMOTE, 740 [ENOLINK] = TARGET_ENOLINK, 741 [EADV] = TARGET_EADV, 742 [ESRMNT] = TARGET_ESRMNT, 743 [ECOMM] = TARGET_ECOMM, 744 [EPROTO] = TARGET_EPROTO, 745 [EDOTDOT] = TARGET_EDOTDOT, 746 [EMULTIHOP] = TARGET_EMULTIHOP, 747 [EBADMSG] = TARGET_EBADMSG, 748 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 749 [EOVERFLOW] = TARGET_EOVERFLOW, 750 [ENOTUNIQ] = TARGET_ENOTUNIQ, 751 [EBADFD] = TARGET_EBADFD, 752 [EREMCHG] = TARGET_EREMCHG, 753 [ELIBACC] = TARGET_ELIBACC, 754 [ELIBBAD] = TARGET_ELIBBAD, 755 [ELIBSCN] = TARGET_ELIBSCN, 756 [ELIBMAX] = TARGET_ELIBMAX, 757 [ELIBEXEC] = TARGET_ELIBEXEC, 758 [EILSEQ] = TARGET_EILSEQ, 759 [ENOSYS] = TARGET_ENOSYS, 760 [ELOOP] = TARGET_ELOOP, 761 [ERESTART] = TARGET_ERESTART, 762 [ESTRPIPE] = TARGET_ESTRPIPE, 763 [ENOTEMPTY] = TARGET_ENOTEMPTY, 764 [EUSERS] = TARGET_EUSERS, 765 [ENOTSOCK] = TARGET_ENOTSOCK, 766 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 767 [EMSGSIZE] = TARGET_EMSGSIZE, 768 [EPROTOTYPE] = TARGET_EPROTOTYPE, 769 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 770 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 771 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 772 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 773 
[EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 774 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 775 [EADDRINUSE] = TARGET_EADDRINUSE, 776 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 777 [ENETDOWN] = TARGET_ENETDOWN, 778 [ENETUNREACH] = TARGET_ENETUNREACH, 779 [ENETRESET] = TARGET_ENETRESET, 780 [ECONNABORTED] = TARGET_ECONNABORTED, 781 [ECONNRESET] = TARGET_ECONNRESET, 782 [ENOBUFS] = TARGET_ENOBUFS, 783 [EISCONN] = TARGET_EISCONN, 784 [ENOTCONN] = TARGET_ENOTCONN, 785 [EUCLEAN] = TARGET_EUCLEAN, 786 [ENOTNAM] = TARGET_ENOTNAM, 787 [ENAVAIL] = TARGET_ENAVAIL, 788 [EISNAM] = TARGET_EISNAM, 789 [EREMOTEIO] = TARGET_EREMOTEIO, 790 [EDQUOT] = TARGET_EDQUOT, 791 [ESHUTDOWN] = TARGET_ESHUTDOWN, 792 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 793 [ETIMEDOUT] = TARGET_ETIMEDOUT, 794 [ECONNREFUSED] = TARGET_ECONNREFUSED, 795 [EHOSTDOWN] = TARGET_EHOSTDOWN, 796 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 797 [EALREADY] = TARGET_EALREADY, 798 [EINPROGRESS] = TARGET_EINPROGRESS, 799 [ESTALE] = TARGET_ESTALE, 800 [ECANCELED] = TARGET_ECANCELED, 801 [ENOMEDIUM] = TARGET_ENOMEDIUM, 802 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 803 #ifdef ENOKEY 804 [ENOKEY] = TARGET_ENOKEY, 805 #endif 806 #ifdef EKEYEXPIRED 807 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 808 #endif 809 #ifdef EKEYREVOKED 810 [EKEYREVOKED] = TARGET_EKEYREVOKED, 811 #endif 812 #ifdef EKEYREJECTED 813 [EKEYREJECTED] = TARGET_EKEYREJECTED, 814 #endif 815 #ifdef EOWNERDEAD 816 [EOWNERDEAD] = TARGET_EOWNERDEAD, 817 #endif 818 #ifdef ENOTRECOVERABLE 819 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 820 #endif 821 #ifdef ENOMSG 822 [ENOMSG] = TARGET_ENOMSG, 823 #endif 824 #ifdef ERKFILL 825 [ERFKILL] = TARGET_ERFKILL, 826 #endif 827 #ifdef EHWPOISON 828 [EHWPOISON] = TARGET_EHWPOISON, 829 #endif 830 }; 831 832 static inline int host_to_target_errno(int err) 833 { 834 if (err >= 0 && err < ERRNO_TABLE_SIZE && 835 host_to_target_errno_table[err]) { 836 return host_to_target_errno_table[err]; 837 } 838 return err; 839 } 840 841 static inline int target_to_host_errno(int 
err) 842 { 843 if (err >= 0 && err < ERRNO_TABLE_SIZE && 844 target_to_host_errno_table[err]) { 845 return target_to_host_errno_table[err]; 846 } 847 return err; 848 } 849 850 static inline abi_long get_errno(abi_long ret) 851 { 852 if (ret == -1) 853 return -host_to_target_errno(errno); 854 else 855 return ret; 856 } 857 858 static inline int is_error(abi_long ret) 859 { 860 return (abi_ulong)ret >= (abi_ulong)(-4096); 861 } 862 863 const char *target_strerror(int err) 864 { 865 if (err == TARGET_ERESTARTSYS) { 866 return "To be restarted"; 867 } 868 if (err == TARGET_QEMU_ESIGRETURN) { 869 return "Successful exit from sigreturn"; 870 } 871 872 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) { 873 return NULL; 874 } 875 return strerror(target_to_host_errno(err)); 876 } 877 878 #define safe_syscall0(type, name) \ 879 static type safe_##name(void) \ 880 { \ 881 return safe_syscall(__NR_##name); \ 882 } 883 884 #define safe_syscall1(type, name, type1, arg1) \ 885 static type safe_##name(type1 arg1) \ 886 { \ 887 return safe_syscall(__NR_##name, arg1); \ 888 } 889 890 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \ 891 static type safe_##name(type1 arg1, type2 arg2) \ 892 { \ 893 return safe_syscall(__NR_##name, arg1, arg2); \ 894 } 895 896 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 897 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \ 898 { \ 899 return safe_syscall(__NR_##name, arg1, arg2, arg3); \ 900 } 901 902 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ 903 type4, arg4) \ 904 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ 905 { \ 906 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 907 } 908 909 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ 910 type4, arg4, type5, arg5) \ 911 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 912 type5 arg5) \ 913 { \ 914 return safe_syscall(__NR_##name, 
arg1, arg2, arg3, arg4, arg5); \ 915 } 916 917 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ 918 type4, arg4, type5, arg5, type6, arg6) \ 919 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 920 type5 arg5, type6 arg6) \ 921 { \ 922 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 923 } 924 925 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count) 926 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count) 927 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \ 928 int, flags, mode_t, mode) 929 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \ 930 struct rusage *, rusage) 931 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \ 932 int, options, struct rusage *, rusage) 933 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp) 934 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \ 935 fd_set *, exceptfds, struct timespec *, timeout, void *, sig) 936 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds, 937 struct timespec *, tsp, const sigset_t *, sigmask, 938 size_t, sigsetsize) 939 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events, 940 int, maxevents, int, timeout, const sigset_t *, sigmask, 941 size_t, sigsetsize) 942 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \ 943 const struct timespec *,timeout,int *,uaddr2,int,val3) 944 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize) 945 safe_syscall2(int, kill, pid_t, pid, int, sig) 946 safe_syscall2(int, tkill, int, tid, int, sig) 947 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig) 948 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt) 949 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt) 950 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, 
iovcnt, 951 unsigned long, pos_l, unsigned long, pos_h) 952 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt, 953 unsigned long, pos_l, unsigned long, pos_h) 954 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr, 955 socklen_t, addrlen) 956 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len, 957 int, flags, const struct sockaddr *, addr, socklen_t, addrlen) 958 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len, 959 int, flags, struct sockaddr *, addr, socklen_t *, addrlen) 960 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags) 961 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags) 962 safe_syscall2(int, flock, int, fd, int, operation) 963 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo, 964 const struct timespec *, uts, size_t, sigsetsize) 965 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len, 966 int, flags) 967 safe_syscall2(int, nanosleep, const struct timespec *, req, 968 struct timespec *, rem) 969 #ifdef TARGET_NR_clock_nanosleep 970 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags, 971 const struct timespec *, req, struct timespec *, rem) 972 #endif 973 #ifdef __NR_msgsnd 974 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz, 975 int, flags) 976 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz, 977 long, msgtype, int, flags) 978 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops, 979 unsigned, nsops, const struct timespec *, timeout) 980 #else 981 /* This host kernel architecture uses a single ipc syscall; fake up 982 * wrappers for the sub-operations to hide this implementation detail. 983 * Annoyingly we can't include linux/ipc.h to get the constant definitions 984 * for the call parameter because some structs in there conflict with the 985 * sys/ipc.h ones. 
So we just define them here, and rely on them being 986 * the same for all host architectures. 987 */ 988 #define Q_SEMTIMEDOP 4 989 #define Q_MSGSND 11 990 #define Q_MSGRCV 12 991 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP)) 992 993 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third, 994 void *, ptr, long, fifth) 995 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags) 996 { 997 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0); 998 } 999 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags) 1000 { 1001 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type); 1002 } 1003 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops, 1004 const struct timespec *timeout) 1005 { 1006 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops, 1007 (long)timeout); 1008 } 1009 #endif 1010 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 1011 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr, 1012 size_t, len, unsigned, prio, const struct timespec *, timeout) 1013 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr, 1014 size_t, len, unsigned *, prio, const struct timespec *, timeout) 1015 #endif 1016 /* We do ioctl like this rather than via safe_syscall3 to preserve the 1017 * "third argument might be integer or pointer or not present" behaviour of 1018 * the libc function. 1019 */ 1020 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__) 1021 /* Similarly for fcntl. Note that callers must always: 1022 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK 1023 * use the flock64 struct rather than unsuffixed flock 1024 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts. 1025 */ 1026 #ifdef __NR_fcntl64 1027 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__) 1028 #else 1029 #define safe_fcntl(...) 
safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

/* Convert a host socket type (including the SOCK_CLOEXEC/SOCK_NONBLOCK
 * modifier bits, where the host defines them) into the target's
 * encoding of the same information.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        /* Other socket types are passed through numerically unchanged. */
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

/* Guest heap state for the brk emulation below. */
static abi_ulong target_brk;          /* current guest program break */
static abi_ulong target_original_brk; /* initial break; never shrink below this */
static abi_ulong brk_page;            /* first page past memory reserved for the heap */

/* Record the initial program break as set up by the image loader. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Like Linux, refuse to go below the original break by simply
     * reporting the unchanged break rather than failing.
     */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.
 */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

/* Read a guest fd_set (n fds wide) at target_fds_addr into the host
 * fd_set *fds.  Guest fd_sets are arrays of abi_ulong words, so each
 * word is fetched and its bits expanded into FD_SET() calls.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/* As copy_from_user_fdset(), but a guest address of 0 (NULL) yields a
 * NULL host pointer in *fds_ptr instead of a copy, matching select()
 * semantics for absent fd sets.
 * NOTE(review): the -TARGET_EFAULT error travels through an abi_ulong
 * return type; callers assign it to abi_long so it works out, but
 * abi_long would be the more accurate return type — confirm before
 * changing.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/* Write the host fd_set *fds back to the guest fd_set at
 * target_fds_addr, packing FD_ISSET() bits into abi_ulong words.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

/* Clock tick rate of the host kernel, used to rescale clock_t values. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Convert a host clock_t tick count into target ticks, rescaling when
 * the host and target HZ differ.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits so the intermediate multiply cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Copy a host struct rusage to guest memory at target_addr, swapping
 * every field to target byte order.  Returns 0 on success or
 * -TARGET_EFAULT if the guest address cannot be locked for writing.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

/* Convert a target rlim_t to the host representation.
 * TARGET_RLIM_INFINITY maps to RLIM_INFINITY, as does any value too
 * large to represent in the host rlim_t (checked just below).
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
if (target_rlim_swap != (rlim_t)result) 1286 return RLIM_INFINITY; 1287 1288 return result; 1289 } 1290 1291 static inline abi_ulong host_to_target_rlim(rlim_t rlim) 1292 { 1293 abi_ulong target_rlim_swap; 1294 abi_ulong result; 1295 1296 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) 1297 target_rlim_swap = TARGET_RLIM_INFINITY; 1298 else 1299 target_rlim_swap = rlim; 1300 result = tswapal(target_rlim_swap); 1301 1302 return result; 1303 } 1304 1305 static inline int target_to_host_resource(int code) 1306 { 1307 switch (code) { 1308 case TARGET_RLIMIT_AS: 1309 return RLIMIT_AS; 1310 case TARGET_RLIMIT_CORE: 1311 return RLIMIT_CORE; 1312 case TARGET_RLIMIT_CPU: 1313 return RLIMIT_CPU; 1314 case TARGET_RLIMIT_DATA: 1315 return RLIMIT_DATA; 1316 case TARGET_RLIMIT_FSIZE: 1317 return RLIMIT_FSIZE; 1318 case TARGET_RLIMIT_LOCKS: 1319 return RLIMIT_LOCKS; 1320 case TARGET_RLIMIT_MEMLOCK: 1321 return RLIMIT_MEMLOCK; 1322 case TARGET_RLIMIT_MSGQUEUE: 1323 return RLIMIT_MSGQUEUE; 1324 case TARGET_RLIMIT_NICE: 1325 return RLIMIT_NICE; 1326 case TARGET_RLIMIT_NOFILE: 1327 return RLIMIT_NOFILE; 1328 case TARGET_RLIMIT_NPROC: 1329 return RLIMIT_NPROC; 1330 case TARGET_RLIMIT_RSS: 1331 return RLIMIT_RSS; 1332 case TARGET_RLIMIT_RTPRIO: 1333 return RLIMIT_RTPRIO; 1334 case TARGET_RLIMIT_SIGPENDING: 1335 return RLIMIT_SIGPENDING; 1336 case TARGET_RLIMIT_STACK: 1337 return RLIMIT_STACK; 1338 default: 1339 return code; 1340 } 1341 } 1342 1343 static inline abi_long copy_from_user_timeval(struct timeval *tv, 1344 abi_ulong target_tv_addr) 1345 { 1346 struct target_timeval *target_tv; 1347 1348 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) 1349 return -TARGET_EFAULT; 1350 1351 __get_user(tv->tv_sec, &target_tv->tv_sec); 1352 __get_user(tv->tv_usec, &target_tv->tv_usec); 1353 1354 unlock_user_struct(target_tv, target_tv_addr, 0); 1355 1356 return 0; 1357 } 1358 1359 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 1360 const struct 
timeval *tv) 1361 { 1362 struct target_timeval *target_tv; 1363 1364 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) 1365 return -TARGET_EFAULT; 1366 1367 __put_user(tv->tv_sec, &target_tv->tv_sec); 1368 __put_user(tv->tv_usec, &target_tv->tv_usec); 1369 1370 unlock_user_struct(target_tv, target_tv_addr, 1); 1371 1372 return 0; 1373 } 1374 1375 static inline abi_long copy_from_user_timezone(struct timezone *tz, 1376 abi_ulong target_tz_addr) 1377 { 1378 struct target_timezone *target_tz; 1379 1380 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) { 1381 return -TARGET_EFAULT; 1382 } 1383 1384 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest); 1385 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime); 1386 1387 unlock_user_struct(target_tz, target_tz_addr, 0); 1388 1389 return 0; 1390 } 1391 1392 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 1393 #include <mqueue.h> 1394 1395 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr, 1396 abi_ulong target_mq_attr_addr) 1397 { 1398 struct target_mq_attr *target_mq_attr; 1399 1400 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 1401 target_mq_attr_addr, 1)) 1402 return -TARGET_EFAULT; 1403 1404 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 1405 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1406 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1407 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1408 1409 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 1410 1411 return 0; 1412 } 1413 1414 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 1415 const struct mq_attr *attr) 1416 { 1417 struct target_mq_attr *target_mq_attr; 1418 1419 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 1420 target_mq_attr_addr, 0)) 1421 return -TARGET_EFAULT; 1422 1423 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 1424 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1425 
__put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1426 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1427 1428 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 1429 1430 return 0; 1431 } 1432 #endif 1433 1434 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 1435 /* do_select() must return target values and target errnos. */ 1436 static abi_long do_select(int n, 1437 abi_ulong rfd_addr, abi_ulong wfd_addr, 1438 abi_ulong efd_addr, abi_ulong target_tv_addr) 1439 { 1440 fd_set rfds, wfds, efds; 1441 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 1442 struct timeval tv; 1443 struct timespec ts, *ts_ptr; 1444 abi_long ret; 1445 1446 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 1447 if (ret) { 1448 return ret; 1449 } 1450 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 1451 if (ret) { 1452 return ret; 1453 } 1454 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 1455 if (ret) { 1456 return ret; 1457 } 1458 1459 if (target_tv_addr) { 1460 if (copy_from_user_timeval(&tv, target_tv_addr)) 1461 return -TARGET_EFAULT; 1462 ts.tv_sec = tv.tv_sec; 1463 ts.tv_nsec = tv.tv_usec * 1000; 1464 ts_ptr = &ts; 1465 } else { 1466 ts_ptr = NULL; 1467 } 1468 1469 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 1470 ts_ptr, NULL)); 1471 1472 if (!is_error(ret)) { 1473 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1474 return -TARGET_EFAULT; 1475 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1476 return -TARGET_EFAULT; 1477 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1478 return -TARGET_EFAULT; 1479 1480 if (target_tv_addr) { 1481 tv.tv_sec = ts.tv_sec; 1482 tv.tv_usec = ts.tv_nsec / 1000; 1483 if (copy_to_user_timeval(target_tv_addr, &tv)) { 1484 return -TARGET_EFAULT; 1485 } 1486 } 1487 } 1488 1489 return ret; 1490 } 1491 1492 #if defined(TARGET_WANT_OLD_SYS_SELECT) 1493 static abi_long do_old_select(abi_ulong arg1) 1494 { 1495 struct target_sel_arg_struct 
*sel; 1496 abi_ulong inp, outp, exp, tvp; 1497 long nsel; 1498 1499 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) { 1500 return -TARGET_EFAULT; 1501 } 1502 1503 nsel = tswapal(sel->n); 1504 inp = tswapal(sel->inp); 1505 outp = tswapal(sel->outp); 1506 exp = tswapal(sel->exp); 1507 tvp = tswapal(sel->tvp); 1508 1509 unlock_user_struct(sel, arg1, 0); 1510 1511 return do_select(nsel, inp, outp, exp, tvp); 1512 } 1513 #endif 1514 #endif 1515 1516 static abi_long do_pipe2(int host_pipe[], int flags) 1517 { 1518 #ifdef CONFIG_PIPE2 1519 return pipe2(host_pipe, flags); 1520 #else 1521 return -ENOSYS; 1522 #endif 1523 } 1524 1525 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, 1526 int flags, int is_pipe2) 1527 { 1528 int host_pipe[2]; 1529 abi_long ret; 1530 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); 1531 1532 if (is_error(ret)) 1533 return get_errno(ret); 1534 1535 /* Several targets have special calling conventions for the original 1536 pipe syscall, but didn't replicate this into the pipe2 syscall. 
*/ 1537 if (!is_pipe2) { 1538 #if defined(TARGET_ALPHA) 1539 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1]; 1540 return host_pipe[0]; 1541 #elif defined(TARGET_MIPS) 1542 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; 1543 return host_pipe[0]; 1544 #elif defined(TARGET_SH4) 1545 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; 1546 return host_pipe[0]; 1547 #elif defined(TARGET_SPARC) 1548 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1]; 1549 return host_pipe[0]; 1550 #endif 1551 } 1552 1553 if (put_user_s32(host_pipe[0], pipedes) 1554 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0]))) 1555 return -TARGET_EFAULT; 1556 return get_errno(ret); 1557 } 1558 1559 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn, 1560 abi_ulong target_addr, 1561 socklen_t len) 1562 { 1563 struct target_ip_mreqn *target_smreqn; 1564 1565 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1); 1566 if (!target_smreqn) 1567 return -TARGET_EFAULT; 1568 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr; 1569 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr; 1570 if (len == sizeof(struct target_ip_mreqn)) 1571 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex); 1572 unlock_user(target_smreqn, target_addr, 0); 1573 1574 return 0; 1575 } 1576 1577 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr, 1578 abi_ulong target_addr, 1579 socklen_t len) 1580 { 1581 const socklen_t unix_maxlen = sizeof (struct sockaddr_un); 1582 sa_family_t sa_family; 1583 struct target_sockaddr *target_saddr; 1584 1585 if (fd_trans_target_to_host_addr(fd)) { 1586 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len); 1587 } 1588 1589 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 1590 if (!target_saddr) 1591 return -TARGET_EFAULT; 1592 1593 sa_family = tswap16(target_saddr->sa_family); 1594 1595 /* Oops. 
The caller might send a incomplete sun_path; sun_path 1596 * must be terminated by \0 (see the manual page), but 1597 * unfortunately it is quite common to specify sockaddr_un 1598 * length as "strlen(x->sun_path)" while it should be 1599 * "strlen(...) + 1". We'll fix that here if needed. 1600 * Linux kernel has a similar feature. 1601 */ 1602 1603 if (sa_family == AF_UNIX) { 1604 if (len < unix_maxlen && len > 0) { 1605 char *cp = (char*)target_saddr; 1606 1607 if ( cp[len-1] && !cp[len] ) 1608 len++; 1609 } 1610 if (len > unix_maxlen) 1611 len = unix_maxlen; 1612 } 1613 1614 memcpy(addr, target_saddr, len); 1615 addr->sa_family = sa_family; 1616 if (sa_family == AF_NETLINK) { 1617 struct sockaddr_nl *nladdr; 1618 1619 nladdr = (struct sockaddr_nl *)addr; 1620 nladdr->nl_pid = tswap32(nladdr->nl_pid); 1621 nladdr->nl_groups = tswap32(nladdr->nl_groups); 1622 } else if (sa_family == AF_PACKET) { 1623 struct target_sockaddr_ll *lladdr; 1624 1625 lladdr = (struct target_sockaddr_ll *)addr; 1626 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex); 1627 lladdr->sll_hatype = tswap16(lladdr->sll_hatype); 1628 } 1629 unlock_user(target_saddr, target_addr, 0); 1630 1631 return 0; 1632 } 1633 1634 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1635 struct sockaddr *addr, 1636 socklen_t len) 1637 { 1638 struct target_sockaddr *target_saddr; 1639 1640 if (len == 0) { 1641 return 0; 1642 } 1643 assert(addr); 1644 1645 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1646 if (!target_saddr) 1647 return -TARGET_EFAULT; 1648 memcpy(target_saddr, addr, len); 1649 if (len >= offsetof(struct target_sockaddr, sa_family) + 1650 sizeof(target_saddr->sa_family)) { 1651 target_saddr->sa_family = tswap16(addr->sa_family); 1652 } 1653 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) { 1654 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr; 1655 target_nl->nl_pid = tswap32(target_nl->nl_pid); 1656 
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
              (struct target_sockaddr_in6 *)target_saddr;
        /* NOTE(review): sin6_scope_id is a 32-bit field in the kernel
         * sockaddr_in6, so tswap16 here looks suspicious — confirm
         * against the target_sockaddr_in6 declaration whether this
         * should be tswap32. */
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* Convert the ancillary (cmsg) data of a guest msghdr into the host
 * msgh: byte-swap each cmsg header and translate payloads whose
 * representation differs (SCM_RIGHTS fd arrays, SCM_CREDENTIALS).
 * Unknown payload types are copied verbatim with a log message.
 * On success msgh->msg_controllen is set to the converted size;
 * returns 0, or -TARGET_EFAULT on a bad guest control address.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length excludes the (target-sized, target-aligned)
         * cmsg header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small.
It can't happen for the payload types 1704 * we currently support; if it becomes an issue in future 1705 * we would need to improve our allocation strategy to 1706 * something more intelligent than "twice the size of the 1707 * target buffer we're reading from". 1708 */ 1709 gemu_log("Host cmsg overflow\n"); 1710 break; 1711 } 1712 1713 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1714 cmsg->cmsg_level = SOL_SOCKET; 1715 } else { 1716 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1717 } 1718 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1719 cmsg->cmsg_len = CMSG_LEN(len); 1720 1721 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { 1722 int *fd = (int *)data; 1723 int *target_fd = (int *)target_data; 1724 int i, numfds = len / sizeof(int); 1725 1726 for (i = 0; i < numfds; i++) { 1727 __get_user(fd[i], target_fd + i); 1728 } 1729 } else if (cmsg->cmsg_level == SOL_SOCKET 1730 && cmsg->cmsg_type == SCM_CREDENTIALS) { 1731 struct ucred *cred = (struct ucred *)data; 1732 struct target_ucred *target_cred = 1733 (struct target_ucred *)target_data; 1734 1735 __get_user(cred->pid, &target_cred->pid); 1736 __get_user(cred->uid, &target_cred->uid); 1737 __get_user(cred->gid, &target_cred->gid); 1738 } else { 1739 gemu_log("Unsupported ancillary data: %d/%d\n", 1740 cmsg->cmsg_level, cmsg->cmsg_type); 1741 memcpy(data, target_data, len); 1742 } 1743 1744 cmsg = CMSG_NXTHDR(msgh, cmsg); 1745 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 1746 target_cmsg_start); 1747 } 1748 unlock_user(target_cmsg, target_cmsg_addr, 0); 1749 the_end: 1750 msgh->msg_controllen = space; 1751 return 0; 1752 } 1753 1754 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1755 struct msghdr *msgh) 1756 { 1757 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1758 abi_long msg_controllen; 1759 abi_ulong target_cmsg_addr; 1760 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1761 socklen_t space = 0; 1762 1763 
msg_controllen = tswapal(target_msgh->msg_controllen); 1764 if (msg_controllen < sizeof (struct target_cmsghdr)) 1765 goto the_end; 1766 target_cmsg_addr = tswapal(target_msgh->msg_control); 1767 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1768 target_cmsg_start = target_cmsg; 1769 if (!target_cmsg) 1770 return -TARGET_EFAULT; 1771 1772 while (cmsg && target_cmsg) { 1773 void *data = CMSG_DATA(cmsg); 1774 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1775 1776 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr)); 1777 int tgt_len, tgt_space; 1778 1779 /* We never copy a half-header but may copy half-data; 1780 * this is Linux's behaviour in put_cmsg(). Note that 1781 * truncation here is a guest problem (which we report 1782 * to the guest via the CTRUNC bit), unlike truncation 1783 * in target_to_host_cmsg, which is a QEMU bug. 1784 */ 1785 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1786 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1787 break; 1788 } 1789 1790 if (cmsg->cmsg_level == SOL_SOCKET) { 1791 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1792 } else { 1793 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1794 } 1795 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1796 1797 /* Payload types which need a different size of payload on 1798 * the target must adjust tgt_len here. 1799 */ 1800 switch (cmsg->cmsg_level) { 1801 case SOL_SOCKET: 1802 switch (cmsg->cmsg_type) { 1803 case SO_TIMESTAMP: 1804 tgt_len = sizeof(struct target_timeval); 1805 break; 1806 default: 1807 break; 1808 } 1809 default: 1810 tgt_len = len; 1811 break; 1812 } 1813 1814 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 1815 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1816 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 1817 } 1818 1819 /* We must now copy-and-convert len bytes of payload 1820 * into tgt_len bytes of destination space. 
Bear in mind 1821 * that in both source and destination we may be dealing 1822 * with a truncated value! 1823 */ 1824 switch (cmsg->cmsg_level) { 1825 case SOL_SOCKET: 1826 switch (cmsg->cmsg_type) { 1827 case SCM_RIGHTS: 1828 { 1829 int *fd = (int *)data; 1830 int *target_fd = (int *)target_data; 1831 int i, numfds = tgt_len / sizeof(int); 1832 1833 for (i = 0; i < numfds; i++) { 1834 __put_user(fd[i], target_fd + i); 1835 } 1836 break; 1837 } 1838 case SO_TIMESTAMP: 1839 { 1840 struct timeval *tv = (struct timeval *)data; 1841 struct target_timeval *target_tv = 1842 (struct target_timeval *)target_data; 1843 1844 if (len != sizeof(struct timeval) || 1845 tgt_len != sizeof(struct target_timeval)) { 1846 goto unimplemented; 1847 } 1848 1849 /* copy struct timeval to target */ 1850 __put_user(tv->tv_sec, &target_tv->tv_sec); 1851 __put_user(tv->tv_usec, &target_tv->tv_usec); 1852 break; 1853 } 1854 case SCM_CREDENTIALS: 1855 { 1856 struct ucred *cred = (struct ucred *)data; 1857 struct target_ucred *target_cred = 1858 (struct target_ucred *)target_data; 1859 1860 __put_user(cred->pid, &target_cred->pid); 1861 __put_user(cred->uid, &target_cred->uid); 1862 __put_user(cred->gid, &target_cred->gid); 1863 break; 1864 } 1865 default: 1866 goto unimplemented; 1867 } 1868 break; 1869 1870 case SOL_IP: 1871 switch (cmsg->cmsg_type) { 1872 case IP_TTL: 1873 { 1874 uint32_t *v = (uint32_t *)data; 1875 uint32_t *t_int = (uint32_t *)target_data; 1876 1877 if (len != sizeof(uint32_t) || 1878 tgt_len != sizeof(uint32_t)) { 1879 goto unimplemented; 1880 } 1881 __put_user(*v, t_int); 1882 break; 1883 } 1884 case IP_RECVERR: 1885 { 1886 struct errhdr_t { 1887 struct sock_extended_err ee; 1888 struct sockaddr_in offender; 1889 }; 1890 struct errhdr_t *errh = (struct errhdr_t *)data; 1891 struct errhdr_t *target_errh = 1892 (struct errhdr_t *)target_data; 1893 1894 if (len != sizeof(struct errhdr_t) || 1895 tgt_len != sizeof(struct errhdr_t)) { 1896 goto unimplemented; 1897 } 1898 
__put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1899 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1900 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1901 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1902 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1903 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1904 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1905 host_to_target_sockaddr((unsigned long) &target_errh->offender, 1906 (void *) &errh->offender, sizeof(errh->offender)); 1907 break; 1908 } 1909 default: 1910 goto unimplemented; 1911 } 1912 break; 1913 1914 case SOL_IPV6: 1915 switch (cmsg->cmsg_type) { 1916 case IPV6_HOPLIMIT: 1917 { 1918 uint32_t *v = (uint32_t *)data; 1919 uint32_t *t_int = (uint32_t *)target_data; 1920 1921 if (len != sizeof(uint32_t) || 1922 tgt_len != sizeof(uint32_t)) { 1923 goto unimplemented; 1924 } 1925 __put_user(*v, t_int); 1926 break; 1927 } 1928 case IPV6_RECVERR: 1929 { 1930 struct errhdr6_t { 1931 struct sock_extended_err ee; 1932 struct sockaddr_in6 offender; 1933 }; 1934 struct errhdr6_t *errh = (struct errhdr6_t *)data; 1935 struct errhdr6_t *target_errh = 1936 (struct errhdr6_t *)target_data; 1937 1938 if (len != sizeof(struct errhdr6_t) || 1939 tgt_len != sizeof(struct errhdr6_t)) { 1940 goto unimplemented; 1941 } 1942 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1943 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1944 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1945 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1946 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1947 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1948 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1949 host_to_target_sockaddr((unsigned long) &target_errh->offender, 1950 (void *) &errh->offender, sizeof(errh->offender)); 1951 break; 1952 } 1953 default: 1954 goto unimplemented; 1955 } 1956 break; 1957 1958 default: 
/* NOTE(review): this hunk opens inside host_to_target_cmsg(); the function's
 * head (and the switch whose "default:" precedes this label) lies above this
 * hunk.  Fallback path: log the unhandled cmsg level/type and copy the
 * payload through unconverted, zero-padding if the target buffer is larger. */
    unimplemented:
        gemu_log("Unsupported ancillary data: %d/%d\n",
                 cmsg->cmsg_level, cmsg->cmsg_type);
        memcpy(target_data, data, MIN(len, tgt_len));
        if (tgt_len > len) {
            memset(target_data + len, 0, tgt_len - len);
        }
        }

        /* Advance both cursors; never let tgt_space exceed what is left of
         * the target control buffer. */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    /* Report how many control bytes were actually produced. */
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

/* Byte-swap every field of a netlink message header in place (host <->
 * target byte order; the swap is its own inverse, so the same helper is
 * used in both directions). */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}

/* Walk a buffer of netlink messages received from the host and convert each
 * one to target byte order.  The payload of control messages (DONE, NOOP,
 * ERROR) is handled here; any other message type is handed to the
 * protocol-specific callback while still in host byte order.  Headers are
 * swapped only after the payload has been processed, since the walker needs
 * nlmsg_len/nlmsg_type in host order. */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        /* Stop on a malformed or truncated message. */
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            /* End of a multipart message: swap this header and stop. */
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            /* The error payload embeds the offending request's header. */
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}

/* Mirror of host_to_target_for_each_nlmsg() for messages the target is
 * sending to the host: here the header is swapped to host order first, so
 * the length/type checks read the target-order fields through tswap32(). */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* Header is now in host order, so nlmsg_len can be used directly. */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}

#ifdef CONFIG_RTNETLINK
/* Walk a run of netlink attributes (struct nlattr) coming from the host.
 * The callback sees each attribute in host byte order; nla_len/nla_type are
 * swapped to target order afterwards, which is why nla_len is cached in
 * nla_len before the callback runs. */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                                        (struct nlattr *,
                                                         void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}

/* Same walk for routing attributes (struct rtattr); body continues in the
 * next hunk. */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;
/* Second half of host_to_target_for_each_rtattr(): identical structure to
 * the nlattr walker above, but using the RTA_* alignment macros. */

    while (len > sizeof(struct rtattr)) {
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}

/* Payload start of an nlattr.  NOTE(review): the cast binds to (char *)(nla)
 * only, so this relies on GCC's arithmetic-on-void* extension. */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)

/* Byte-swap the payload of one IFLA_BR_* (bridge) attribute according to
 * its type; attributes with no/opaque payload are passed through. */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Same conversion for IFLA_BRPORT_* (bridge port) attributes. */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings between the
 * attributes of one IFLA_LINKINFO nest, so that the nested *_DATA blobs can
 * be decoded per driver kind. */
struct linkinfo_context {
    int len;          /* length of name (may include trailing NUL) */
    char *name;       /* kind string, points into the attribute buffer */
    int slave_len;
    char *slave_name;
};

/* Convert one attribute inside an IFLA_LINKINFO nest.  Only the "bridge"
 * kind is decoded; other kinds are logged.
 * NOTE(review): relies on INFO_KIND/INFO_SLAVE_KIND appearing before the
 * corresponding *_DATA attribute; the context is zeroed by the caller, so
 * name/slave_name would be NULL otherwise — confirm the kernel ordering.
 * NOTE(review): the nested walkers are passed nlattr->nla_len, which still
 * includes NLA_HDRLEN, so they may scan a few bytes past the payload —
 * looks like it should be nla_len - NLA_HDRLEN; verify against upstream. */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case QEMU_IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case QEMU_IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case QEMU_IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case QEMU_IFLA_INFO_DATA:
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case QEMU_IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}

/* Byte-swap one AF_INET attribute inside an IFLA_AF_SPEC nest: only the
 * per-device sysctl array (IFLA_INET_CONF) carries data to convert. */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    case QEMU_IFLA_INET_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}

/* Byte-swap one AF_INET6 attribute inside an IFLA_AF_SPEC nest: flags,
 * sysctl/stat arrays and the cacheinfo block all need per-field swapping. */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}

/* Dispatch one IFLA_AF_SPEC sub-nest to the per-family converter; the
 * attribute type here is the address family.
 * NOTE(review): as in the linkinfo case, nla_len (header included) is passed
 * as the nested length — verify against upstream. */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Byte-swap one IFLA_* attribute of an RTM_*LINK message; continues in the
 * next hunk with the stats64/map/nested cases. */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uin8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors =
tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        /* li_context is zeroed so the kind strings start out NULL. */
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Byte-swap one IFA_* attribute of an RTM_*ADDR message. */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Byte-swap one RTA_* attribute of an RTM_*ROUTE message. */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Thin wrappers binding the generic rtattr walker to each converter. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}

static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}

static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}

/* Convert the fixed payload of one rtnetlink message (host -> target) and
 * recurse into its attributes; continues in the next hunk. */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags =
tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}

/* Entry point: convert a buffer of rtnetlink replies to target order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}

/* Target -> host rtattr walker: the header fields arrive in target order,
 * so validation reads them through tswap16() before swapping in place and
 * invoking the callback (which then sees host order). */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}

/* No IFLA_* attribute is converted on the target -> host path yet; every
 * type is just logged. */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Target -> host conversion of IFA_* attributes (addresses pass through). */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Target -> host conversion of RTA_* attributes. */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Thin wrappers for the target -> host direction; conversion errors from
 * the walker are ignored here (void return). */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}

static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}

static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}

/* Convert one outgoing rtnetlink request (target -> host).  GET requests
 * carry only the header's fixed part here and need no payload conversion. */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}

/* Entry point: convert a buffer of outgoing rtnetlink requests. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
#endif /* CONFIG_RTNETLINK */

/* No audit reply payload is converted yet; everything is rejected. */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }
    return 0;
}

static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}

/* Outgoing audit messages: only the free-form user message ranges are
 * allowed through (their payload is an uninterpreted string). */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ...
AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}

static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        /* int-valued IP options; the kernel also accepts a single byte for
         * these, hence the optlen >= 1 fallback below. */
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept either the short ip_mreq or the full ip_mreqn form. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* Addresses inside are network byte order on both sides, so the
             * struct can be passed through without field conversion. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index is host-endian; the address is
             * network byte order already. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            /* Silently truncate an oversized filter to the host size. */
            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

/* Shared tail for SO_RCVTIMEO / SO_SNDTIMEO: both take a target_timeval. */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                /* Copy the BPF program, converting each instruction's
                 * multi-byte fields to host byte order. */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    /* jt/jf are single bytes: no swap needed. */
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* Clamp to IFNAMSIZ-1 so the copied name can be
                 * NUL-terminated below. */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        /* Common tail for the int-valued SOL_SOCKET options above. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos.
 */
/* Read a socket option for the guest, translating the option name, the
 * option value and the value-result length word between target and host
 * conventions.  Only TARGET_SOL_SOCKET, SOL_TCP and SOL_IP are handled;
 * anything else logs and returns -TARGET_EOPNOTSUPP.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            /* Value-result: read the guest's buffer length first. */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy out no more than the guest asked for, and report
             * the (possibly truncated) length back. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Unknown names are passed through untranslated; the host
             * kernel will reject them if it doesn't know them either. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* SOCK_* numbering differs between targets. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Guest buffers shorter than an int get a single byte, mirroring
         * the kernel's compat behaviour for these options. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* These IP options may be read as a single byte when the
             * guest passed a short buffer and the value fits. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* Translate a guest iovec array into a locked host iovec array.
 * On success the caller owns the returned array and must release it with
 * unlock_iovec().  On failure returns NULL with errno set (host errno).
 * count == 0 returns NULL with errno == 0, which callers must treat as
 * success.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the total never exceeds max_len (partial I/O). */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked before entry i. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}

/* Release an iovec array produced by lock_iovec(), copying data back to
 * the guest when 'copy' is set, then free the host array.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec stopped at the first negative length. */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}

/* Convert TARGET_SOCK_* type and flags to host values in place.
 * Returns 0 on success, -TARGET_EINVAL if a flag can't be represented
 * on this host.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Neither SOCK_NONBLOCK nor the fcntl fallback exists. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    /* Host lacks SOCK_NONBLOCK: emulate it via fcntl on the new fd. */
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* Convert a guest sockaddr for an (obsolete) SOCK_PACKET socket; only
 * the family field is byte-swapped, the rest is copied verbatim.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}

/* fd translator attached to SOCK_PACKET sockets by do_socket(). */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};

#ifdef CONFIG_RTNETLINK
/* NETLINK_ROUTE payload converters: delegate to the nlmsg helpers,
 * preserving the original length on success. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */

/* NETLINK_AUDIT payload converters, same pattern as NETLINK_ROUTE. */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};

/* do_socket() Must return target values and target errnos.
*/ 3618 static abi_long do_socket(int domain, int type, int protocol) 3619 { 3620 int target_type = type; 3621 int ret; 3622 3623 ret = target_to_host_sock_type(&type); 3624 if (ret) { 3625 return ret; 3626 } 3627 3628 if (domain == PF_NETLINK && !( 3629 #ifdef CONFIG_RTNETLINK 3630 protocol == NETLINK_ROUTE || 3631 #endif 3632 protocol == NETLINK_KOBJECT_UEVENT || 3633 protocol == NETLINK_AUDIT)) { 3634 return -EPFNOSUPPORT; 3635 } 3636 3637 if (domain == AF_PACKET || 3638 (domain == AF_INET && type == SOCK_PACKET)) { 3639 protocol = tswap16(protocol); 3640 } 3641 3642 ret = get_errno(socket(domain, type, protocol)); 3643 if (ret >= 0) { 3644 ret = sock_flags_fixup(ret, target_type); 3645 if (type == SOCK_PACKET) { 3646 /* Manage an obsolete case : 3647 * if socket type is SOCK_PACKET, bind by name 3648 */ 3649 fd_trans_register(ret, &target_packet_trans); 3650 } else if (domain == PF_NETLINK) { 3651 switch (protocol) { 3652 #ifdef CONFIG_RTNETLINK 3653 case NETLINK_ROUTE: 3654 fd_trans_register(ret, &target_netlink_route_trans); 3655 break; 3656 #endif 3657 case NETLINK_KOBJECT_UEVENT: 3658 /* nothing to do: messages are strings */ 3659 break; 3660 case NETLINK_AUDIT: 3661 fd_trans_register(ret, &target_netlink_audit_trans); 3662 break; 3663 default: 3664 g_assert_not_reached(); 3665 } 3666 } 3667 } 3668 return ret; 3669 } 3670 3671 /* do_bind() Must return target values and target errnos. */ 3672 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3673 socklen_t addrlen) 3674 { 3675 void *addr; 3676 abi_long ret; 3677 3678 if ((int)addrlen < 0) { 3679 return -TARGET_EINVAL; 3680 } 3681 3682 addr = alloca(addrlen+1); 3683 3684 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3685 if (ret) 3686 return ret; 3687 3688 return get_errno(bind(sockfd, addr, addrlen)); 3689 } 3690 3691 /* do_connect() Must return target values and target errnos. 
 */
/* Connect a guest socket, converting the sockaddr to host layout. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 slack byte — presumably for AF_UNIX path NUL-termination in
     * target_to_host_sockaddr(); confirm against its definition. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg_locked() Must return target values and target errnos.
 * Common sendmsg/recvmsg path: 'msgp' is the already-locked guest msghdr;
 * 'send' selects direction.  Converts the name, control and iovec parts
 * between target and host layouts around the actual syscall.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* 2x: host cmsg entries can be larger than the target's — presumably
     * headroom for that expansion.  NOTE(review): size is guest-controlled
     * and alloca'd; worth bounding — confirm upstream handling. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate the payload in a scratch copy so the guest's
             * buffer is left untouched. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 marks the bad-msg_name case from above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success, return the byte count from recvmsg. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

/* Lock the guest msghdr, then defer to do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by iterating do_sendrecvmsg_locked() over
 * the guest's mmsghdr vector.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}

/* do_accept4() Must return target values and target errnos.
*/ 3880 static abi_long do_accept4(int fd, abi_ulong target_addr, 3881 abi_ulong target_addrlen_addr, int flags) 3882 { 3883 socklen_t addrlen; 3884 void *addr; 3885 abi_long ret; 3886 int host_flags; 3887 3888 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3889 3890 if (target_addr == 0) { 3891 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3892 } 3893 3894 /* linux returns EINVAL if addrlen pointer is invalid */ 3895 if (get_user_u32(addrlen, target_addrlen_addr)) 3896 return -TARGET_EINVAL; 3897 3898 if ((int)addrlen < 0) { 3899 return -TARGET_EINVAL; 3900 } 3901 3902 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3903 return -TARGET_EINVAL; 3904 3905 addr = alloca(addrlen); 3906 3907 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags)); 3908 if (!is_error(ret)) { 3909 host_to_target_sockaddr(target_addr, addr, addrlen); 3910 if (put_user_u32(addrlen, target_addrlen_addr)) 3911 ret = -TARGET_EFAULT; 3912 } 3913 return ret; 3914 } 3915 3916 /* do_getpeername() Must return target values and target errnos. */ 3917 static abi_long do_getpeername(int fd, abi_ulong target_addr, 3918 abi_ulong target_addrlen_addr) 3919 { 3920 socklen_t addrlen; 3921 void *addr; 3922 abi_long ret; 3923 3924 if (get_user_u32(addrlen, target_addrlen_addr)) 3925 return -TARGET_EFAULT; 3926 3927 if ((int)addrlen < 0) { 3928 return -TARGET_EINVAL; 3929 } 3930 3931 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3932 return -TARGET_EFAULT; 3933 3934 addr = alloca(addrlen); 3935 3936 ret = get_errno(getpeername(fd, addr, &addrlen)); 3937 if (!is_error(ret)) { 3938 host_to_target_sockaddr(target_addr, addr, addrlen); 3939 if (put_user_u32(addrlen, target_addrlen_addr)) 3940 ret = -TARGET_EFAULT; 3941 } 3942 return ret; 3943 } 3944 3945 /* do_getsockname() Must return target values and target errnos. 
*/ 3946 static abi_long do_getsockname(int fd, abi_ulong target_addr, 3947 abi_ulong target_addrlen_addr) 3948 { 3949 socklen_t addrlen; 3950 void *addr; 3951 abi_long ret; 3952 3953 if (get_user_u32(addrlen, target_addrlen_addr)) 3954 return -TARGET_EFAULT; 3955 3956 if ((int)addrlen < 0) { 3957 return -TARGET_EINVAL; 3958 } 3959 3960 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3961 return -TARGET_EFAULT; 3962 3963 addr = alloca(addrlen); 3964 3965 ret = get_errno(getsockname(fd, addr, &addrlen)); 3966 if (!is_error(ret)) { 3967 host_to_target_sockaddr(target_addr, addr, addrlen); 3968 if (put_user_u32(addrlen, target_addrlen_addr)) 3969 ret = -TARGET_EFAULT; 3970 } 3971 return ret; 3972 } 3973 3974 /* do_socketpair() Must return target values and target errnos. */ 3975 static abi_long do_socketpair(int domain, int type, int protocol, 3976 abi_ulong target_tab_addr) 3977 { 3978 int tab[2]; 3979 abi_long ret; 3980 3981 target_to_host_sock_type(&type); 3982 3983 ret = get_errno(socketpair(domain, type, protocol, tab)); 3984 if (!is_error(ret)) { 3985 if (put_user_s32(tab[0], target_tab_addr) 3986 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 3987 ret = -TARGET_EFAULT; 3988 } 3989 return ret; 3990 } 3991 3992 /* do_sendto() Must return target values and target errnos. 
*/ 3993 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 3994 abi_ulong target_addr, socklen_t addrlen) 3995 { 3996 void *addr; 3997 void *host_msg; 3998 void *copy_msg = NULL; 3999 abi_long ret; 4000 4001 if ((int)addrlen < 0) { 4002 return -TARGET_EINVAL; 4003 } 4004 4005 host_msg = lock_user(VERIFY_READ, msg, len, 1); 4006 if (!host_msg) 4007 return -TARGET_EFAULT; 4008 if (fd_trans_target_to_host_data(fd)) { 4009 copy_msg = host_msg; 4010 host_msg = g_malloc(len); 4011 memcpy(host_msg, copy_msg, len); 4012 ret = fd_trans_target_to_host_data(fd)(host_msg, len); 4013 if (ret < 0) { 4014 goto fail; 4015 } 4016 } 4017 if (target_addr) { 4018 addr = alloca(addrlen+1); 4019 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 4020 if (ret) { 4021 goto fail; 4022 } 4023 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen)); 4024 } else { 4025 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0)); 4026 } 4027 fail: 4028 if (copy_msg) { 4029 g_free(host_msg); 4030 host_msg = copy_msg; 4031 } 4032 unlock_user(host_msg, msg, 0); 4033 return ret; 4034 } 4035 4036 /* do_recvfrom() Must return target values and target errnos. */ 4037 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 4038 abi_ulong target_addr, 4039 abi_ulong target_addrlen) 4040 { 4041 socklen_t addrlen; 4042 void *addr; 4043 void *host_msg; 4044 abi_long ret; 4045 4046 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 4047 if (!host_msg) 4048 return -TARGET_EFAULT; 4049 if (target_addr) { 4050 if (get_user_u32(addrlen, target_addrlen)) { 4051 ret = -TARGET_EFAULT; 4052 goto fail; 4053 } 4054 if ((int)addrlen < 0) { 4055 ret = -TARGET_EINVAL; 4056 goto fail; 4057 } 4058 addr = alloca(addrlen); 4059 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, 4060 addr, &addrlen)); 4061 } else { 4062 addr = NULL; /* To keep compiler quiet. 
*/ 4063 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0)); 4064 } 4065 if (!is_error(ret)) { 4066 if (fd_trans_host_to_target_data(fd)) { 4067 ret = fd_trans_host_to_target_data(fd)(host_msg, ret); 4068 } 4069 if (target_addr) { 4070 host_to_target_sockaddr(target_addr, addr, addrlen); 4071 if (put_user_u32(addrlen, target_addrlen)) { 4072 ret = -TARGET_EFAULT; 4073 goto fail; 4074 } 4075 } 4076 unlock_user(host_msg, msg, len); 4077 } else { 4078 fail: 4079 unlock_user(host_msg, msg, 0); 4080 } 4081 return ret; 4082 } 4083 4084 #ifdef TARGET_NR_socketcall 4085 /* do_socketcall() must return target values and target errnos. */ 4086 static abi_long do_socketcall(int num, abi_ulong vptr) 4087 { 4088 static const unsigned nargs[] = { /* number of arguments per operation */ 4089 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */ 4090 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */ 4091 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */ 4092 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */ 4093 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */ 4094 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */ 4095 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */ 4096 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */ 4097 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */ 4098 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */ 4099 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */ 4100 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */ 4101 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */ 4102 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 4103 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 4104 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */ 4105 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */ 4106 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */ 4107 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */ 4108 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */ 4109 }; 
4110 abi_long a[6]; /* max 6 args */ 4111 unsigned i; 4112 4113 /* check the range of the first argument num */ 4114 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */ 4115 if (num < 1 || num > TARGET_SYS_SENDMMSG) { 4116 return -TARGET_EINVAL; 4117 } 4118 /* ensure we have space for args */ 4119 if (nargs[num] > ARRAY_SIZE(a)) { 4120 return -TARGET_EINVAL; 4121 } 4122 /* collect the arguments in a[] according to nargs[] */ 4123 for (i = 0; i < nargs[num]; ++i) { 4124 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 4125 return -TARGET_EFAULT; 4126 } 4127 } 4128 /* now when we have the args, invoke the appropriate underlying function */ 4129 switch (num) { 4130 case TARGET_SYS_SOCKET: /* domain, type, protocol */ 4131 return do_socket(a[0], a[1], a[2]); 4132 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */ 4133 return do_bind(a[0], a[1], a[2]); 4134 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */ 4135 return do_connect(a[0], a[1], a[2]); 4136 case TARGET_SYS_LISTEN: /* sockfd, backlog */ 4137 return get_errno(listen(a[0], a[1])); 4138 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */ 4139 return do_accept4(a[0], a[1], a[2], 0); 4140 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */ 4141 return do_getsockname(a[0], a[1], a[2]); 4142 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */ 4143 return do_getpeername(a[0], a[1], a[2]); 4144 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */ 4145 return do_socketpair(a[0], a[1], a[2], a[3]); 4146 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */ 4147 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 4148 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */ 4149 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 4150 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */ 4151 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 4152 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */ 4153 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], 
a[5]); 4154 case TARGET_SYS_SHUTDOWN: /* sockfd, how */ 4155 return get_errno(shutdown(a[0], a[1])); 4156 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 4157 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 4158 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 4159 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 4160 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */ 4161 return do_sendrecvmsg(a[0], a[1], a[2], 1); 4162 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */ 4163 return do_sendrecvmsg(a[0], a[1], a[2], 0); 4164 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */ 4165 return do_accept4(a[0], a[1], a[2], a[3]); 4166 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */ 4167 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0); 4168 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */ 4169 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1); 4170 default: 4171 gemu_log("Unsupported socketcall: %d\n", num); 4172 return -TARGET_EINVAL; 4173 } 4174 } 4175 #endif 4176 4177 #define N_SHM_REGIONS 32 4178 4179 static struct shm_region { 4180 abi_ulong start; 4181 abi_ulong size; 4182 bool in_use; 4183 } shm_regions[N_SHM_REGIONS]; 4184 4185 #ifndef TARGET_SEMID64_DS 4186 /* asm-generic version of this struct */ 4187 struct target_semid64_ds 4188 { 4189 struct target_ipc_perm sem_perm; 4190 abi_ulong sem_otime; 4191 #if TARGET_ABI_BITS == 32 4192 abi_ulong __unused1; 4193 #endif 4194 abi_ulong sem_ctime; 4195 #if TARGET_ABI_BITS == 32 4196 abi_ulong __unused2; 4197 #endif 4198 abi_ulong sem_nsems; 4199 abi_ulong __unused3; 4200 abi_ulong __unused4; 4201 }; 4202 #endif 4203 4204 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 4205 abi_ulong target_addr) 4206 { 4207 struct target_ipc_perm *target_ip; 4208 struct target_semid64_ds *target_sd; 4209 4210 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4211 return -TARGET_EFAULT; 4212 target_ip = 
&(target_sd->sem_perm); 4213 host_ip->__key = tswap32(target_ip->__key); 4214 host_ip->uid = tswap32(target_ip->uid); 4215 host_ip->gid = tswap32(target_ip->gid); 4216 host_ip->cuid = tswap32(target_ip->cuid); 4217 host_ip->cgid = tswap32(target_ip->cgid); 4218 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 4219 host_ip->mode = tswap32(target_ip->mode); 4220 #else 4221 host_ip->mode = tswap16(target_ip->mode); 4222 #endif 4223 #if defined(TARGET_PPC) 4224 host_ip->__seq = tswap32(target_ip->__seq); 4225 #else 4226 host_ip->__seq = tswap16(target_ip->__seq); 4227 #endif 4228 unlock_user_struct(target_sd, target_addr, 0); 4229 return 0; 4230 } 4231 4232 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 4233 struct ipc_perm *host_ip) 4234 { 4235 struct target_ipc_perm *target_ip; 4236 struct target_semid64_ds *target_sd; 4237 4238 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4239 return -TARGET_EFAULT; 4240 target_ip = &(target_sd->sem_perm); 4241 target_ip->__key = tswap32(host_ip->__key); 4242 target_ip->uid = tswap32(host_ip->uid); 4243 target_ip->gid = tswap32(host_ip->gid); 4244 target_ip->cuid = tswap32(host_ip->cuid); 4245 target_ip->cgid = tswap32(host_ip->cgid); 4246 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 4247 target_ip->mode = tswap32(host_ip->mode); 4248 #else 4249 target_ip->mode = tswap16(host_ip->mode); 4250 #endif 4251 #if defined(TARGET_PPC) 4252 target_ip->__seq = tswap32(host_ip->__seq); 4253 #else 4254 target_ip->__seq = tswap16(host_ip->__seq); 4255 #endif 4256 unlock_user_struct(target_sd, target_addr, 1); 4257 return 0; 4258 } 4259 4260 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 4261 abi_ulong target_addr) 4262 { 4263 struct target_semid64_ds *target_sd; 4264 4265 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4266 return -TARGET_EFAULT; 4267 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 
4268 return -TARGET_EFAULT; 4269 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 4270 host_sd->sem_otime = tswapal(target_sd->sem_otime); 4271 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 4272 unlock_user_struct(target_sd, target_addr, 0); 4273 return 0; 4274 } 4275 4276 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 4277 struct semid_ds *host_sd) 4278 { 4279 struct target_semid64_ds *target_sd; 4280 4281 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4282 return -TARGET_EFAULT; 4283 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 4284 return -TARGET_EFAULT; 4285 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 4286 target_sd->sem_otime = tswapal(host_sd->sem_otime); 4287 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 4288 unlock_user_struct(target_sd, target_addr, 1); 4289 return 0; 4290 } 4291 4292 struct target_seminfo { 4293 int semmap; 4294 int semmni; 4295 int semmns; 4296 int semmnu; 4297 int semmsl; 4298 int semopm; 4299 int semume; 4300 int semusz; 4301 int semvmx; 4302 int semaem; 4303 }; 4304 4305 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 4306 struct seminfo *host_seminfo) 4307 { 4308 struct target_seminfo *target_seminfo; 4309 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 4310 return -TARGET_EFAULT; 4311 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 4312 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 4313 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 4314 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 4315 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 4316 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 4317 __put_user(host_seminfo->semume, &target_seminfo->semume); 4318 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 4319 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 4320 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 4321 
unlock_user_struct(target_seminfo, target_addr, 1); 4322 return 0; 4323 } 4324 4325 union semun { 4326 int val; 4327 struct semid_ds *buf; 4328 unsigned short *array; 4329 struct seminfo *__buf; 4330 }; 4331 4332 union target_semun { 4333 int val; 4334 abi_ulong buf; 4335 abi_ulong array; 4336 abi_ulong __buf; 4337 }; 4338 4339 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 4340 abi_ulong target_addr) 4341 { 4342 int nsems; 4343 unsigned short *array; 4344 union semun semun; 4345 struct semid_ds semid_ds; 4346 int i, ret; 4347 4348 semun.buf = &semid_ds; 4349 4350 ret = semctl(semid, 0, IPC_STAT, semun); 4351 if (ret == -1) 4352 return get_errno(ret); 4353 4354 nsems = semid_ds.sem_nsems; 4355 4356 *host_array = g_try_new(unsigned short, nsems); 4357 if (!*host_array) { 4358 return -TARGET_ENOMEM; 4359 } 4360 array = lock_user(VERIFY_READ, target_addr, 4361 nsems*sizeof(unsigned short), 1); 4362 if (!array) { 4363 g_free(*host_array); 4364 return -TARGET_EFAULT; 4365 } 4366 4367 for(i=0; i<nsems; i++) { 4368 __get_user((*host_array)[i], &array[i]); 4369 } 4370 unlock_user(array, target_addr, 0); 4371 4372 return 0; 4373 } 4374 4375 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 4376 unsigned short **host_array) 4377 { 4378 int nsems; 4379 unsigned short *array; 4380 union semun semun; 4381 struct semid_ds semid_ds; 4382 int i, ret; 4383 4384 semun.buf = &semid_ds; 4385 4386 ret = semctl(semid, 0, IPC_STAT, semun); 4387 if (ret == -1) 4388 return get_errno(ret); 4389 4390 nsems = semid_ds.sem_nsems; 4391 4392 array = lock_user(VERIFY_WRITE, target_addr, 4393 nsems*sizeof(unsigned short), 0); 4394 if (!array) 4395 return -TARGET_EFAULT; 4396 4397 for(i=0; i<nsems; i++) { 4398 __put_user((*host_array)[i], &array[i]); 4399 } 4400 g_free(*host_array); 4401 unlock_user(array, target_addr, 1); 4402 4403 return 0; 4404 } 4405 4406 static inline abi_long do_semctl(int semid, int semnum, int cmd, 4407 
abi_ulong target_arg) 4408 { 4409 union target_semun target_su = { .buf = target_arg }; 4410 union semun arg; 4411 struct semid_ds dsarg; 4412 unsigned short *array = NULL; 4413 struct seminfo seminfo; 4414 abi_long ret = -TARGET_EINVAL; 4415 abi_long err; 4416 cmd &= 0xff; 4417 4418 switch( cmd ) { 4419 case GETVAL: 4420 case SETVAL: 4421 /* In 64 bit cross-endian situations, we will erroneously pick up 4422 * the wrong half of the union for the "val" element. To rectify 4423 * this, the entire 8-byte structure is byteswapped, followed by 4424 * a swap of the 4 byte val field. In other cases, the data is 4425 * already in proper host byte order. */ 4426 if (sizeof(target_su.val) != (sizeof(target_su.buf))) { 4427 target_su.buf = tswapal(target_su.buf); 4428 arg.val = tswap32(target_su.val); 4429 } else { 4430 arg.val = target_su.val; 4431 } 4432 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4433 break; 4434 case GETALL: 4435 case SETALL: 4436 err = target_to_host_semarray(semid, &array, target_su.array); 4437 if (err) 4438 return err; 4439 arg.array = array; 4440 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4441 err = host_to_target_semarray(semid, target_su.array, &array); 4442 if (err) 4443 return err; 4444 break; 4445 case IPC_STAT: 4446 case IPC_SET: 4447 case SEM_STAT: 4448 err = target_to_host_semid_ds(&dsarg, target_su.buf); 4449 if (err) 4450 return err; 4451 arg.buf = &dsarg; 4452 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4453 err = host_to_target_semid_ds(target_su.buf, &dsarg); 4454 if (err) 4455 return err; 4456 break; 4457 case IPC_INFO: 4458 case SEM_INFO: 4459 arg.__buf = &seminfo; 4460 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4461 err = host_to_target_seminfo(target_su.__buf, &seminfo); 4462 if (err) 4463 return err; 4464 break; 4465 case IPC_RMID: 4466 case GETPID: 4467 case GETNCNT: 4468 case GETZCNT: 4469 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 4470 break; 4471 } 4472 4473 return ret; 4474 } 4475 4476 struct 
target_sembuf { 4477 unsigned short sem_num; 4478 short sem_op; 4479 short sem_flg; 4480 }; 4481 4482 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 4483 abi_ulong target_addr, 4484 unsigned nsops) 4485 { 4486 struct target_sembuf *target_sembuf; 4487 int i; 4488 4489 target_sembuf = lock_user(VERIFY_READ, target_addr, 4490 nsops*sizeof(struct target_sembuf), 1); 4491 if (!target_sembuf) 4492 return -TARGET_EFAULT; 4493 4494 for(i=0; i<nsops; i++) { 4495 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 4496 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 4497 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 4498 } 4499 4500 unlock_user(target_sembuf, target_addr, 0); 4501 4502 return 0; 4503 } 4504 4505 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 4506 { 4507 struct sembuf sops[nsops]; 4508 4509 if (target_to_host_sembuf(sops, ptr, nsops)) 4510 return -TARGET_EFAULT; 4511 4512 return get_errno(safe_semtimedop(semid, sops, nsops, NULL)); 4513 } 4514 4515 struct target_msqid_ds 4516 { 4517 struct target_ipc_perm msg_perm; 4518 abi_ulong msg_stime; 4519 #if TARGET_ABI_BITS == 32 4520 abi_ulong __unused1; 4521 #endif 4522 abi_ulong msg_rtime; 4523 #if TARGET_ABI_BITS == 32 4524 abi_ulong __unused2; 4525 #endif 4526 abi_ulong msg_ctime; 4527 #if TARGET_ABI_BITS == 32 4528 abi_ulong __unused3; 4529 #endif 4530 abi_ulong __msg_cbytes; 4531 abi_ulong msg_qnum; 4532 abi_ulong msg_qbytes; 4533 abi_ulong msg_lspid; 4534 abi_ulong msg_lrpid; 4535 abi_ulong __unused4; 4536 abi_ulong __unused5; 4537 }; 4538 4539 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 4540 abi_ulong target_addr) 4541 { 4542 struct target_msqid_ds *target_md; 4543 4544 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 4545 return -TARGET_EFAULT; 4546 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 4547 return -TARGET_EFAULT; 4548 host_md->msg_stime = 
/* --- tail of target_to_host_msqid_ds(): copy remaining fields --- */
                     tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/*
 * Copy a host struct msqid_ds out to the guest at target_addr,
 * swapping each field to guest byte order.  Returns 0 on success,
 * -TARGET_EFAULT on a bad guest address or failed ipc_perm copy.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

/* Guest layout of struct msginfo (IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/*
 * Copy a host struct msginfo out to guest memory at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    /* (remaining fields continue on the following lines) */
/* --- tail of host_to_target_msginfo(): remaining fields + unlock --- */
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

/*
 * Emulate msgctl(2) for the guest: convert arguments in, call the host,
 * convert results back out.  Returns a target errno on failure.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Only the low byte selects the command (strips IPC_64 etc.). */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel writes a struct msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}

/* Guest layout of struct msgbuf: mtype followed by the message text. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

/*
 * Emulate msgsnd(2): build a host msgbuf from the guest one (mtype
 * byte-swapped, text copied verbatim) and send it via the signal-safe
 * wrapper.  Returns a target errno on failure.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    /* (call + cleanup continue on the following lines) */
    ret =
/* --- tail of do_msgsnd(): issue the host call and clean up --- */
          get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

/*
 * Emulate msgrcv(2): receive into a host buffer, then copy the message
 * text and byte-swapped mtype back to the guest msgbuf at msgp.
 * Returns the number of bytes received or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* sizeof(long) covers the host mtype field preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Guest mtext starts right after the abi-sized mtype field. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): on msgrcv failure host_mb->mtype is uninitialized
     * here, so a garbage mtype is written back — harmless but worth
     * confirming against upstream behaviour. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}

/*
 * Read a guest target_shmid_ds at target_addr into a host shmid_ds.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    /* (shm_nattch + unlock continue on the following lines) */
/* --- tail of target_to_host_shmid_ds(): last field + unlock --- */
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/*
 * Copy a host struct shmid_ds out to the guest at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address or
 * failed ipc_perm copy.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest layout of struct shminfo (IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/*
 * Copy a host struct shminfo out to guest memory at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

/* Guest layout of struct shm_info (SHM_INFO result). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/*
 * Copy a host struct shm_info out to guest memory at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

/*
 * Emulate shmctl(2): convert the guest argument in, call the host,
 * convert the result back out.  Returns a target errno on failure.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Only the low byte selects the command (strips IPC_64 etc.). */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel writes a struct shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, a struct shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which
case they should 4847 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function. 4848 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA 4849 * and defining its own value for SHMLBA. 4850 * 4851 * The kernel also permits SHMLBA to be set by the architecture to a 4852 * value larger than the page size without setting __ARCH_FORCE_SHMLBA; 4853 * this means that addresses are rounded to the large size if 4854 * SHM_RND is set but addresses not aligned to that size are not rejected 4855 * as long as they are at least page-aligned. Since the only architecture 4856 * which uses this is ia64 this code doesn't provide for that oddity. 4857 */ 4858 static inline abi_ulong target_shmlba(CPUArchState *cpu_env) 4859 { 4860 return TARGET_PAGE_SIZE; 4861 } 4862 #endif 4863 4864 static inline abi_ulong do_shmat(CPUArchState *cpu_env, 4865 int shmid, abi_ulong shmaddr, int shmflg) 4866 { 4867 abi_long raddr; 4868 void *host_raddr; 4869 struct shmid_ds shm_info; 4870 int i,ret; 4871 abi_ulong shmlba; 4872 4873 /* find out the length of the shared memory segment */ 4874 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 4875 if (is_error(ret)) { 4876 /* can't get length, bail out */ 4877 return ret; 4878 } 4879 4880 shmlba = target_shmlba(cpu_env); 4881 4882 if (shmaddr & (shmlba - 1)) { 4883 if (shmflg & SHM_RND) { 4884 shmaddr &= ~(shmlba - 1); 4885 } else { 4886 return -TARGET_EINVAL; 4887 } 4888 } 4889 4890 mmap_lock(); 4891 4892 if (shmaddr) 4893 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 4894 else { 4895 abi_ulong mmap_start; 4896 4897 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 4898 4899 if (mmap_start == -1) { 4900 errno = ENOMEM; 4901 host_raddr = (void *)-1; 4902 } else 4903 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 4904 } 4905 4906 if (host_raddr == (void *)-1) { 4907 mmap_unlock(); 4908 return get_errno((long)host_raddr); 4909 } 4910 raddr=h2g((unsigned long)host_raddr); 4911 4912 
/* --- tail of do_shmat(): publish page permissions and record region --- */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the attach so do_shmdt() can clear the flags later. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}

/*
 * Emulate shmdt(2): drop the bookkeeping entry for this attach (if we
 * recorded one) and detach the segment in the host.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}

#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* The multiplexed ipc(2) call packs an ABI version in the top
     * 16 bits of the call number. */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument.
         */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: ptr points at a {msgp, msgtyp} pair. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* Attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

/* First expansion of syscall_types.h: build the STRUCT_xxx enum.
 * (The macro body continues on the following line.) */
#define STRUCT(name, ...) \
                          STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit the thunk type description for each struct. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Custom ioctl handler: gets the table entry, a scratch buffer, and
 * the raw guest argument. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;           /* guest ioctl number */
    unsigned int host_cmd;    /* host ioctl number */
    const char *name;
    int access;               /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;    /* non-NULL for special-cased ioctls */
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed scratch buffer handed to ioctl handlers. */
#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;   /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed struct fiemap header from guest memory. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject counts whose output size would overflow a 32-bit size. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif

/*
 * SIOCGIFCONF handler: the guest hands us a struct ifconf whose buffer
 * holds an array of target-format ifreqs; the host array element size
 * differs, so the lengths and contents are converted in both directions.
 */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;   /* size of one guest-format ifreq */
    int nb_ifreq;
    int free_buf = 0;        /* set when host_ifconf was heap-allocated */
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf; /* guest address of the ifreq array */
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    /* Convert the guest struct ifconf into buf_temp. */
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    /* (conversion continues on the following lines) */
    thunk_convert(buf_temp,
argptr, arg_type, THUNK_HOST); 5207 unlock_user(argptr, arg, 0); 5208 5209 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 5210 target_ifc_len = host_ifconf->ifc_len; 5211 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 5212 5213 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 5214 nb_ifreq = target_ifc_len / target_ifreq_size; 5215 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 5216 5217 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 5218 if (outbufsz > MAX_STRUCT_SIZE) { 5219 /* We can't fit all the extents into the fixed size buffer. 5220 * Allocate one that is large enough and use it instead. 5221 */ 5222 host_ifconf = malloc(outbufsz); 5223 if (!host_ifconf) { 5224 return -TARGET_ENOMEM; 5225 } 5226 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 5227 free_buf = 1; 5228 } 5229 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 5230 5231 host_ifconf->ifc_len = host_ifc_len; 5232 host_ifconf->ifc_buf = host_ifc_buf; 5233 5234 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf)); 5235 if (!is_error(ret)) { 5236 /* convert host ifc_len to target ifc_len */ 5237 5238 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 5239 target_ifc_len = nb_ifreq * target_ifreq_size; 5240 host_ifconf->ifc_len = target_ifc_len; 5241 5242 /* restore target ifc_buf */ 5243 5244 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 5245 5246 /* copy struct ifconf to target user */ 5247 5248 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5249 if (!argptr) 5250 return -TARGET_EFAULT; 5251 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 5252 unlock_user(argptr, arg, target_size); 5253 5254 /* copy ifreq[] to target user */ 5255 5256 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 5257 for (i = 0; i < nb_ifreq ; i++) { 5258 thunk_convert(argptr + i * target_ifreq_size, 5259 host_ifc_buf + i * sizeof(struct ifreq), 5260 ifreq_arg_type, THUNK_TARGET); 5261 } 5262 unlock_user(argptr, 
/* --- tail of do_ioctl_ifconf(): release buffers and return --- */
                    target_ifc_buf, target_ifc_len);
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}

/*
 * Device-mapper ioctl handler.  The argument is a struct dm_ioctl
 * header followed by command-specific variable-length payload located
 * at data_start; both directions need per-command conversion.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;        /* guest address of the payload area */
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;            /* host address of the payload area */

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the input payload per command. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr,
guest_data_size); 5336 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 5337 break; 5338 case DM_TABLE_LOAD: 5339 { 5340 void *gspec = argptr; 5341 void *cur_data = host_data; 5342 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5343 int spec_size = thunk_type_size(arg_type, 0); 5344 int i; 5345 5346 for (i = 0; i < host_dm->target_count; i++) { 5347 struct dm_target_spec *spec = cur_data; 5348 uint32_t next; 5349 int slen; 5350 5351 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 5352 slen = strlen((char*)gspec + spec_size) + 1; 5353 next = spec->next; 5354 spec->next = sizeof(*spec) + slen; 5355 strcpy((char*)&spec[1], gspec + spec_size); 5356 gspec += next; 5357 cur_data += spec->next; 5358 } 5359 break; 5360 } 5361 default: 5362 ret = -TARGET_EINVAL; 5363 unlock_user(argptr, guest_data, 0); 5364 goto out; 5365 } 5366 unlock_user(argptr, guest_data, 0); 5367 5368 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5369 if (!is_error(ret)) { 5370 guest_data = arg + host_dm->data_start; 5371 guest_data_size = host_dm->data_size - host_dm->data_start; 5372 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 5373 switch (ie->host_cmd) { 5374 case DM_REMOVE_ALL: 5375 case DM_DEV_CREATE: 5376 case DM_DEV_REMOVE: 5377 case DM_DEV_RENAME: 5378 case DM_DEV_SUSPEND: 5379 case DM_DEV_STATUS: 5380 case DM_TABLE_LOAD: 5381 case DM_TABLE_CLEAR: 5382 case DM_TARGET_MSG: 5383 case DM_DEV_SET_GEOMETRY: 5384 /* no return data */ 5385 break; 5386 case DM_LIST_DEVICES: 5387 { 5388 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 5389 uint32_t remaining_data = guest_data_size; 5390 void *cur_data = argptr; 5391 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 5392 int nl_size = 12; /* can't use thunk_size due to alignment */ 5393 5394 while (1) { 5395 uint32_t next = nl->next; 5396 if (next) { 5397 nl->next = nl_size + (strlen(nl->name) + 1); 5398 } 5399 if (remaining_data < nl->next) { 5400 host_dm->flags |= 
DM_BUFFER_FULL_FLAG; 5401 break; 5402 } 5403 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 5404 strcpy(cur_data + nl_size, nl->name); 5405 cur_data += nl->next; 5406 remaining_data -= nl->next; 5407 if (!next) { 5408 break; 5409 } 5410 nl = (void*)nl + next; 5411 } 5412 break; 5413 } 5414 case DM_DEV_WAIT: 5415 case DM_TABLE_STATUS: 5416 { 5417 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 5418 void *cur_data = argptr; 5419 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5420 int spec_size = thunk_type_size(arg_type, 0); 5421 int i; 5422 5423 for (i = 0; i < host_dm->target_count; i++) { 5424 uint32_t next = spec->next; 5425 int slen = strlen((char*)&spec[1]) + 1; 5426 spec->next = (cur_data - argptr) + spec_size + slen; 5427 if (guest_data_size < spec->next) { 5428 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5429 break; 5430 } 5431 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 5432 strcpy(cur_data + spec_size, (char*)&spec[1]); 5433 cur_data = argptr + spec->next; 5434 spec = (void*)host_dm + host_dm->data_start + next; 5435 } 5436 break; 5437 } 5438 case DM_TABLE_DEPS: 5439 { 5440 void *hdata = (void*)host_dm + host_dm->data_start; 5441 int count = *(uint32_t*)hdata; 5442 uint64_t *hdev = hdata + 8; 5443 uint64_t *gdev = argptr + 8; 5444 int i; 5445 5446 *(uint32_t*)argptr = tswap32(count); 5447 for (i = 0; i < count; i++) { 5448 *gdev = tswap64(*hdev); 5449 gdev++; 5450 hdev++; 5451 } 5452 break; 5453 } 5454 case DM_LIST_VERSIONS: 5455 { 5456 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 5457 uint32_t remaining_data = guest_data_size; 5458 void *cur_data = argptr; 5459 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 5460 int vers_size = thunk_type_size(arg_type, 0); 5461 5462 while (1) { 5463 uint32_t next = vers->next; 5464 if (next) { 5465 vers->next = vers_size + (strlen(vers->name) + 1); 5466 } 5467 if (remaining_data < vers->next) { 5468 host_dm->flags |= 
                                      DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Convert the fixed-size dm_ioctl header back to the guest. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}

/* Handle the BLKPG (block device partition table) ioctl.  The guest's
 * struct blkpg_ioctl_arg embeds a guest pointer to a struct
 * blkpg_partition payload, which must itself be fetched and converted
 * before the host ioctl can be issued.  Returns a target errno. */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;                 /* skip TYPE_PTR, leaving the struct type */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}

/* Handle SIOCADDRT/SIOCDELRT-style ioctls taking a struct rtentry.
 * The rtentry's rt_dev member is a pointer to a device-name string in
 * guest memory, so it cannot go through the generic thunk conversion:
 * it is locked separately and patched into the host struct.
 * Returns a target errno. */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr;
    unsigned long *host_rt_dev_ptr;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    /* NOTE(review): host_rt_dev_ptr/target_rt_dev_ptr are assigned only
     * inside the rt_dev branch above; this relies on the converted
     * STRUCT_rtentry always containing an rt_dev field — confirm, or a
     * static analyzer will flag a possible uninitialized read here. */
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}

/* KDSIGACCEPT: translate the guest signal number to the host's before
 * handing it to the host ioctl. */
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}

/* Table of all supported ioctls, generated from ioctls.h.  Each entry
 * maps a target command to a host command, an access mode, an optional
 * special-case handler, and the argument type description. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },                  /* sentinel terminating the linear search */
};

/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];  /* host copy of the ioctl argument */
    int target_size;
    void *argptr;

    /* Linear search of the table; the zero sentinel means "not found". */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Special-cased ioctl with its own conversion handler. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.
         */
        return -TARGET_ENOSYS;
    }

    /* Generic path: convert the argument per its type description. */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Read-only for the guest: run the ioctl on a host buffer,
             * then copy the result out to guest memory. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Write-only: convert guest data in, then call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read-write: convert in, call, convert back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

/* Bitmask translation tables mapping target termios flag values to the
 * host's.  Each row is { target_mask, target_bits, host_mask, host_bits }
 * and a zero row terminates the table. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};

/* Convert a guest struct target_termios to a host struct host_termios:
 * byte-swap and remap each flag word through the tables above, and copy
 * the control-character array entry by entry (index layouts differ). */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

/* Inverse of target_to_host_termios: host termios -> guest termios. */
static void host_to_target_termios
                                    (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Control characters are copied individually because the host and
     * target V* index layouts may differ. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

/* Thunk descriptor wiring the two termios converters above into the
 * generic struct-conversion machinery. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};

/* Translation table for mmap() flag bits (target value -> host value). */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};

#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* Copy up to bytecount bytes of the emulated LDT into guest memory at
 * ptr.  Returns the number of bytes copied, 0 if no LDT has been
 * allocated yet, or a target errno. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?
  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
/* Decode a struct target_modify_ldt_ldt_s from guest memory and install
 * the corresponding descriptor into the emulated LDT, allocating the
 * LDT page on first use.  oldmode selects the legacy modify_ldt(1)
 * semantics (no 'useable' bit).  Returns 0 or a target errno. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as the kernel's
     * struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Assemble the two 32-bit halves of the segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
/* modify_ldt(2) dispatcher: func 0 reads the LDT, 1 writes in legacy
 * mode, 0x11 writes in the modern mode. */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

#if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor in the emulated GDT,
 * picking a free slot if the guest passed entry_number == -1.
 * Returns 0 or a target errno. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* entry_number == -1 asks us to find a free TLS slot; report the
     * chosen slot back to the guest. */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (kernel struct user_desc layout). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Assemble the two 32-bit halves of the segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* get_thread_area(2): read a TLS descriptor out of the emulated GDT and
 * decode it back into a struct user_desc for the guest. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits (inverse of do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */

#ifndef TARGET_ABI32
/* arch_prctl(2) for x86-64 guests: get/set the FS and GS segment bases. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */

#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parameters handed from do_fork() to the new thread's start routine;
 * the mutex/cond pair implements the parent/child startup handshake. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

/* Start routine of a thread created by do_fork() with CLONE_VM: set up
 * the per-thread CPU state, publish the TID, signal the parent, then
 * enter the guest CPU loop (never returns). */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2): CLONE_VM requests become a host
 * pthread running a fresh CPU copy; anything else becomes a host
 * fork().  Flag combinations QEMU cannot honour return -TARGET_EINVAL. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the attr-setup return values below are each
         * overwritten by the next call and never checked. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}

/* warning : doesn't handle linux specific flags...
 */
/* Map a target fcntl(2) command to its host equivalent, widening the
 * 32-bit record-lock commands to their 64-bit host forms.  Returns the
 * host command or -TARGET_EINVAL for commands we do not support. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
	case TARGET_F_DUPFD:
	case TARGET_F_GETFD:
	case TARGET_F_SETFD:
	case TARGET_F_GETFL:
	case TARGET_F_SETFL:
            return cmd;
        case TARGET_F_GETLK:
            return F_GETLK64;
        case TARGET_F_SETLK:
            return F_SETLK64;
        case TARGET_F_SETLKW:
            return F_SETLKW64;
	case TARGET_F_GETOWN:
	    return F_GETOWN;
	case TARGET_F_SETOWN:
	    return F_SETOWN;
	case TARGET_F_GETSIG:
	    return F_GETSIG;
	case TARGET_F_SETSIG:
	    return F_SETSIG;
#if TARGET_ABI_BITS == 32
        case TARGET_F_GETLK64:
	    return F_GETLK64;
	case TARGET_F_SETLK64:
	    return F_SETLK64;
	case TARGET_F_SETLKW64:
	    return F_SETLKW64;
#endif
        case TARGET_F_SETLEASE:
            return F_SETLEASE;
        case TARGET_F_GETLEASE:
            return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
        case TARGET_F_DUPFD_CLOEXEC:
            return F_DUPFD_CLOEXEC;
#endif
        case TARGET_F_NOTIFY:
            return F_NOTIFY;
#ifdef F_GETOWN_EX
	case TARGET_F_GETOWN_EX:
	    return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
	case TARGET_F_SETOWN_EX:
	    return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
        case TARGET_F_SETPIPE_SZ:
            return F_SETPIPE_SZ;
        case TARGET_F_GETPIPE_SZ:
            return F_GETPIPE_SZ;
#endif
	default:
            return -TARGET_EINVAL;
    }
    /* NOTE(review): unreachable — every switch path above returns. */
    return -TARGET_EINVAL;
}

/* Lock-type (l_type) translation: host and target F_*LCK values are
 * looked up by value, so the mask columns are unused (-1). */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};

/* Fetch a guest struct flock and convert it into a host struct flock64.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Convert a host struct flock64 back into a guest struct flock.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

/* Function-pointer types so fcntl can pick the right flock converters
 * per ABI (e.g. the ARM EABI variant below). */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI variant of copy_from_user_flock using the EABI flock64
 * layout (different alignment/padding of the 64-bit fields). */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
6556 return 0; 6557 } 6558 6559 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr, 6560 const struct flock64 *fl) 6561 { 6562 struct target_eabi_flock64 *target_fl; 6563 short l_type; 6564 6565 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6566 return -TARGET_EFAULT; 6567 } 6568 6569 l_type = host_to_target_bitmask(fl->l_type, flock_tbl); 6570 __put_user(l_type, &target_fl->l_type); 6571 __put_user(fl->l_whence, &target_fl->l_whence); 6572 __put_user(fl->l_start, &target_fl->l_start); 6573 __put_user(fl->l_len, &target_fl->l_len); 6574 __put_user(fl->l_pid, &target_fl->l_pid); 6575 unlock_user_struct(target_fl, target_flock_addr, 1); 6576 return 0; 6577 } 6578 #endif 6579 6580 static inline abi_long copy_from_user_flock64(struct flock64 *fl, 6581 abi_ulong target_flock_addr) 6582 { 6583 struct target_flock64 *target_fl; 6584 short l_type; 6585 6586 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6587 return -TARGET_EFAULT; 6588 } 6589 6590 __get_user(l_type, &target_fl->l_type); 6591 fl->l_type = target_to_host_bitmask(l_type, flock_tbl); 6592 __get_user(fl->l_whence, &target_fl->l_whence); 6593 __get_user(fl->l_start, &target_fl->l_start); 6594 __get_user(fl->l_len, &target_fl->l_len); 6595 __get_user(fl->l_pid, &target_fl->l_pid); 6596 unlock_user_struct(target_fl, target_flock_addr, 0); 6597 return 0; 6598 } 6599 6600 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, 6601 const struct flock64 *fl) 6602 { 6603 struct target_flock64 *target_fl; 6604 short l_type; 6605 6606 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6607 return -TARGET_EFAULT; 6608 } 6609 6610 l_type = host_to_target_bitmask(fl->l_type, flock_tbl); 6611 __put_user(l_type, &target_fl->l_type); 6612 __put_user(fl->l_whence, &target_fl->l_whence); 6613 __put_user(fl->l_start, &target_fl->l_start); 6614 __put_user(fl->l_len, &target_fl->l_len); 6615 __put_user(fl->l_pid, 
&target_fl->l_pid); 6616 unlock_user_struct(target_fl, target_flock_addr, 1); 6617 return 0; 6618 } 6619 6620 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 6621 { 6622 struct flock64 fl64; 6623 #ifdef F_GETOWN_EX 6624 struct f_owner_ex fox; 6625 struct target_f_owner_ex *target_fox; 6626 #endif 6627 abi_long ret; 6628 int host_cmd = target_to_host_fcntl_cmd(cmd); 6629 6630 if (host_cmd == -TARGET_EINVAL) 6631 return host_cmd; 6632 6633 switch(cmd) { 6634 case TARGET_F_GETLK: 6635 ret = copy_from_user_flock(&fl64, arg); 6636 if (ret) { 6637 return ret; 6638 } 6639 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6640 if (ret == 0) { 6641 ret = copy_to_user_flock(arg, &fl64); 6642 } 6643 break; 6644 6645 case TARGET_F_SETLK: 6646 case TARGET_F_SETLKW: 6647 ret = copy_from_user_flock(&fl64, arg); 6648 if (ret) { 6649 return ret; 6650 } 6651 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6652 break; 6653 6654 case TARGET_F_GETLK64: 6655 ret = copy_from_user_flock64(&fl64, arg); 6656 if (ret) { 6657 return ret; 6658 } 6659 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6660 if (ret == 0) { 6661 ret = copy_to_user_flock64(arg, &fl64); 6662 } 6663 break; 6664 case TARGET_F_SETLK64: 6665 case TARGET_F_SETLKW64: 6666 ret = copy_from_user_flock64(&fl64, arg); 6667 if (ret) { 6668 return ret; 6669 } 6670 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6671 break; 6672 6673 case TARGET_F_GETFL: 6674 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 6675 if (ret >= 0) { 6676 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 6677 } 6678 break; 6679 6680 case TARGET_F_SETFL: 6681 ret = get_errno(safe_fcntl(fd, host_cmd, 6682 target_to_host_bitmask(arg, 6683 fcntl_flags_tbl))); 6684 break; 6685 6686 #ifdef F_GETOWN_EX 6687 case TARGET_F_GETOWN_EX: 6688 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 6689 if (ret >= 0) { 6690 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 6691 return -TARGET_EFAULT; 6692 target_fox->type = tswap32(fox.type); 6693 
target_fox->pid = tswap32(fox.pid); 6694 unlock_user_struct(target_fox, arg, 1); 6695 } 6696 break; 6697 #endif 6698 6699 #ifdef F_SETOWN_EX 6700 case TARGET_F_SETOWN_EX: 6701 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 6702 return -TARGET_EFAULT; 6703 fox.type = tswap32(target_fox->type); 6704 fox.pid = tswap32(target_fox->pid); 6705 unlock_user_struct(target_fox, arg, 0); 6706 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 6707 break; 6708 #endif 6709 6710 case TARGET_F_SETOWN: 6711 case TARGET_F_GETOWN: 6712 case TARGET_F_SETSIG: 6713 case TARGET_F_GETSIG: 6714 case TARGET_F_SETLEASE: 6715 case TARGET_F_GETLEASE: 6716 case TARGET_F_SETPIPE_SZ: 6717 case TARGET_F_GETPIPE_SZ: 6718 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 6719 break; 6720 6721 default: 6722 ret = get_errno(safe_fcntl(fd, cmd, arg)); 6723 break; 6724 } 6725 return ret; 6726 } 6727 6728 #ifdef USE_UID16 6729 6730 static inline int high2lowuid(int uid) 6731 { 6732 if (uid > 65535) 6733 return 65534; 6734 else 6735 return uid; 6736 } 6737 6738 static inline int high2lowgid(int gid) 6739 { 6740 if (gid > 65535) 6741 return 65534; 6742 else 6743 return gid; 6744 } 6745 6746 static inline int low2highuid(int uid) 6747 { 6748 if ((int16_t)uid == -1) 6749 return -1; 6750 else 6751 return uid; 6752 } 6753 6754 static inline int low2highgid(int gid) 6755 { 6756 if ((int16_t)gid == -1) 6757 return -1; 6758 else 6759 return gid; 6760 } 6761 static inline int tswapid(int id) 6762 { 6763 return tswap16(id); 6764 } 6765 6766 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 6767 6768 #else /* !USE_UID16 */ 6769 static inline int high2lowuid(int uid) 6770 { 6771 return uid; 6772 } 6773 static inline int high2lowgid(int gid) 6774 { 6775 return gid; 6776 } 6777 static inline int low2highuid(int uid) 6778 { 6779 return uid; 6780 } 6781 static inline int low2highgid(int gid) 6782 { 6783 return gid; 6784 } 6785 static inline int tswapid(int id) 6786 { 6787 return tswap32(id); 6788 } 6789 6790 
#define put_user_id(x, gaddr) put_user_u32(x, gaddr) 6791 6792 #endif /* USE_UID16 */ 6793 6794 /* We must do direct syscalls for setting UID/GID, because we want to 6795 * implement the Linux system call semantics of "change only for this thread", 6796 * not the libc/POSIX semantics of "change for all threads in process". 6797 * (See http://ewontfix.com/17/ for more details.) 6798 * We use the 32-bit version of the syscalls if present; if it is not 6799 * then either the host architecture supports 32-bit UIDs natively with 6800 * the standard syscall, or the 16-bit UID is the best we can do. 6801 */ 6802 #ifdef __NR_setuid32 6803 #define __NR_sys_setuid __NR_setuid32 6804 #else 6805 #define __NR_sys_setuid __NR_setuid 6806 #endif 6807 #ifdef __NR_setgid32 6808 #define __NR_sys_setgid __NR_setgid32 6809 #else 6810 #define __NR_sys_setgid __NR_setgid 6811 #endif 6812 #ifdef __NR_setresuid32 6813 #define __NR_sys_setresuid __NR_setresuid32 6814 #else 6815 #define __NR_sys_setresuid __NR_setresuid 6816 #endif 6817 #ifdef __NR_setresgid32 6818 #define __NR_sys_setresgid __NR_setresgid32 6819 #else 6820 #define __NR_sys_setresgid __NR_setresgid 6821 #endif 6822 6823 _syscall1(int, sys_setuid, uid_t, uid) 6824 _syscall1(int, sys_setgid, gid_t, gid) 6825 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) 6826 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) 6827 6828 void syscall_init(void) 6829 { 6830 IOCTLEntry *ie; 6831 const argtype *arg_type; 6832 int size; 6833 int i; 6834 6835 thunk_init(STRUCT_MAX); 6836 6837 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 6838 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 6839 #include "syscall_types.h" 6840 #undef STRUCT 6841 #undef STRUCT_SPECIAL 6842 6843 /* Build target_to_host_errno_table[] table from 6844 * host_to_target_errno_table[]. 
*/ 6845 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 6846 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 6847 } 6848 6849 /* we patch the ioctl size if necessary. We rely on the fact that 6850 no ioctl has all the bits at '1' in the size field */ 6851 ie = ioctl_entries; 6852 while (ie->target_cmd != 0) { 6853 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 6854 TARGET_IOC_SIZEMASK) { 6855 arg_type = ie->arg_type; 6856 if (arg_type[0] != TYPE_PTR) { 6857 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 6858 ie->target_cmd); 6859 exit(1); 6860 } 6861 arg_type++; 6862 size = thunk_type_size(arg_type, 0); 6863 ie->target_cmd = (ie->target_cmd & 6864 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 6865 (size << TARGET_IOC_SIZESHIFT); 6866 } 6867 6868 /* automatic consistency check if same arch */ 6869 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6870 (defined(__x86_64__) && defined(TARGET_X86_64)) 6871 if (unlikely(ie->target_cmd != ie->host_cmd)) { 6872 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 6873 ie->name, ie->target_cmd, ie->host_cmd); 6874 } 6875 #endif 6876 ie++; 6877 } 6878 } 6879 6880 #if TARGET_ABI_BITS == 32 6881 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 6882 { 6883 #ifdef TARGET_WORDS_BIGENDIAN 6884 return ((uint64_t)word0 << 32) | word1; 6885 #else 6886 return ((uint64_t)word1 << 32) | word0; 6887 #endif 6888 } 6889 #else /* TARGET_ABI_BITS == 32 */ 6890 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 6891 { 6892 return word0; 6893 } 6894 #endif /* TARGET_ABI_BITS != 32 */ 6895 6896 #ifdef TARGET_NR_truncate64 6897 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 6898 abi_long arg2, 6899 abi_long arg3, 6900 abi_long arg4) 6901 { 6902 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) { 6903 arg2 = arg3; 6904 arg3 = arg4; 6905 } 6906 return get_errno(truncate64(arg1, 
target_offset64(arg2, arg3))); 6907 } 6908 #endif 6909 6910 #ifdef TARGET_NR_ftruncate64 6911 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 6912 abi_long arg2, 6913 abi_long arg3, 6914 abi_long arg4) 6915 { 6916 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) { 6917 arg2 = arg3; 6918 arg3 = arg4; 6919 } 6920 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 6921 } 6922 #endif 6923 6924 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 6925 abi_ulong target_addr) 6926 { 6927 struct target_timespec *target_ts; 6928 6929 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 6930 return -TARGET_EFAULT; 6931 __get_user(host_ts->tv_sec, &target_ts->tv_sec); 6932 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec); 6933 unlock_user_struct(target_ts, target_addr, 0); 6934 return 0; 6935 } 6936 6937 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 6938 struct timespec *host_ts) 6939 { 6940 struct target_timespec *target_ts; 6941 6942 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 6943 return -TARGET_EFAULT; 6944 __put_user(host_ts->tv_sec, &target_ts->tv_sec); 6945 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec); 6946 unlock_user_struct(target_ts, target_addr, 1); 6947 return 0; 6948 } 6949 6950 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 6951 abi_ulong target_addr) 6952 { 6953 struct target_itimerspec *target_itspec; 6954 6955 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 6956 return -TARGET_EFAULT; 6957 } 6958 6959 host_itspec->it_interval.tv_sec = 6960 tswapal(target_itspec->it_interval.tv_sec); 6961 host_itspec->it_interval.tv_nsec = 6962 tswapal(target_itspec->it_interval.tv_nsec); 6963 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 6964 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 6965 6966 unlock_user_struct(target_itspec, target_addr, 1); 
6967 return 0; 6968 } 6969 6970 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 6971 struct itimerspec *host_its) 6972 { 6973 struct target_itimerspec *target_itspec; 6974 6975 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 6976 return -TARGET_EFAULT; 6977 } 6978 6979 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 6980 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 6981 6982 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 6983 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 6984 6985 unlock_user_struct(target_itspec, target_addr, 0); 6986 return 0; 6987 } 6988 6989 static inline abi_long target_to_host_timex(struct timex *host_tx, 6990 abi_long target_addr) 6991 { 6992 struct target_timex *target_tx; 6993 6994 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 6995 return -TARGET_EFAULT; 6996 } 6997 6998 __get_user(host_tx->modes, &target_tx->modes); 6999 __get_user(host_tx->offset, &target_tx->offset); 7000 __get_user(host_tx->freq, &target_tx->freq); 7001 __get_user(host_tx->maxerror, &target_tx->maxerror); 7002 __get_user(host_tx->esterror, &target_tx->esterror); 7003 __get_user(host_tx->status, &target_tx->status); 7004 __get_user(host_tx->constant, &target_tx->constant); 7005 __get_user(host_tx->precision, &target_tx->precision); 7006 __get_user(host_tx->tolerance, &target_tx->tolerance); 7007 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7008 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7009 __get_user(host_tx->tick, &target_tx->tick); 7010 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7011 __get_user(host_tx->jitter, &target_tx->jitter); 7012 __get_user(host_tx->shift, &target_tx->shift); 7013 __get_user(host_tx->stabil, &target_tx->stabil); 7014 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7015 __get_user(host_tx->calcnt, &target_tx->calcnt); 7016 
__get_user(host_tx->errcnt, &target_tx->errcnt); 7017 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7018 __get_user(host_tx->tai, &target_tx->tai); 7019 7020 unlock_user_struct(target_tx, target_addr, 0); 7021 return 0; 7022 } 7023 7024 static inline abi_long host_to_target_timex(abi_long target_addr, 7025 struct timex *host_tx) 7026 { 7027 struct target_timex *target_tx; 7028 7029 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7030 return -TARGET_EFAULT; 7031 } 7032 7033 __put_user(host_tx->modes, &target_tx->modes); 7034 __put_user(host_tx->offset, &target_tx->offset); 7035 __put_user(host_tx->freq, &target_tx->freq); 7036 __put_user(host_tx->maxerror, &target_tx->maxerror); 7037 __put_user(host_tx->esterror, &target_tx->esterror); 7038 __put_user(host_tx->status, &target_tx->status); 7039 __put_user(host_tx->constant, &target_tx->constant); 7040 __put_user(host_tx->precision, &target_tx->precision); 7041 __put_user(host_tx->tolerance, &target_tx->tolerance); 7042 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7043 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7044 __put_user(host_tx->tick, &target_tx->tick); 7045 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7046 __put_user(host_tx->jitter, &target_tx->jitter); 7047 __put_user(host_tx->shift, &target_tx->shift); 7048 __put_user(host_tx->stabil, &target_tx->stabil); 7049 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7050 __put_user(host_tx->calcnt, &target_tx->calcnt); 7051 __put_user(host_tx->errcnt, &target_tx->errcnt); 7052 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7053 __put_user(host_tx->tai, &target_tx->tai); 7054 7055 unlock_user_struct(target_tx, target_addr, 1); 7056 return 0; 7057 } 7058 7059 7060 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, 7061 abi_ulong target_addr) 7062 { 7063 struct target_sigevent *target_sevp; 7064 7065 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { 7066 return 
-TARGET_EFAULT; 7067 } 7068 7069 /* This union is awkward on 64 bit systems because it has a 32 bit 7070 * integer and a pointer in it; we follow the conversion approach 7071 * used for handling sigval types in signal.c so the guest should get 7072 * the correct value back even if we did a 64 bit byteswap and it's 7073 * using the 32 bit integer. 7074 */ 7075 host_sevp->sigev_value.sival_ptr = 7076 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr); 7077 host_sevp->sigev_signo = 7078 target_to_host_signal(tswap32(target_sevp->sigev_signo)); 7079 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); 7080 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid); 7081 7082 unlock_user_struct(target_sevp, target_addr, 1); 7083 return 0; 7084 } 7085 7086 #if defined(TARGET_NR_mlockall) 7087 static inline int target_to_host_mlockall_arg(int arg) 7088 { 7089 int result = 0; 7090 7091 if (arg & TARGET_MLOCKALL_MCL_CURRENT) { 7092 result |= MCL_CURRENT; 7093 } 7094 if (arg & TARGET_MLOCKALL_MCL_FUTURE) { 7095 result |= MCL_FUTURE; 7096 } 7097 return result; 7098 } 7099 #endif 7100 7101 static inline abi_long host_to_target_stat64(void *cpu_env, 7102 abi_ulong target_addr, 7103 struct stat *host_st) 7104 { 7105 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 7106 if (((CPUARMState *)cpu_env)->eabi) { 7107 struct target_eabi_stat64 *target_st; 7108 7109 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7110 return -TARGET_EFAULT; 7111 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 7112 __put_user(host_st->st_dev, &target_st->st_dev); 7113 __put_user(host_st->st_ino, &target_st->st_ino); 7114 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7115 __put_user(host_st->st_ino, &target_st->__st_ino); 7116 #endif 7117 __put_user(host_st->st_mode, &target_st->st_mode); 7118 __put_user(host_st->st_nlink, &target_st->st_nlink); 7119 __put_user(host_st->st_uid, &target_st->st_uid); 7120 __put_user(host_st->st_gid, &target_st->st_gid); 7121 
__put_user(host_st->st_rdev, &target_st->st_rdev); 7122 __put_user(host_st->st_size, &target_st->st_size); 7123 __put_user(host_st->st_blksize, &target_st->st_blksize); 7124 __put_user(host_st->st_blocks, &target_st->st_blocks); 7125 __put_user(host_st->st_atime, &target_st->target_st_atime); 7126 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7127 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7128 unlock_user_struct(target_st, target_addr, 1); 7129 } else 7130 #endif 7131 { 7132 #if defined(TARGET_HAS_STRUCT_STAT64) 7133 struct target_stat64 *target_st; 7134 #else 7135 struct target_stat *target_st; 7136 #endif 7137 7138 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7139 return -TARGET_EFAULT; 7140 memset(target_st, 0, sizeof(*target_st)); 7141 __put_user(host_st->st_dev, &target_st->st_dev); 7142 __put_user(host_st->st_ino, &target_st->st_ino); 7143 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7144 __put_user(host_st->st_ino, &target_st->__st_ino); 7145 #endif 7146 __put_user(host_st->st_mode, &target_st->st_mode); 7147 __put_user(host_st->st_nlink, &target_st->st_nlink); 7148 __put_user(host_st->st_uid, &target_st->st_uid); 7149 __put_user(host_st->st_gid, &target_st->st_gid); 7150 __put_user(host_st->st_rdev, &target_st->st_rdev); 7151 /* XXX: better use of kernel struct */ 7152 __put_user(host_st->st_size, &target_st->st_size); 7153 __put_user(host_st->st_blksize, &target_st->st_blksize); 7154 __put_user(host_st->st_blocks, &target_st->st_blocks); 7155 __put_user(host_st->st_atime, &target_st->target_st_atime); 7156 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7157 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7158 unlock_user_struct(target_st, target_addr, 1); 7159 } 7160 7161 return 0; 7162 } 7163 7164 /* ??? Using host futex calls even when target atomic operations 7165 are not really atomic probably breaks things. 
However implementing 7166 futexes locally would make futexes shared between multiple processes 7167 tricky. However they're probably useless because guest atomic 7168 operations won't work either. */ 7169 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 7170 target_ulong uaddr2, int val3) 7171 { 7172 struct timespec ts, *pts; 7173 int base_op; 7174 7175 /* ??? We assume FUTEX_* constants are the same on both host 7176 and target. */ 7177 #ifdef FUTEX_CMD_MASK 7178 base_op = op & FUTEX_CMD_MASK; 7179 #else 7180 base_op = op; 7181 #endif 7182 switch (base_op) { 7183 case FUTEX_WAIT: 7184 case FUTEX_WAIT_BITSET: 7185 if (timeout) { 7186 pts = &ts; 7187 target_to_host_timespec(pts, timeout); 7188 } else { 7189 pts = NULL; 7190 } 7191 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val), 7192 pts, NULL, val3)); 7193 case FUTEX_WAKE: 7194 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 7195 case FUTEX_FD: 7196 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 7197 case FUTEX_REQUEUE: 7198 case FUTEX_CMP_REQUEUE: 7199 case FUTEX_WAKE_OP: 7200 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7201 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7202 But the prototype takes a `struct timespec *'; insert casts 7203 to satisfy the compiler. We do not need to tswap TIMEOUT 7204 since it's not compared to guest memory. */ 7205 pts = (struct timespec *)(uintptr_t) timeout; 7206 return get_errno(safe_futex(g2h(uaddr), op, val, pts, 7207 g2h(uaddr2), 7208 (base_op == FUTEX_CMP_REQUEUE 7209 ? 
tswap32(val3) 7210 : val3))); 7211 default: 7212 return -TARGET_ENOSYS; 7213 } 7214 } 7215 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7216 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname, 7217 abi_long handle, abi_long mount_id, 7218 abi_long flags) 7219 { 7220 struct file_handle *target_fh; 7221 struct file_handle *fh; 7222 int mid = 0; 7223 abi_long ret; 7224 char *name; 7225 unsigned int size, total_size; 7226 7227 if (get_user_s32(size, handle)) { 7228 return -TARGET_EFAULT; 7229 } 7230 7231 name = lock_user_string(pathname); 7232 if (!name) { 7233 return -TARGET_EFAULT; 7234 } 7235 7236 total_size = sizeof(struct file_handle) + size; 7237 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0); 7238 if (!target_fh) { 7239 unlock_user(name, pathname, 0); 7240 return -TARGET_EFAULT; 7241 } 7242 7243 fh = g_malloc0(total_size); 7244 fh->handle_bytes = size; 7245 7246 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags)); 7247 unlock_user(name, pathname, 0); 7248 7249 /* man name_to_handle_at(2): 7250 * Other than the use of the handle_bytes field, the caller should treat 7251 * the file_handle structure as an opaque data type 7252 */ 7253 7254 memcpy(target_fh, fh, total_size); 7255 target_fh->handle_bytes = tswap32(fh->handle_bytes); 7256 target_fh->handle_type = tswap32(fh->handle_type); 7257 g_free(fh); 7258 unlock_user(target_fh, handle, total_size); 7259 7260 if (put_user_s32(mid, mount_id)) { 7261 return -TARGET_EFAULT; 7262 } 7263 7264 return ret; 7265 7266 } 7267 #endif 7268 7269 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7270 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle, 7271 abi_long flags) 7272 { 7273 struct file_handle *target_fh; 7274 struct file_handle *fh; 7275 unsigned int size, total_size; 7276 abi_long ret; 7277 7278 if (get_user_s32(size, handle)) { 7279 return -TARGET_EFAULT; 7280 } 7281 7282 total_size = 
sizeof(struct file_handle) + size; 7283 target_fh = lock_user(VERIFY_READ, handle, total_size, 1); 7284 if (!target_fh) { 7285 return -TARGET_EFAULT; 7286 } 7287 7288 fh = g_memdup(target_fh, total_size); 7289 fh->handle_bytes = size; 7290 fh->handle_type = tswap32(target_fh->handle_type); 7291 7292 ret = get_errno(open_by_handle_at(mount_fd, fh, 7293 target_to_host_bitmask(flags, fcntl_flags_tbl))); 7294 7295 g_free(fh); 7296 7297 unlock_user(target_fh, handle, total_size); 7298 7299 return ret; 7300 } 7301 #endif 7302 7303 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4) 7304 7305 /* signalfd siginfo conversion */ 7306 7307 static void 7308 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo, 7309 const struct signalfd_siginfo *info) 7310 { 7311 int sig = host_to_target_signal(info->ssi_signo); 7312 7313 /* linux/signalfd.h defines a ssi_addr_lsb 7314 * not defined in sys/signalfd.h but used by some kernels 7315 */ 7316 7317 #ifdef BUS_MCEERR_AO 7318 if (tinfo->ssi_signo == SIGBUS && 7319 (tinfo->ssi_code == BUS_MCEERR_AR || 7320 tinfo->ssi_code == BUS_MCEERR_AO)) { 7321 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1); 7322 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1); 7323 *tssi_addr_lsb = tswap16(*ssi_addr_lsb); 7324 } 7325 #endif 7326 7327 tinfo->ssi_signo = tswap32(sig); 7328 tinfo->ssi_errno = tswap32(tinfo->ssi_errno); 7329 tinfo->ssi_code = tswap32(info->ssi_code); 7330 tinfo->ssi_pid = tswap32(info->ssi_pid); 7331 tinfo->ssi_uid = tswap32(info->ssi_uid); 7332 tinfo->ssi_fd = tswap32(info->ssi_fd); 7333 tinfo->ssi_tid = tswap32(info->ssi_tid); 7334 tinfo->ssi_band = tswap32(info->ssi_band); 7335 tinfo->ssi_overrun = tswap32(info->ssi_overrun); 7336 tinfo->ssi_trapno = tswap32(info->ssi_trapno); 7337 tinfo->ssi_status = tswap32(info->ssi_status); 7338 tinfo->ssi_int = tswap32(info->ssi_int); 7339 tinfo->ssi_ptr = tswap64(info->ssi_ptr); 7340 tinfo->ssi_utime = tswap64(info->ssi_utime); 7341 
tinfo->ssi_stime = tswap64(info->ssi_stime); 7342 tinfo->ssi_addr = tswap64(info->ssi_addr); 7343 } 7344 7345 static abi_long host_to_target_data_signalfd(void *buf, size_t len) 7346 { 7347 int i; 7348 7349 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) { 7350 host_to_target_signalfd_siginfo(buf + i, buf + i); 7351 } 7352 7353 return len; 7354 } 7355 7356 static TargetFdTrans target_signalfd_trans = { 7357 .host_to_target_data = host_to_target_data_signalfd, 7358 }; 7359 7360 static abi_long do_signalfd4(int fd, abi_long mask, int flags) 7361 { 7362 int host_flags; 7363 target_sigset_t *target_mask; 7364 sigset_t host_mask; 7365 abi_long ret; 7366 7367 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) { 7368 return -TARGET_EINVAL; 7369 } 7370 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) { 7371 return -TARGET_EFAULT; 7372 } 7373 7374 target_to_host_sigset(&host_mask, target_mask); 7375 7376 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 7377 7378 ret = get_errno(signalfd(fd, &host_mask, host_flags)); 7379 if (ret >= 0) { 7380 fd_trans_register(ret, &target_signalfd_trans); 7381 } 7382 7383 unlock_user_struct(target_mask, mask, 0); 7384 7385 return ret; 7386 } 7387 #endif 7388 7389 /* Map host to target signal numbers for the wait family of syscalls. 7390 Assume all other status bits are the same. 
*/ 7391 int host_to_target_waitstatus(int status) 7392 { 7393 if (WIFSIGNALED(status)) { 7394 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 7395 } 7396 if (WIFSTOPPED(status)) { 7397 return (host_to_target_signal(WSTOPSIG(status)) << 8) 7398 | (status & 0xff); 7399 } 7400 return status; 7401 } 7402 7403 static int open_self_cmdline(void *cpu_env, int fd) 7404 { 7405 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7406 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 7407 int i; 7408 7409 for (i = 0; i < bprm->argc; i++) { 7410 size_t len = strlen(bprm->argv[i]) + 1; 7411 7412 if (write(fd, bprm->argv[i], len) != len) { 7413 return -1; 7414 } 7415 } 7416 7417 return 0; 7418 } 7419 7420 static int open_self_maps(void *cpu_env, int fd) 7421 { 7422 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7423 TaskState *ts = cpu->opaque; 7424 FILE *fp; 7425 char *line = NULL; 7426 size_t len = 0; 7427 ssize_t read; 7428 7429 fp = fopen("/proc/self/maps", "r"); 7430 if (fp == NULL) { 7431 return -1; 7432 } 7433 7434 while ((read = getline(&line, &len, fp)) != -1) { 7435 int fields, dev_maj, dev_min, inode; 7436 uint64_t min, max, offset; 7437 char flag_r, flag_w, flag_x, flag_p; 7438 char path[512] = ""; 7439 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 7440 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 7441 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 7442 7443 if ((fields < 10) || (fields > 11)) { 7444 continue; 7445 } 7446 if (h2g_valid(min)) { 7447 int flags = page_get_flags(h2g(min)); 7448 max = h2g_valid(max - 1) ? 
max : (uintptr_t)g2h(GUEST_ADDR_MAX); 7449 if (page_check_range(h2g(min), max - min, flags) == -1) { 7450 continue; 7451 } 7452 if (h2g(min) == ts->info->stack_limit) { 7453 pstrcpy(path, sizeof(path), " [stack]"); 7454 } 7455 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 7456 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 7457 h2g(min), h2g(max - 1) + 1, flag_r, flag_w, 7458 flag_x, flag_p, offset, dev_maj, dev_min, inode, 7459 path[0] ? " " : "", path); 7460 } 7461 } 7462 7463 free(line); 7464 fclose(fp); 7465 7466 return 0; 7467 } 7468 7469 static int open_self_stat(void *cpu_env, int fd) 7470 { 7471 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7472 TaskState *ts = cpu->opaque; 7473 abi_ulong start_stack = ts->info->start_stack; 7474 int i; 7475 7476 for (i = 0; i < 44; i++) { 7477 char buf[128]; 7478 int len; 7479 uint64_t val = 0; 7480 7481 if (i == 0) { 7482 /* pid */ 7483 val = getpid(); 7484 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 7485 } else if (i == 1) { 7486 /* app name */ 7487 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 7488 } else if (i == 27) { 7489 /* stack bottom */ 7490 val = start_stack; 7491 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 7492 } else { 7493 /* for the rest, there is MasterCard */ 7494 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 7495 } 7496 7497 len = strlen(buf); 7498 if (write(fd, buf, len) != len) { 7499 return -1; 7500 } 7501 } 7502 7503 return 0; 7504 } 7505 7506 static int open_self_auxv(void *cpu_env, int fd) 7507 { 7508 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7509 TaskState *ts = cpu->opaque; 7510 abi_ulong auxv = ts->info->saved_auxv; 7511 abi_ulong len = ts->info->auxv_len; 7512 char *ptr; 7513 7514 /* 7515 * Auxiliary vector is stored in target process stack. 
7516 * read in whole auxv vector and copy it to file 7517 */ 7518 ptr = lock_user(VERIFY_READ, auxv, len, 0); 7519 if (ptr != NULL) { 7520 while (len > 0) { 7521 ssize_t r; 7522 r = write(fd, ptr, len); 7523 if (r <= 0) { 7524 break; 7525 } 7526 len -= r; 7527 ptr += r; 7528 } 7529 lseek(fd, 0, SEEK_SET); 7530 unlock_user(ptr, auxv, len); 7531 } 7532 7533 return 0; 7534 } 7535 7536 static int is_proc_myself(const char *filename, const char *entry) 7537 { 7538 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 7539 filename += strlen("/proc/"); 7540 if (!strncmp(filename, "self/", strlen("self/"))) { 7541 filename += strlen("self/"); 7542 } else if (*filename >= '1' && *filename <= '9') { 7543 char myself[80]; 7544 snprintf(myself, sizeof(myself), "%d/", getpid()); 7545 if (!strncmp(filename, myself, strlen(myself))) { 7546 filename += strlen(myself); 7547 } else { 7548 return 0; 7549 } 7550 } else { 7551 return 0; 7552 } 7553 if (!strcmp(filename, entry)) { 7554 return 1; 7555 } 7556 } 7557 return 0; 7558 } 7559 7560 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 7561 static int is_proc(const char *filename, const char *entry) 7562 { 7563 return strcmp(filename, entry) == 0; 7564 } 7565 7566 static int open_net_route(void *cpu_env, int fd) 7567 { 7568 FILE *fp; 7569 char *line = NULL; 7570 size_t len = 0; 7571 ssize_t read; 7572 7573 fp = fopen("/proc/net/route", "r"); 7574 if (fp == NULL) { 7575 return -1; 7576 } 7577 7578 /* read header */ 7579 7580 read = getline(&line, &len, fp); 7581 dprintf(fd, "%s", line); 7582 7583 /* read routes */ 7584 7585 while ((read = getline(&line, &len, fp)) != -1) { 7586 char iface[16]; 7587 uint32_t dest, gw, mask; 7588 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 7589 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7590 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 7591 &mask, &mtu, &window, &irtt); 7592 dprintf(fd, 
                "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

/* Open a file on behalf of the guest.  Certain /proc files (maps,
 * stat, auxv, cmdline, and on cross-endian hosts /proc/net/route)
 * must be synthesized with guest-view contents rather than opened on
 * the host; everything else is forwarded to safe_openat().
 * Returns a host fd on success or a negative errno-style value.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* entry to intercept */
        int (*fill)(void *cpu_env, int fd);         /* writes fake contents to fd */
        int (*cmp)(const char *s1, const char *s2); /* pathname matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }    /* sentinel terminates the scan below */
    };

    /* /proc/self/exe: reopen the guest binary itself.  Prefer the fd
     * the kernel handed us at exec time (AT_EXECFD); fall back to the
     * recorded executable path when it is unavailable (0).
     */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ?
execfd : safe_openat(dirfd, exec_path, flags, mode); 7626 } 7627 7628 for (fake_open = fakes; fake_open->filename; fake_open++) { 7629 if (fake_open->cmp(pathname, fake_open->filename)) { 7630 break; 7631 } 7632 } 7633 7634 if (fake_open->filename) { 7635 const char *tmpdir; 7636 char filename[PATH_MAX]; 7637 int fd, r; 7638 7639 /* create temporary file to map stat to */ 7640 tmpdir = getenv("TMPDIR"); 7641 if (!tmpdir) 7642 tmpdir = "/tmp"; 7643 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 7644 fd = mkstemp(filename); 7645 if (fd < 0) { 7646 return fd; 7647 } 7648 unlink(filename); 7649 7650 if ((r = fake_open->fill(cpu_env, fd))) { 7651 int e = errno; 7652 close(fd); 7653 errno = e; 7654 return r; 7655 } 7656 lseek(fd, 0, SEEK_SET); 7657 7658 return fd; 7659 } 7660 7661 return safe_openat(dirfd, path(pathname), flags, mode); 7662 } 7663 7664 #define TIMER_MAGIC 0x0caf0000 7665 #define TIMER_MAGIC_MASK 0xffff0000 7666 7667 /* Convert QEMU provided timer ID back to internal 16bit index format */ 7668 static target_timer_t get_timer_id(abi_long arg) 7669 { 7670 target_timer_t timerid = arg; 7671 7672 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 7673 return -TARGET_EINVAL; 7674 } 7675 7676 timerid &= 0xffff; 7677 7678 if (timerid >= ARRAY_SIZE(g_posix_timers)) { 7679 return -TARGET_EINVAL; 7680 } 7681 7682 return timerid; 7683 } 7684 7685 static abi_long swap_data_eventfd(void *buf, size_t len) 7686 { 7687 uint64_t *counter = buf; 7688 int i; 7689 7690 if (len < sizeof(uint64_t)) { 7691 return -EINVAL; 7692 } 7693 7694 for (i = 0; i < len; i += sizeof(uint64_t)) { 7695 *counter = tswap64(*counter); 7696 counter++; 7697 } 7698 7699 return len; 7700 } 7701 7702 static TargetFdTrans target_eventfd_trans = { 7703 .host_to_target_data = swap_data_eventfd, 7704 .target_to_host_data = swap_data_eventfd, 7705 }; 7706 7707 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \ 7708 (defined(CONFIG_INOTIFY1) && 
defined(TARGET_NR_inotify_init1) && \ 7709 defined(__NR_inotify_init1)) 7710 static abi_long host_to_target_data_inotify(void *buf, size_t len) 7711 { 7712 struct inotify_event *ev; 7713 int i; 7714 uint32_t name_len; 7715 7716 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) { 7717 ev = (struct inotify_event *)((char *)buf + i); 7718 name_len = ev->len; 7719 7720 ev->wd = tswap32(ev->wd); 7721 ev->mask = tswap32(ev->mask); 7722 ev->cookie = tswap32(ev->cookie); 7723 ev->len = tswap32(name_len); 7724 } 7725 7726 return len; 7727 } 7728 7729 static TargetFdTrans target_inotify_trans = { 7730 .host_to_target_data = host_to_target_data_inotify, 7731 }; 7732 #endif 7733 7734 /* do_syscall() should always have a single exit point at the end so 7735 that actions, such as logging of syscall results, can be performed. 7736 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 7737 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 7738 abi_long arg2, abi_long arg3, abi_long arg4, 7739 abi_long arg5, abi_long arg6, abi_long arg7, 7740 abi_long arg8) 7741 { 7742 CPUState *cpu = ENV_GET_CPU(cpu_env); 7743 abi_long ret; 7744 struct stat st; 7745 struct statfs stfs; 7746 void *p; 7747 7748 #if defined(DEBUG_ERESTARTSYS) 7749 /* Debug-only code for exercising the syscall-restart code paths 7750 * in the per-architecture cpu main loops: restart every syscall 7751 * the guest makes once before letting it through. 7752 */ 7753 { 7754 static int flag; 7755 7756 flag = !flag; 7757 if (flag) { 7758 return -TARGET_ERESTARTSYS; 7759 } 7760 } 7761 #endif 7762 7763 #ifdef DEBUG 7764 gemu_log("syscall %d", num); 7765 #endif 7766 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); 7767 if(do_strace) 7768 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 7769 7770 switch(num) { 7771 case TARGET_NR_exit: 7772 /* In old applications this may be used to implement _exit(2). 
7773 However in threaded applictions it is used for thread termination, 7774 and _exit_group is used for application termination. 7775 Do thread termination if we have more then one thread. */ 7776 7777 if (block_signals()) { 7778 ret = -TARGET_ERESTARTSYS; 7779 break; 7780 } 7781 7782 cpu_list_lock(); 7783 7784 if (CPU_NEXT(first_cpu)) { 7785 TaskState *ts; 7786 7787 /* Remove the CPU from the list. */ 7788 QTAILQ_REMOVE(&cpus, cpu, node); 7789 7790 cpu_list_unlock(); 7791 7792 ts = cpu->opaque; 7793 if (ts->child_tidptr) { 7794 put_user_u32(0, ts->child_tidptr); 7795 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 7796 NULL, NULL, 0); 7797 } 7798 thread_cpu = NULL; 7799 object_unref(OBJECT(cpu)); 7800 g_free(ts); 7801 rcu_unregister_thread(); 7802 pthread_exit(NULL); 7803 } 7804 7805 cpu_list_unlock(); 7806 #ifdef TARGET_GPROF 7807 _mcleanup(); 7808 #endif 7809 gdb_exit(cpu_env, arg1); 7810 _exit(arg1); 7811 ret = 0; /* avoid warning */ 7812 break; 7813 case TARGET_NR_read: 7814 if (arg3 == 0) 7815 ret = 0; 7816 else { 7817 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7818 goto efault; 7819 ret = get_errno(safe_read(arg1, p, arg3)); 7820 if (ret >= 0 && 7821 fd_trans_host_to_target_data(arg1)) { 7822 ret = fd_trans_host_to_target_data(arg1)(p, ret); 7823 } 7824 unlock_user(p, arg2, ret); 7825 } 7826 break; 7827 case TARGET_NR_write: 7828 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7829 goto efault; 7830 if (fd_trans_target_to_host_data(arg1)) { 7831 void *copy = g_malloc(arg3); 7832 memcpy(copy, p, arg3); 7833 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 7834 if (ret >= 0) { 7835 ret = get_errno(safe_write(arg1, copy, ret)); 7836 } 7837 g_free(copy); 7838 } else { 7839 ret = get_errno(safe_write(arg1, p, arg3)); 7840 } 7841 unlock_user(p, arg2, 0); 7842 break; 7843 #ifdef TARGET_NR_open 7844 case TARGET_NR_open: 7845 if (!(p = lock_user_string(arg1))) 7846 goto efault; 7847 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 7848 
target_to_host_bitmask(arg2, fcntl_flags_tbl), 7849 arg3)); 7850 fd_trans_unregister(ret); 7851 unlock_user(p, arg1, 0); 7852 break; 7853 #endif 7854 case TARGET_NR_openat: 7855 if (!(p = lock_user_string(arg2))) 7856 goto efault; 7857 ret = get_errno(do_openat(cpu_env, arg1, p, 7858 target_to_host_bitmask(arg3, fcntl_flags_tbl), 7859 arg4)); 7860 fd_trans_unregister(ret); 7861 unlock_user(p, arg2, 0); 7862 break; 7863 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7864 case TARGET_NR_name_to_handle_at: 7865 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 7866 break; 7867 #endif 7868 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7869 case TARGET_NR_open_by_handle_at: 7870 ret = do_open_by_handle_at(arg1, arg2, arg3); 7871 fd_trans_unregister(ret); 7872 break; 7873 #endif 7874 case TARGET_NR_close: 7875 fd_trans_unregister(arg1); 7876 ret = get_errno(close(arg1)); 7877 break; 7878 case TARGET_NR_brk: 7879 ret = do_brk(arg1); 7880 break; 7881 #ifdef TARGET_NR_fork 7882 case TARGET_NR_fork: 7883 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 7884 break; 7885 #endif 7886 #ifdef TARGET_NR_waitpid 7887 case TARGET_NR_waitpid: 7888 { 7889 int status; 7890 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 7891 if (!is_error(ret) && arg2 && ret 7892 && put_user_s32(host_to_target_waitstatus(status), arg2)) 7893 goto efault; 7894 } 7895 break; 7896 #endif 7897 #ifdef TARGET_NR_waitid 7898 case TARGET_NR_waitid: 7899 { 7900 siginfo_t info; 7901 info.si_pid = 0; 7902 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 7903 if (!is_error(ret) && arg3 && info.si_pid != 0) { 7904 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 7905 goto efault; 7906 host_to_target_siginfo(p, &info); 7907 unlock_user(p, arg3, sizeof(target_siginfo_t)); 7908 } 7909 } 7910 break; 7911 #endif 7912 #ifdef TARGET_NR_creat /* not on alpha */ 7913 case TARGET_NR_creat: 7914 if (!(p = 
lock_user_string(arg1))) 7915 goto efault; 7916 ret = get_errno(creat(p, arg2)); 7917 fd_trans_unregister(ret); 7918 unlock_user(p, arg1, 0); 7919 break; 7920 #endif 7921 #ifdef TARGET_NR_link 7922 case TARGET_NR_link: 7923 { 7924 void * p2; 7925 p = lock_user_string(arg1); 7926 p2 = lock_user_string(arg2); 7927 if (!p || !p2) 7928 ret = -TARGET_EFAULT; 7929 else 7930 ret = get_errno(link(p, p2)); 7931 unlock_user(p2, arg2, 0); 7932 unlock_user(p, arg1, 0); 7933 } 7934 break; 7935 #endif 7936 #if defined(TARGET_NR_linkat) 7937 case TARGET_NR_linkat: 7938 { 7939 void * p2 = NULL; 7940 if (!arg2 || !arg4) 7941 goto efault; 7942 p = lock_user_string(arg2); 7943 p2 = lock_user_string(arg4); 7944 if (!p || !p2) 7945 ret = -TARGET_EFAULT; 7946 else 7947 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 7948 unlock_user(p, arg2, 0); 7949 unlock_user(p2, arg4, 0); 7950 } 7951 break; 7952 #endif 7953 #ifdef TARGET_NR_unlink 7954 case TARGET_NR_unlink: 7955 if (!(p = lock_user_string(arg1))) 7956 goto efault; 7957 ret = get_errno(unlink(p)); 7958 unlock_user(p, arg1, 0); 7959 break; 7960 #endif 7961 #if defined(TARGET_NR_unlinkat) 7962 case TARGET_NR_unlinkat: 7963 if (!(p = lock_user_string(arg2))) 7964 goto efault; 7965 ret = get_errno(unlinkat(arg1, p, arg3)); 7966 unlock_user(p, arg2, 0); 7967 break; 7968 #endif 7969 case TARGET_NR_execve: 7970 { 7971 char **argp, **envp; 7972 int argc, envc; 7973 abi_ulong gp; 7974 abi_ulong guest_argp; 7975 abi_ulong guest_envp; 7976 abi_ulong addr; 7977 char **q; 7978 int total_size = 0; 7979 7980 argc = 0; 7981 guest_argp = arg2; 7982 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 7983 if (get_user_ual(addr, gp)) 7984 goto efault; 7985 if (!addr) 7986 break; 7987 argc++; 7988 } 7989 envc = 0; 7990 guest_envp = arg3; 7991 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 7992 if (get_user_ual(addr, gp)) 7993 goto efault; 7994 if (!addr) 7995 break; 7996 envc++; 7997 } 7998 7999 argp = g_new0(char *, argc + 1); 8000 envp = 
g_new0(char *, envc + 1); 8001 8002 for (gp = guest_argp, q = argp; gp; 8003 gp += sizeof(abi_ulong), q++) { 8004 if (get_user_ual(addr, gp)) 8005 goto execve_efault; 8006 if (!addr) 8007 break; 8008 if (!(*q = lock_user_string(addr))) 8009 goto execve_efault; 8010 total_size += strlen(*q) + 1; 8011 } 8012 *q = NULL; 8013 8014 for (gp = guest_envp, q = envp; gp; 8015 gp += sizeof(abi_ulong), q++) { 8016 if (get_user_ual(addr, gp)) 8017 goto execve_efault; 8018 if (!addr) 8019 break; 8020 if (!(*q = lock_user_string(addr))) 8021 goto execve_efault; 8022 total_size += strlen(*q) + 1; 8023 } 8024 *q = NULL; 8025 8026 if (!(p = lock_user_string(arg1))) 8027 goto execve_efault; 8028 /* Although execve() is not an interruptible syscall it is 8029 * a special case where we must use the safe_syscall wrapper: 8030 * if we allow a signal to happen before we make the host 8031 * syscall then we will 'lose' it, because at the point of 8032 * execve the process leaves QEMU's control. So we use the 8033 * safe syscall wrapper to ensure that we either take the 8034 * signal as a guest signal, or else it does not happen 8035 * before the execve completes and makes it the other 8036 * program's problem. 
8037 */ 8038 ret = get_errno(safe_execve(p, argp, envp)); 8039 unlock_user(p, arg1, 0); 8040 8041 goto execve_end; 8042 8043 execve_efault: 8044 ret = -TARGET_EFAULT; 8045 8046 execve_end: 8047 for (gp = guest_argp, q = argp; *q; 8048 gp += sizeof(abi_ulong), q++) { 8049 if (get_user_ual(addr, gp) 8050 || !addr) 8051 break; 8052 unlock_user(*q, addr, 0); 8053 } 8054 for (gp = guest_envp, q = envp; *q; 8055 gp += sizeof(abi_ulong), q++) { 8056 if (get_user_ual(addr, gp) 8057 || !addr) 8058 break; 8059 unlock_user(*q, addr, 0); 8060 } 8061 8062 g_free(argp); 8063 g_free(envp); 8064 } 8065 break; 8066 case TARGET_NR_chdir: 8067 if (!(p = lock_user_string(arg1))) 8068 goto efault; 8069 ret = get_errno(chdir(p)); 8070 unlock_user(p, arg1, 0); 8071 break; 8072 #ifdef TARGET_NR_time 8073 case TARGET_NR_time: 8074 { 8075 time_t host_time; 8076 ret = get_errno(time(&host_time)); 8077 if (!is_error(ret) 8078 && arg1 8079 && put_user_sal(host_time, arg1)) 8080 goto efault; 8081 } 8082 break; 8083 #endif 8084 #ifdef TARGET_NR_mknod 8085 case TARGET_NR_mknod: 8086 if (!(p = lock_user_string(arg1))) 8087 goto efault; 8088 ret = get_errno(mknod(p, arg2, arg3)); 8089 unlock_user(p, arg1, 0); 8090 break; 8091 #endif 8092 #if defined(TARGET_NR_mknodat) 8093 case TARGET_NR_mknodat: 8094 if (!(p = lock_user_string(arg2))) 8095 goto efault; 8096 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8097 unlock_user(p, arg2, 0); 8098 break; 8099 #endif 8100 #ifdef TARGET_NR_chmod 8101 case TARGET_NR_chmod: 8102 if (!(p = lock_user_string(arg1))) 8103 goto efault; 8104 ret = get_errno(chmod(p, arg2)); 8105 unlock_user(p, arg1, 0); 8106 break; 8107 #endif 8108 #ifdef TARGET_NR_break 8109 case TARGET_NR_break: 8110 goto unimplemented; 8111 #endif 8112 #ifdef TARGET_NR_oldstat 8113 case TARGET_NR_oldstat: 8114 goto unimplemented; 8115 #endif 8116 case TARGET_NR_lseek: 8117 ret = get_errno(lseek(arg1, arg2, arg3)); 8118 break; 8119 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8120 /* 
Alpha specific */ 8121 case TARGET_NR_getxpid: 8122 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 8123 ret = get_errno(getpid()); 8124 break; 8125 #endif 8126 #ifdef TARGET_NR_getpid 8127 case TARGET_NR_getpid: 8128 ret = get_errno(getpid()); 8129 break; 8130 #endif 8131 case TARGET_NR_mount: 8132 { 8133 /* need to look at the data field */ 8134 void *p2, *p3; 8135 8136 if (arg1) { 8137 p = lock_user_string(arg1); 8138 if (!p) { 8139 goto efault; 8140 } 8141 } else { 8142 p = NULL; 8143 } 8144 8145 p2 = lock_user_string(arg2); 8146 if (!p2) { 8147 if (arg1) { 8148 unlock_user(p, arg1, 0); 8149 } 8150 goto efault; 8151 } 8152 8153 if (arg3) { 8154 p3 = lock_user_string(arg3); 8155 if (!p3) { 8156 if (arg1) { 8157 unlock_user(p, arg1, 0); 8158 } 8159 unlock_user(p2, arg2, 0); 8160 goto efault; 8161 } 8162 } else { 8163 p3 = NULL; 8164 } 8165 8166 /* FIXME - arg5 should be locked, but it isn't clear how to 8167 * do that since it's not guaranteed to be a NULL-terminated 8168 * string. 
8169 */ 8170 if (!arg5) { 8171 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 8172 } else { 8173 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 8174 } 8175 ret = get_errno(ret); 8176 8177 if (arg1) { 8178 unlock_user(p, arg1, 0); 8179 } 8180 unlock_user(p2, arg2, 0); 8181 if (arg3) { 8182 unlock_user(p3, arg3, 0); 8183 } 8184 } 8185 break; 8186 #ifdef TARGET_NR_umount 8187 case TARGET_NR_umount: 8188 if (!(p = lock_user_string(arg1))) 8189 goto efault; 8190 ret = get_errno(umount(p)); 8191 unlock_user(p, arg1, 0); 8192 break; 8193 #endif 8194 #ifdef TARGET_NR_stime /* not on alpha */ 8195 case TARGET_NR_stime: 8196 { 8197 time_t host_time; 8198 if (get_user_sal(host_time, arg1)) 8199 goto efault; 8200 ret = get_errno(stime(&host_time)); 8201 } 8202 break; 8203 #endif 8204 case TARGET_NR_ptrace: 8205 goto unimplemented; 8206 #ifdef TARGET_NR_alarm /* not on alpha */ 8207 case TARGET_NR_alarm: 8208 ret = alarm(arg1); 8209 break; 8210 #endif 8211 #ifdef TARGET_NR_oldfstat 8212 case TARGET_NR_oldfstat: 8213 goto unimplemented; 8214 #endif 8215 #ifdef TARGET_NR_pause /* not on alpha */ 8216 case TARGET_NR_pause: 8217 if (!block_signals()) { 8218 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 8219 } 8220 ret = -TARGET_EINTR; 8221 break; 8222 #endif 8223 #ifdef TARGET_NR_utime 8224 case TARGET_NR_utime: 8225 { 8226 struct utimbuf tbuf, *host_tbuf; 8227 struct target_utimbuf *target_tbuf; 8228 if (arg2) { 8229 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 8230 goto efault; 8231 tbuf.actime = tswapal(target_tbuf->actime); 8232 tbuf.modtime = tswapal(target_tbuf->modtime); 8233 unlock_user_struct(target_tbuf, arg2, 0); 8234 host_tbuf = &tbuf; 8235 } else { 8236 host_tbuf = NULL; 8237 } 8238 if (!(p = lock_user_string(arg1))) 8239 goto efault; 8240 ret = get_errno(utime(p, host_tbuf)); 8241 unlock_user(p, arg1, 0); 8242 } 8243 break; 8244 #endif 8245 #ifdef TARGET_NR_utimes 8246 case TARGET_NR_utimes: 8247 { 8248 struct timeval *tvp, tv[2]; 
8249 if (arg2) { 8250 if (copy_from_user_timeval(&tv[0], arg2) 8251 || copy_from_user_timeval(&tv[1], 8252 arg2 + sizeof(struct target_timeval))) 8253 goto efault; 8254 tvp = tv; 8255 } else { 8256 tvp = NULL; 8257 } 8258 if (!(p = lock_user_string(arg1))) 8259 goto efault; 8260 ret = get_errno(utimes(p, tvp)); 8261 unlock_user(p, arg1, 0); 8262 } 8263 break; 8264 #endif 8265 #if defined(TARGET_NR_futimesat) 8266 case TARGET_NR_futimesat: 8267 { 8268 struct timeval *tvp, tv[2]; 8269 if (arg3) { 8270 if (copy_from_user_timeval(&tv[0], arg3) 8271 || copy_from_user_timeval(&tv[1], 8272 arg3 + sizeof(struct target_timeval))) 8273 goto efault; 8274 tvp = tv; 8275 } else { 8276 tvp = NULL; 8277 } 8278 if (!(p = lock_user_string(arg2))) 8279 goto efault; 8280 ret = get_errno(futimesat(arg1, path(p), tvp)); 8281 unlock_user(p, arg2, 0); 8282 } 8283 break; 8284 #endif 8285 #ifdef TARGET_NR_stty 8286 case TARGET_NR_stty: 8287 goto unimplemented; 8288 #endif 8289 #ifdef TARGET_NR_gtty 8290 case TARGET_NR_gtty: 8291 goto unimplemented; 8292 #endif 8293 #ifdef TARGET_NR_access 8294 case TARGET_NR_access: 8295 if (!(p = lock_user_string(arg1))) 8296 goto efault; 8297 ret = get_errno(access(path(p), arg2)); 8298 unlock_user(p, arg1, 0); 8299 break; 8300 #endif 8301 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 8302 case TARGET_NR_faccessat: 8303 if (!(p = lock_user_string(arg2))) 8304 goto efault; 8305 ret = get_errno(faccessat(arg1, p, arg3, 0)); 8306 unlock_user(p, arg2, 0); 8307 break; 8308 #endif 8309 #ifdef TARGET_NR_nice /* not on alpha */ 8310 case TARGET_NR_nice: 8311 ret = get_errno(nice(arg1)); 8312 break; 8313 #endif 8314 #ifdef TARGET_NR_ftime 8315 case TARGET_NR_ftime: 8316 goto unimplemented; 8317 #endif 8318 case TARGET_NR_sync: 8319 sync(); 8320 ret = 0; 8321 break; 8322 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 8323 case TARGET_NR_syncfs: 8324 ret = get_errno(syncfs(arg1)); 8325 break; 8326 #endif 8327 case TARGET_NR_kill: 8328 ret = 
get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 8329 break; 8330 #ifdef TARGET_NR_rename 8331 case TARGET_NR_rename: 8332 { 8333 void *p2; 8334 p = lock_user_string(arg1); 8335 p2 = lock_user_string(arg2); 8336 if (!p || !p2) 8337 ret = -TARGET_EFAULT; 8338 else 8339 ret = get_errno(rename(p, p2)); 8340 unlock_user(p2, arg2, 0); 8341 unlock_user(p, arg1, 0); 8342 } 8343 break; 8344 #endif 8345 #if defined(TARGET_NR_renameat) 8346 case TARGET_NR_renameat: 8347 { 8348 void *p2; 8349 p = lock_user_string(arg2); 8350 p2 = lock_user_string(arg4); 8351 if (!p || !p2) 8352 ret = -TARGET_EFAULT; 8353 else 8354 ret = get_errno(renameat(arg1, p, arg3, p2)); 8355 unlock_user(p2, arg4, 0); 8356 unlock_user(p, arg2, 0); 8357 } 8358 break; 8359 #endif 8360 #ifdef TARGET_NR_mkdir 8361 case TARGET_NR_mkdir: 8362 if (!(p = lock_user_string(arg1))) 8363 goto efault; 8364 ret = get_errno(mkdir(p, arg2)); 8365 unlock_user(p, arg1, 0); 8366 break; 8367 #endif 8368 #if defined(TARGET_NR_mkdirat) 8369 case TARGET_NR_mkdirat: 8370 if (!(p = lock_user_string(arg2))) 8371 goto efault; 8372 ret = get_errno(mkdirat(arg1, p, arg3)); 8373 unlock_user(p, arg2, 0); 8374 break; 8375 #endif 8376 #ifdef TARGET_NR_rmdir 8377 case TARGET_NR_rmdir: 8378 if (!(p = lock_user_string(arg1))) 8379 goto efault; 8380 ret = get_errno(rmdir(p)); 8381 unlock_user(p, arg1, 0); 8382 break; 8383 #endif 8384 case TARGET_NR_dup: 8385 ret = get_errno(dup(arg1)); 8386 if (ret >= 0) { 8387 fd_trans_dup(arg1, ret); 8388 } 8389 break; 8390 #ifdef TARGET_NR_pipe 8391 case TARGET_NR_pipe: 8392 ret = do_pipe(cpu_env, arg1, 0, 0); 8393 break; 8394 #endif 8395 #ifdef TARGET_NR_pipe2 8396 case TARGET_NR_pipe2: 8397 ret = do_pipe(cpu_env, arg1, 8398 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 8399 break; 8400 #endif 8401 case TARGET_NR_times: 8402 { 8403 struct target_tms *tmsp; 8404 struct tms tms; 8405 ret = get_errno(times(&tms)); 8406 if (arg1) { 8407 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct 
target_tms), 0); 8408 if (!tmsp) 8409 goto efault; 8410 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 8411 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 8412 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 8413 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 8414 } 8415 if (!is_error(ret)) 8416 ret = host_to_target_clock_t(ret); 8417 } 8418 break; 8419 #ifdef TARGET_NR_prof 8420 case TARGET_NR_prof: 8421 goto unimplemented; 8422 #endif 8423 #ifdef TARGET_NR_signal 8424 case TARGET_NR_signal: 8425 goto unimplemented; 8426 #endif 8427 case TARGET_NR_acct: 8428 if (arg1 == 0) { 8429 ret = get_errno(acct(NULL)); 8430 } else { 8431 if (!(p = lock_user_string(arg1))) 8432 goto efault; 8433 ret = get_errno(acct(path(p))); 8434 unlock_user(p, arg1, 0); 8435 } 8436 break; 8437 #ifdef TARGET_NR_umount2 8438 case TARGET_NR_umount2: 8439 if (!(p = lock_user_string(arg1))) 8440 goto efault; 8441 ret = get_errno(umount2(p, arg2)); 8442 unlock_user(p, arg1, 0); 8443 break; 8444 #endif 8445 #ifdef TARGET_NR_lock 8446 case TARGET_NR_lock: 8447 goto unimplemented; 8448 #endif 8449 case TARGET_NR_ioctl: 8450 ret = do_ioctl(arg1, arg2, arg3); 8451 break; 8452 case TARGET_NR_fcntl: 8453 ret = do_fcntl(arg1, arg2, arg3); 8454 break; 8455 #ifdef TARGET_NR_mpx 8456 case TARGET_NR_mpx: 8457 goto unimplemented; 8458 #endif 8459 case TARGET_NR_setpgid: 8460 ret = get_errno(setpgid(arg1, arg2)); 8461 break; 8462 #ifdef TARGET_NR_ulimit 8463 case TARGET_NR_ulimit: 8464 goto unimplemented; 8465 #endif 8466 #ifdef TARGET_NR_oldolduname 8467 case TARGET_NR_oldolduname: 8468 goto unimplemented; 8469 #endif 8470 case TARGET_NR_umask: 8471 ret = get_errno(umask(arg1)); 8472 break; 8473 case TARGET_NR_chroot: 8474 if (!(p = lock_user_string(arg1))) 8475 goto efault; 8476 ret = get_errno(chroot(p)); 8477 unlock_user(p, arg1, 0); 8478 break; 8479 #ifdef TARGET_NR_ustat 8480 case TARGET_NR_ustat: 8481 goto unimplemented; 
8482 #endif 8483 #ifdef TARGET_NR_dup2 8484 case TARGET_NR_dup2: 8485 ret = get_errno(dup2(arg1, arg2)); 8486 if (ret >= 0) { 8487 fd_trans_dup(arg1, arg2); 8488 } 8489 break; 8490 #endif 8491 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 8492 case TARGET_NR_dup3: 8493 ret = get_errno(dup3(arg1, arg2, arg3)); 8494 if (ret >= 0) { 8495 fd_trans_dup(arg1, arg2); 8496 } 8497 break; 8498 #endif 8499 #ifdef TARGET_NR_getppid /* not on alpha */ 8500 case TARGET_NR_getppid: 8501 ret = get_errno(getppid()); 8502 break; 8503 #endif 8504 #ifdef TARGET_NR_getpgrp 8505 case TARGET_NR_getpgrp: 8506 ret = get_errno(getpgrp()); 8507 break; 8508 #endif 8509 case TARGET_NR_setsid: 8510 ret = get_errno(setsid()); 8511 break; 8512 #ifdef TARGET_NR_sigaction 8513 case TARGET_NR_sigaction: 8514 { 8515 #if defined(TARGET_ALPHA) 8516 struct target_sigaction act, oact, *pact = 0; 8517 struct target_old_sigaction *old_act; 8518 if (arg2) { 8519 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8520 goto efault; 8521 act._sa_handler = old_act->_sa_handler; 8522 target_siginitset(&act.sa_mask, old_act->sa_mask); 8523 act.sa_flags = old_act->sa_flags; 8524 act.sa_restorer = 0; 8525 unlock_user_struct(old_act, arg2, 0); 8526 pact = &act; 8527 } 8528 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8529 if (!is_error(ret) && arg3) { 8530 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8531 goto efault; 8532 old_act->_sa_handler = oact._sa_handler; 8533 old_act->sa_mask = oact.sa_mask.sig[0]; 8534 old_act->sa_flags = oact.sa_flags; 8535 unlock_user_struct(old_act, arg3, 1); 8536 } 8537 #elif defined(TARGET_MIPS) 8538 struct target_sigaction act, oact, *pact, *old_act; 8539 8540 if (arg2) { 8541 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8542 goto efault; 8543 act._sa_handler = old_act->_sa_handler; 8544 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 8545 act.sa_flags = old_act->sa_flags; 8546 unlock_user_struct(old_act, arg2, 0); 8547 pact = &act; 8548 } 
else { 8549 pact = NULL; 8550 } 8551 8552 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8553 8554 if (!is_error(ret) && arg3) { 8555 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8556 goto efault; 8557 old_act->_sa_handler = oact._sa_handler; 8558 old_act->sa_flags = oact.sa_flags; 8559 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 8560 old_act->sa_mask.sig[1] = 0; 8561 old_act->sa_mask.sig[2] = 0; 8562 old_act->sa_mask.sig[3] = 0; 8563 unlock_user_struct(old_act, arg3, 1); 8564 } 8565 #else 8566 struct target_old_sigaction *old_act; 8567 struct target_sigaction act, oact, *pact; 8568 if (arg2) { 8569 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8570 goto efault; 8571 act._sa_handler = old_act->_sa_handler; 8572 target_siginitset(&act.sa_mask, old_act->sa_mask); 8573 act.sa_flags = old_act->sa_flags; 8574 act.sa_restorer = old_act->sa_restorer; 8575 unlock_user_struct(old_act, arg2, 0); 8576 pact = &act; 8577 } else { 8578 pact = NULL; 8579 } 8580 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8581 if (!is_error(ret) && arg3) { 8582 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8583 goto efault; 8584 old_act->_sa_handler = oact._sa_handler; 8585 old_act->sa_mask = oact.sa_mask.sig[0]; 8586 old_act->sa_flags = oact.sa_flags; 8587 old_act->sa_restorer = oact.sa_restorer; 8588 unlock_user_struct(old_act, arg3, 1); 8589 } 8590 #endif 8591 } 8592 break; 8593 #endif 8594 case TARGET_NR_rt_sigaction: 8595 { 8596 #if defined(TARGET_ALPHA) 8597 /* For Alpha and SPARC this is a 5 argument syscall, with 8598 * a 'restorer' parameter which must be copied into the 8599 * sa_restorer field of the sigaction struct. 8600 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 8601 * and arg5 is the sigsetsize. 8602 * Alpha also has a separate rt_sigaction struct that it uses 8603 * here; SPARC uses the usual sigaction struct. 
8604 */ 8605 struct target_rt_sigaction *rt_act; 8606 struct target_sigaction act, oact, *pact = 0; 8607 8608 if (arg4 != sizeof(target_sigset_t)) { 8609 ret = -TARGET_EINVAL; 8610 break; 8611 } 8612 if (arg2) { 8613 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 8614 goto efault; 8615 act._sa_handler = rt_act->_sa_handler; 8616 act.sa_mask = rt_act->sa_mask; 8617 act.sa_flags = rt_act->sa_flags; 8618 act.sa_restorer = arg5; 8619 unlock_user_struct(rt_act, arg2, 0); 8620 pact = &act; 8621 } 8622 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8623 if (!is_error(ret) && arg3) { 8624 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 8625 goto efault; 8626 rt_act->_sa_handler = oact._sa_handler; 8627 rt_act->sa_mask = oact.sa_mask; 8628 rt_act->sa_flags = oact.sa_flags; 8629 unlock_user_struct(rt_act, arg3, 1); 8630 } 8631 #else 8632 #ifdef TARGET_SPARC 8633 target_ulong restorer = arg4; 8634 target_ulong sigsetsize = arg5; 8635 #else 8636 target_ulong sigsetsize = arg4; 8637 #endif 8638 struct target_sigaction *act; 8639 struct target_sigaction *oact; 8640 8641 if (sigsetsize != sizeof(target_sigset_t)) { 8642 ret = -TARGET_EINVAL; 8643 break; 8644 } 8645 if (arg2) { 8646 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) { 8647 goto efault; 8648 } 8649 #ifdef TARGET_SPARC 8650 act->sa_restorer = restorer; 8651 #endif 8652 } else { 8653 act = NULL; 8654 } 8655 if (arg3) { 8656 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 8657 ret = -TARGET_EFAULT; 8658 goto rt_sigaction_fail; 8659 } 8660 } else 8661 oact = NULL; 8662 ret = get_errno(do_sigaction(arg1, act, oact)); 8663 rt_sigaction_fail: 8664 if (act) 8665 unlock_user_struct(act, arg2, 0); 8666 if (oact) 8667 unlock_user_struct(oact, arg3, 1); 8668 #endif 8669 } 8670 break; 8671 #ifdef TARGET_NR_sgetmask /* not on alpha */ 8672 case TARGET_NR_sgetmask: 8673 { 8674 sigset_t cur_set; 8675 abi_ulong target_set; 8676 ret = do_sigprocmask(0, NULL, &cur_set); 8677 if (!ret) { 8678 
host_to_target_old_sigset(&target_set, &cur_set); 8679 ret = target_set; 8680 } 8681 } 8682 break; 8683 #endif 8684 #ifdef TARGET_NR_ssetmask /* not on alpha */ 8685 case TARGET_NR_ssetmask: 8686 { 8687 sigset_t set, oset; 8688 abi_ulong target_set = arg1; 8689 target_to_host_old_sigset(&set, &target_set); 8690 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 8691 if (!ret) { 8692 host_to_target_old_sigset(&target_set, &oset); 8693 ret = target_set; 8694 } 8695 } 8696 break; 8697 #endif 8698 #ifdef TARGET_NR_sigprocmask 8699 case TARGET_NR_sigprocmask: 8700 { 8701 #if defined(TARGET_ALPHA) 8702 sigset_t set, oldset; 8703 abi_ulong mask; 8704 int how; 8705 8706 switch (arg1) { 8707 case TARGET_SIG_BLOCK: 8708 how = SIG_BLOCK; 8709 break; 8710 case TARGET_SIG_UNBLOCK: 8711 how = SIG_UNBLOCK; 8712 break; 8713 case TARGET_SIG_SETMASK: 8714 how = SIG_SETMASK; 8715 break; 8716 default: 8717 ret = -TARGET_EINVAL; 8718 goto fail; 8719 } 8720 mask = arg2; 8721 target_to_host_old_sigset(&set, &mask); 8722 8723 ret = do_sigprocmask(how, &set, &oldset); 8724 if (!is_error(ret)) { 8725 host_to_target_old_sigset(&mask, &oldset); 8726 ret = mask; 8727 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 8728 } 8729 #else 8730 sigset_t set, oldset, *set_ptr; 8731 int how; 8732 8733 if (arg2) { 8734 switch (arg1) { 8735 case TARGET_SIG_BLOCK: 8736 how = SIG_BLOCK; 8737 break; 8738 case TARGET_SIG_UNBLOCK: 8739 how = SIG_UNBLOCK; 8740 break; 8741 case TARGET_SIG_SETMASK: 8742 how = SIG_SETMASK; 8743 break; 8744 default: 8745 ret = -TARGET_EINVAL; 8746 goto fail; 8747 } 8748 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 8749 goto efault; 8750 target_to_host_old_sigset(&set, p); 8751 unlock_user(p, arg2, 0); 8752 set_ptr = &set; 8753 } else { 8754 how = 0; 8755 set_ptr = NULL; 8756 } 8757 ret = do_sigprocmask(how, set_ptr, &oldset); 8758 if (!is_error(ret) && arg3) { 8759 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 8760 goto 
efault; 8761 host_to_target_old_sigset(p, &oldset); 8762 unlock_user(p, arg3, sizeof(target_sigset_t)); 8763 } 8764 #endif 8765 } 8766 break; 8767 #endif 8768 case TARGET_NR_rt_sigprocmask: 8769 { 8770 int how = arg1; 8771 sigset_t set, oldset, *set_ptr; 8772 8773 if (arg4 != sizeof(target_sigset_t)) { 8774 ret = -TARGET_EINVAL; 8775 break; 8776 } 8777 8778 if (arg2) { 8779 switch(how) { 8780 case TARGET_SIG_BLOCK: 8781 how = SIG_BLOCK; 8782 break; 8783 case TARGET_SIG_UNBLOCK: 8784 how = SIG_UNBLOCK; 8785 break; 8786 case TARGET_SIG_SETMASK: 8787 how = SIG_SETMASK; 8788 break; 8789 default: 8790 ret = -TARGET_EINVAL; 8791 goto fail; 8792 } 8793 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 8794 goto efault; 8795 target_to_host_sigset(&set, p); 8796 unlock_user(p, arg2, 0); 8797 set_ptr = &set; 8798 } else { 8799 how = 0; 8800 set_ptr = NULL; 8801 } 8802 ret = do_sigprocmask(how, set_ptr, &oldset); 8803 if (!is_error(ret) && arg3) { 8804 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 8805 goto efault; 8806 host_to_target_sigset(p, &oldset); 8807 unlock_user(p, arg3, sizeof(target_sigset_t)); 8808 } 8809 } 8810 break; 8811 #ifdef TARGET_NR_sigpending 8812 case TARGET_NR_sigpending: 8813 { 8814 sigset_t set; 8815 ret = get_errno(sigpending(&set)); 8816 if (!is_error(ret)) { 8817 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 8818 goto efault; 8819 host_to_target_old_sigset(p, &set); 8820 unlock_user(p, arg1, sizeof(target_sigset_t)); 8821 } 8822 } 8823 break; 8824 #endif 8825 case TARGET_NR_rt_sigpending: 8826 { 8827 sigset_t set; 8828 8829 /* Yes, this check is >, not != like most. We follow the kernel's 8830 * logic and it does it like this because it implements 8831 * NR_sigpending through the same code path, and in that case 8832 * the old_sigset_t is smaller in size. 
8833 */ 8834 if (arg2 > sizeof(target_sigset_t)) { 8835 ret = -TARGET_EINVAL; 8836 break; 8837 } 8838 8839 ret = get_errno(sigpending(&set)); 8840 if (!is_error(ret)) { 8841 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 8842 goto efault; 8843 host_to_target_sigset(p, &set); 8844 unlock_user(p, arg1, sizeof(target_sigset_t)); 8845 } 8846 } 8847 break; 8848 #ifdef TARGET_NR_sigsuspend 8849 case TARGET_NR_sigsuspend: 8850 { 8851 TaskState *ts = cpu->opaque; 8852 #if defined(TARGET_ALPHA) 8853 abi_ulong mask = arg1; 8854 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); 8855 #else 8856 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8857 goto efault; 8858 target_to_host_old_sigset(&ts->sigsuspend_mask, p); 8859 unlock_user(p, arg1, 0); 8860 #endif 8861 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 8862 SIGSET_T_SIZE)); 8863 if (ret != -TARGET_ERESTARTSYS) { 8864 ts->in_sigsuspend = 1; 8865 } 8866 } 8867 break; 8868 #endif 8869 case TARGET_NR_rt_sigsuspend: 8870 { 8871 TaskState *ts = cpu->opaque; 8872 8873 if (arg2 != sizeof(target_sigset_t)) { 8874 ret = -TARGET_EINVAL; 8875 break; 8876 } 8877 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8878 goto efault; 8879 target_to_host_sigset(&ts->sigsuspend_mask, p); 8880 unlock_user(p, arg1, 0); 8881 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 8882 SIGSET_T_SIZE)); 8883 if (ret != -TARGET_ERESTARTSYS) { 8884 ts->in_sigsuspend = 1; 8885 } 8886 } 8887 break; 8888 case TARGET_NR_rt_sigtimedwait: 8889 { 8890 sigset_t set; 8891 struct timespec uts, *puts; 8892 siginfo_t uinfo; 8893 8894 if (arg4 != sizeof(target_sigset_t)) { 8895 ret = -TARGET_EINVAL; 8896 break; 8897 } 8898 8899 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8900 goto efault; 8901 target_to_host_sigset(&set, p); 8902 unlock_user(p, arg1, 0); 8903 if (arg3) { 8904 puts = &uts; 8905 target_to_host_timespec(puts, arg3); 8906 } else { 8907 puts = 
NULL; 8908 } 8909 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 8910 SIGSET_T_SIZE)); 8911 if (!is_error(ret)) { 8912 if (arg2) { 8913 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 8914 0); 8915 if (!p) { 8916 goto efault; 8917 } 8918 host_to_target_siginfo(p, &uinfo); 8919 unlock_user(p, arg2, sizeof(target_siginfo_t)); 8920 } 8921 ret = host_to_target_signal(ret); 8922 } 8923 } 8924 break; 8925 case TARGET_NR_rt_sigqueueinfo: 8926 { 8927 siginfo_t uinfo; 8928 8929 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 8930 if (!p) { 8931 goto efault; 8932 } 8933 target_to_host_siginfo(&uinfo, p); 8934 unlock_user(p, arg3, 0); 8935 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 8936 } 8937 break; 8938 case TARGET_NR_rt_tgsigqueueinfo: 8939 { 8940 siginfo_t uinfo; 8941 8942 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 8943 if (!p) { 8944 goto efault; 8945 } 8946 target_to_host_siginfo(&uinfo, p); 8947 unlock_user(p, arg4, 0); 8948 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo)); 8949 } 8950 break; 8951 #ifdef TARGET_NR_sigreturn 8952 case TARGET_NR_sigreturn: 8953 if (block_signals()) { 8954 ret = -TARGET_ERESTARTSYS; 8955 } else { 8956 ret = do_sigreturn(cpu_env); 8957 } 8958 break; 8959 #endif 8960 case TARGET_NR_rt_sigreturn: 8961 if (block_signals()) { 8962 ret = -TARGET_ERESTARTSYS; 8963 } else { 8964 ret = do_rt_sigreturn(cpu_env); 8965 } 8966 break; 8967 case TARGET_NR_sethostname: 8968 if (!(p = lock_user_string(arg1))) 8969 goto efault; 8970 ret = get_errno(sethostname(p, arg2)); 8971 unlock_user(p, arg1, 0); 8972 break; 8973 case TARGET_NR_setrlimit: 8974 { 8975 int resource = target_to_host_resource(arg1); 8976 struct target_rlimit *target_rlim; 8977 struct rlimit rlim; 8978 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 8979 goto efault; 8980 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 8981 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 
8982 unlock_user_struct(target_rlim, arg2, 0); 8983 ret = get_errno(setrlimit(resource, &rlim)); 8984 } 8985 break; 8986 case TARGET_NR_getrlimit: 8987 { 8988 int resource = target_to_host_resource(arg1); 8989 struct target_rlimit *target_rlim; 8990 struct rlimit rlim; 8991 8992 ret = get_errno(getrlimit(resource, &rlim)); 8993 if (!is_error(ret)) { 8994 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 8995 goto efault; 8996 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 8997 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 8998 unlock_user_struct(target_rlim, arg2, 1); 8999 } 9000 } 9001 break; 9002 case TARGET_NR_getrusage: 9003 { 9004 struct rusage rusage; 9005 ret = get_errno(getrusage(arg1, &rusage)); 9006 if (!is_error(ret)) { 9007 ret = host_to_target_rusage(arg2, &rusage); 9008 } 9009 } 9010 break; 9011 case TARGET_NR_gettimeofday: 9012 { 9013 struct timeval tv; 9014 ret = get_errno(gettimeofday(&tv, NULL)); 9015 if (!is_error(ret)) { 9016 if (copy_to_user_timeval(arg1, &tv)) 9017 goto efault; 9018 } 9019 } 9020 break; 9021 case TARGET_NR_settimeofday: 9022 { 9023 struct timeval tv, *ptv = NULL; 9024 struct timezone tz, *ptz = NULL; 9025 9026 if (arg1) { 9027 if (copy_from_user_timeval(&tv, arg1)) { 9028 goto efault; 9029 } 9030 ptv = &tv; 9031 } 9032 9033 if (arg2) { 9034 if (copy_from_user_timezone(&tz, arg2)) { 9035 goto efault; 9036 } 9037 ptz = &tz; 9038 } 9039 9040 ret = get_errno(settimeofday(ptv, ptz)); 9041 } 9042 break; 9043 #if defined(TARGET_NR_select) 9044 case TARGET_NR_select: 9045 #if defined(TARGET_WANT_NI_OLD_SELECT) 9046 /* some architectures used to have old_select here 9047 * but now ENOSYS it. 
9048 */ 9049 ret = -TARGET_ENOSYS; 9050 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9051 ret = do_old_select(arg1); 9052 #else 9053 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9054 #endif 9055 break; 9056 #endif 9057 #ifdef TARGET_NR_pselect6 9058 case TARGET_NR_pselect6: 9059 { 9060 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 9061 fd_set rfds, wfds, efds; 9062 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 9063 struct timespec ts, *ts_ptr; 9064 9065 /* 9066 * The 6th arg is actually two args smashed together, 9067 * so we cannot use the C library. 9068 */ 9069 sigset_t set; 9070 struct { 9071 sigset_t *set; 9072 size_t size; 9073 } sig, *sig_ptr; 9074 9075 abi_ulong arg_sigset, arg_sigsize, *arg7; 9076 target_sigset_t *target_sigset; 9077 9078 n = arg1; 9079 rfd_addr = arg2; 9080 wfd_addr = arg3; 9081 efd_addr = arg4; 9082 ts_addr = arg5; 9083 9084 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 9085 if (ret) { 9086 goto fail; 9087 } 9088 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 9089 if (ret) { 9090 goto fail; 9091 } 9092 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 9093 if (ret) { 9094 goto fail; 9095 } 9096 9097 /* 9098 * This takes a timespec, and not a timeval, so we cannot 9099 * use the do_select() helper ... 
9100 */ 9101 if (ts_addr) { 9102 if (target_to_host_timespec(&ts, ts_addr)) { 9103 goto efault; 9104 } 9105 ts_ptr = &ts; 9106 } else { 9107 ts_ptr = NULL; 9108 } 9109 9110 /* Extract the two packed args for the sigset */ 9111 if (arg6) { 9112 sig_ptr = &sig; 9113 sig.size = SIGSET_T_SIZE; 9114 9115 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 9116 if (!arg7) { 9117 goto efault; 9118 } 9119 arg_sigset = tswapal(arg7[0]); 9120 arg_sigsize = tswapal(arg7[1]); 9121 unlock_user(arg7, arg6, 0); 9122 9123 if (arg_sigset) { 9124 sig.set = &set; 9125 if (arg_sigsize != sizeof(*target_sigset)) { 9126 /* Like the kernel, we enforce correct size sigsets */ 9127 ret = -TARGET_EINVAL; 9128 goto fail; 9129 } 9130 target_sigset = lock_user(VERIFY_READ, arg_sigset, 9131 sizeof(*target_sigset), 1); 9132 if (!target_sigset) { 9133 goto efault; 9134 } 9135 target_to_host_sigset(&set, target_sigset); 9136 unlock_user(target_sigset, arg_sigset, 0); 9137 } else { 9138 sig.set = NULL; 9139 } 9140 } else { 9141 sig_ptr = NULL; 9142 } 9143 9144 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 9145 ts_ptr, sig_ptr)); 9146 9147 if (!is_error(ret)) { 9148 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 9149 goto efault; 9150 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 9151 goto efault; 9152 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 9153 goto efault; 9154 9155 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 9156 goto efault; 9157 } 9158 } 9159 break; 9160 #endif 9161 #ifdef TARGET_NR_symlink 9162 case TARGET_NR_symlink: 9163 { 9164 void *p2; 9165 p = lock_user_string(arg1); 9166 p2 = lock_user_string(arg2); 9167 if (!p || !p2) 9168 ret = -TARGET_EFAULT; 9169 else 9170 ret = get_errno(symlink(p, p2)); 9171 unlock_user(p2, arg2, 0); 9172 unlock_user(p, arg1, 0); 9173 } 9174 break; 9175 #endif 9176 #if defined(TARGET_NR_symlinkat) 9177 case TARGET_NR_symlinkat: 9178 { 9179 void *p2; 9180 p = lock_user_string(arg1); 9181 p2 = 
lock_user_string(arg3); 9182 if (!p || !p2) 9183 ret = -TARGET_EFAULT; 9184 else 9185 ret = get_errno(symlinkat(p, arg2, p2)); 9186 unlock_user(p2, arg3, 0); 9187 unlock_user(p, arg1, 0); 9188 } 9189 break; 9190 #endif 9191 #ifdef TARGET_NR_oldlstat 9192 case TARGET_NR_oldlstat: 9193 goto unimplemented; 9194 #endif 9195 #ifdef TARGET_NR_readlink 9196 case TARGET_NR_readlink: 9197 { 9198 void *p2; 9199 p = lock_user_string(arg1); 9200 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9201 if (!p || !p2) { 9202 ret = -TARGET_EFAULT; 9203 } else if (!arg3) { 9204 /* Short circuit this for the magic exe check. */ 9205 ret = -TARGET_EINVAL; 9206 } else if (is_proc_myself((const char *)p, "exe")) { 9207 char real[PATH_MAX], *temp; 9208 temp = realpath(exec_path, real); 9209 /* Return value is # of bytes that we wrote to the buffer. */ 9210 if (temp == NULL) { 9211 ret = get_errno(-1); 9212 } else { 9213 /* Don't worry about sign mismatch as earlier mapping 9214 * logic would have thrown a bad address error. */ 9215 ret = MIN(strlen(real), arg3); 9216 /* We cannot NUL terminate the string. */ 9217 memcpy(p2, real, ret); 9218 } 9219 } else { 9220 ret = get_errno(readlink(path(p), p2, arg3)); 9221 } 9222 unlock_user(p2, arg2, ret); 9223 unlock_user(p, arg1, 0); 9224 } 9225 break; 9226 #endif 9227 #if defined(TARGET_NR_readlinkat) 9228 case TARGET_NR_readlinkat: 9229 { 9230 void *p2; 9231 p = lock_user_string(arg2); 9232 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9233 if (!p || !p2) { 9234 ret = -TARGET_EFAULT; 9235 } else if (is_proc_myself((const char *)p, "exe")) { 9236 char real[PATH_MAX], *temp; 9237 temp = realpath(exec_path, real); 9238 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 9239 snprintf((char *)p2, arg4, "%s", real); 9240 } else { 9241 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 9242 } 9243 unlock_user(p2, arg3, ret); 9244 unlock_user(p, arg2, 0); 9245 } 9246 break; 9247 #endif 9248 #ifdef TARGET_NR_uselib 9249 case TARGET_NR_uselib: 9250 goto unimplemented; 9251 #endif 9252 #ifdef TARGET_NR_swapon 9253 case TARGET_NR_swapon: 9254 if (!(p = lock_user_string(arg1))) 9255 goto efault; 9256 ret = get_errno(swapon(p, arg2)); 9257 unlock_user(p, arg1, 0); 9258 break; 9259 #endif 9260 case TARGET_NR_reboot: 9261 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 9262 /* arg4 must be ignored in all other cases */ 9263 p = lock_user_string(arg4); 9264 if (!p) { 9265 goto efault; 9266 } 9267 ret = get_errno(reboot(arg1, arg2, arg3, p)); 9268 unlock_user(p, arg4, 0); 9269 } else { 9270 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 9271 } 9272 break; 9273 #ifdef TARGET_NR_readdir 9274 case TARGET_NR_readdir: 9275 goto unimplemented; 9276 #endif 9277 #ifdef TARGET_NR_mmap 9278 case TARGET_NR_mmap: 9279 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 9280 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 9281 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 9282 || defined(TARGET_S390X) 9283 { 9284 abi_ulong *v; 9285 abi_ulong v1, v2, v3, v4, v5, v6; 9286 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 9287 goto efault; 9288 v1 = tswapal(v[0]); 9289 v2 = tswapal(v[1]); 9290 v3 = tswapal(v[2]); 9291 v4 = tswapal(v[3]); 9292 v5 = tswapal(v[4]); 9293 v6 = tswapal(v[5]); 9294 unlock_user(v, arg1, 0); 9295 ret = get_errno(target_mmap(v1, v2, v3, 9296 target_to_host_bitmask(v4, mmap_flags_tbl), 9297 v5, v6)); 9298 } 9299 #else 9300 ret = get_errno(target_mmap(arg1, arg2, arg3, 9301 target_to_host_bitmask(arg4, mmap_flags_tbl), 9302 arg5, 9303 arg6)); 9304 #endif 9305 break; 9306 #endif 9307 #ifdef TARGET_NR_mmap2 9308 case TARGET_NR_mmap2: 9309 #ifndef 
MMAP_SHIFT 9310 #define MMAP_SHIFT 12 9311 #endif 9312 ret = get_errno(target_mmap(arg1, arg2, arg3, 9313 target_to_host_bitmask(arg4, mmap_flags_tbl), 9314 arg5, 9315 arg6 << MMAP_SHIFT)); 9316 break; 9317 #endif 9318 case TARGET_NR_munmap: 9319 ret = get_errno(target_munmap(arg1, arg2)); 9320 break; 9321 case TARGET_NR_mprotect: 9322 { 9323 TaskState *ts = cpu->opaque; 9324 /* Special hack to detect libc making the stack executable. */ 9325 if ((arg3 & PROT_GROWSDOWN) 9326 && arg1 >= ts->info->stack_limit 9327 && arg1 <= ts->info->start_stack) { 9328 arg3 &= ~PROT_GROWSDOWN; 9329 arg2 = arg2 + arg1 - ts->info->stack_limit; 9330 arg1 = ts->info->stack_limit; 9331 } 9332 } 9333 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 9334 break; 9335 #ifdef TARGET_NR_mremap 9336 case TARGET_NR_mremap: 9337 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 9338 break; 9339 #endif 9340 /* ??? msync/mlock/munlock are broken for softmmu. */ 9341 #ifdef TARGET_NR_msync 9342 case TARGET_NR_msync: 9343 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 9344 break; 9345 #endif 9346 #ifdef TARGET_NR_mlock 9347 case TARGET_NR_mlock: 9348 ret = get_errno(mlock(g2h(arg1), arg2)); 9349 break; 9350 #endif 9351 #ifdef TARGET_NR_munlock 9352 case TARGET_NR_munlock: 9353 ret = get_errno(munlock(g2h(arg1), arg2)); 9354 break; 9355 #endif 9356 #ifdef TARGET_NR_mlockall 9357 case TARGET_NR_mlockall: 9358 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 9359 break; 9360 #endif 9361 #ifdef TARGET_NR_munlockall 9362 case TARGET_NR_munlockall: 9363 ret = get_errno(munlockall()); 9364 break; 9365 #endif 9366 case TARGET_NR_truncate: 9367 if (!(p = lock_user_string(arg1))) 9368 goto efault; 9369 ret = get_errno(truncate(p, arg2)); 9370 unlock_user(p, arg1, 0); 9371 break; 9372 case TARGET_NR_ftruncate: 9373 ret = get_errno(ftruncate(arg1, arg2)); 9374 break; 9375 case TARGET_NR_fchmod: 9376 ret = get_errno(fchmod(arg1, arg2)); 9377 break; 9378 #if defined(TARGET_NR_fchmodat) 
9379 case TARGET_NR_fchmodat: 9380 if (!(p = lock_user_string(arg2))) 9381 goto efault; 9382 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 9383 unlock_user(p, arg2, 0); 9384 break; 9385 #endif 9386 case TARGET_NR_getpriority: 9387 /* Note that negative values are valid for getpriority, so we must 9388 differentiate based on errno settings. */ 9389 errno = 0; 9390 ret = getpriority(arg1, arg2); 9391 if (ret == -1 && errno != 0) { 9392 ret = -host_to_target_errno(errno); 9393 break; 9394 } 9395 #ifdef TARGET_ALPHA 9396 /* Return value is the unbiased priority. Signal no error. */ 9397 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 9398 #else 9399 /* Return value is a biased priority to avoid negative numbers. */ 9400 ret = 20 - ret; 9401 #endif 9402 break; 9403 case TARGET_NR_setpriority: 9404 ret = get_errno(setpriority(arg1, arg2, arg3)); 9405 break; 9406 #ifdef TARGET_NR_profil 9407 case TARGET_NR_profil: 9408 goto unimplemented; 9409 #endif 9410 case TARGET_NR_statfs: 9411 if (!(p = lock_user_string(arg1))) 9412 goto efault; 9413 ret = get_errno(statfs(path(p), &stfs)); 9414 unlock_user(p, arg1, 0); 9415 convert_statfs: 9416 if (!is_error(ret)) { 9417 struct target_statfs *target_stfs; 9418 9419 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 9420 goto efault; 9421 __put_user(stfs.f_type, &target_stfs->f_type); 9422 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9423 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9424 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9425 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9426 __put_user(stfs.f_files, &target_stfs->f_files); 9427 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9428 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9429 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9430 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9431 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9432 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9433 
unlock_user_struct(target_stfs, arg2, 1); 9434 } 9435 break; 9436 case TARGET_NR_fstatfs: 9437 ret = get_errno(fstatfs(arg1, &stfs)); 9438 goto convert_statfs; 9439 #ifdef TARGET_NR_statfs64 9440 case TARGET_NR_statfs64: 9441 if (!(p = lock_user_string(arg1))) 9442 goto efault; 9443 ret = get_errno(statfs(path(p), &stfs)); 9444 unlock_user(p, arg1, 0); 9445 convert_statfs64: 9446 if (!is_error(ret)) { 9447 struct target_statfs64 *target_stfs; 9448 9449 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 9450 goto efault; 9451 __put_user(stfs.f_type, &target_stfs->f_type); 9452 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9453 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9454 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9455 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9456 __put_user(stfs.f_files, &target_stfs->f_files); 9457 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9458 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9459 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9460 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9461 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9462 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9463 unlock_user_struct(target_stfs, arg3, 1); 9464 } 9465 break; 9466 case TARGET_NR_fstatfs64: 9467 ret = get_errno(fstatfs(arg1, &stfs)); 9468 goto convert_statfs64; 9469 #endif 9470 #ifdef TARGET_NR_ioperm 9471 case TARGET_NR_ioperm: 9472 goto unimplemented; 9473 #endif 9474 #ifdef TARGET_NR_socketcall 9475 case TARGET_NR_socketcall: 9476 ret = do_socketcall(arg1, arg2); 9477 break; 9478 #endif 9479 #ifdef TARGET_NR_accept 9480 case TARGET_NR_accept: 9481 ret = do_accept4(arg1, arg2, arg3, 0); 9482 break; 9483 #endif 9484 #ifdef TARGET_NR_accept4 9485 case TARGET_NR_accept4: 9486 ret = do_accept4(arg1, arg2, arg3, arg4); 9487 break; 9488 #endif 9489 #ifdef TARGET_NR_bind 9490 case TARGET_NR_bind: 9491 ret = do_bind(arg1, arg2, arg3); 9492 break; 9493 
#endif 9494 #ifdef TARGET_NR_connect 9495 case TARGET_NR_connect: 9496 ret = do_connect(arg1, arg2, arg3); 9497 break; 9498 #endif 9499 #ifdef TARGET_NR_getpeername 9500 case TARGET_NR_getpeername: 9501 ret = do_getpeername(arg1, arg2, arg3); 9502 break; 9503 #endif 9504 #ifdef TARGET_NR_getsockname 9505 case TARGET_NR_getsockname: 9506 ret = do_getsockname(arg1, arg2, arg3); 9507 break; 9508 #endif 9509 #ifdef TARGET_NR_getsockopt 9510 case TARGET_NR_getsockopt: 9511 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 9512 break; 9513 #endif 9514 #ifdef TARGET_NR_listen 9515 case TARGET_NR_listen: 9516 ret = get_errno(listen(arg1, arg2)); 9517 break; 9518 #endif 9519 #ifdef TARGET_NR_recv 9520 case TARGET_NR_recv: 9521 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 9522 break; 9523 #endif 9524 #ifdef TARGET_NR_recvfrom 9525 case TARGET_NR_recvfrom: 9526 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 9527 break; 9528 #endif 9529 #ifdef TARGET_NR_recvmsg 9530 case TARGET_NR_recvmsg: 9531 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 9532 break; 9533 #endif 9534 #ifdef TARGET_NR_send 9535 case TARGET_NR_send: 9536 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 9537 break; 9538 #endif 9539 #ifdef TARGET_NR_sendmsg 9540 case TARGET_NR_sendmsg: 9541 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 9542 break; 9543 #endif 9544 #ifdef TARGET_NR_sendmmsg 9545 case TARGET_NR_sendmmsg: 9546 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 9547 break; 9548 case TARGET_NR_recvmmsg: 9549 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 9550 break; 9551 #endif 9552 #ifdef TARGET_NR_sendto 9553 case TARGET_NR_sendto: 9554 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 9555 break; 9556 #endif 9557 #ifdef TARGET_NR_shutdown 9558 case TARGET_NR_shutdown: 9559 ret = get_errno(shutdown(arg1, arg2)); 9560 break; 9561 #endif 9562 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 9563 case TARGET_NR_getrandom: 9564 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9565 if 
(!p) { 9566 goto efault; 9567 } 9568 ret = get_errno(getrandom(p, arg2, arg3)); 9569 unlock_user(p, arg1, ret); 9570 break; 9571 #endif 9572 #ifdef TARGET_NR_socket 9573 case TARGET_NR_socket: 9574 ret = do_socket(arg1, arg2, arg3); 9575 break; 9576 #endif 9577 #ifdef TARGET_NR_socketpair 9578 case TARGET_NR_socketpair: 9579 ret = do_socketpair(arg1, arg2, arg3, arg4); 9580 break; 9581 #endif 9582 #ifdef TARGET_NR_setsockopt 9583 case TARGET_NR_setsockopt: 9584 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 9585 break; 9586 #endif 9587 #if defined(TARGET_NR_syslog) 9588 case TARGET_NR_syslog: 9589 { 9590 int len = arg2; 9591 9592 switch (arg1) { 9593 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 9594 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 9595 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 9596 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 9597 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 9598 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 9599 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 9600 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 9601 { 9602 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 9603 } 9604 break; 9605 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 9606 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 9607 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 9608 { 9609 ret = -TARGET_EINVAL; 9610 if (len < 0) { 9611 goto fail; 9612 } 9613 ret = 0; 9614 if (len == 0) { 9615 break; 9616 } 9617 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9618 if (!p) { 9619 ret = -TARGET_EFAULT; 9620 goto fail; 9621 } 9622 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 9623 unlock_user(p, arg2, arg3); 9624 } 9625 break; 9626 default: 9627 ret = -EINVAL; 9628 break; 9629 } 9630 } 9631 break; 9632 #endif 9633 case TARGET_NR_setitimer: 9634 { 9635 struct itimerval value, ovalue, *pvalue; 9636 9637 if (arg2) { 
9638 pvalue = &value; 9639 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 9640 || copy_from_user_timeval(&pvalue->it_value, 9641 arg2 + sizeof(struct target_timeval))) 9642 goto efault; 9643 } else { 9644 pvalue = NULL; 9645 } 9646 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 9647 if (!is_error(ret) && arg3) { 9648 if (copy_to_user_timeval(arg3, 9649 &ovalue.it_interval) 9650 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 9651 &ovalue.it_value)) 9652 goto efault; 9653 } 9654 } 9655 break; 9656 case TARGET_NR_getitimer: 9657 { 9658 struct itimerval value; 9659 9660 ret = get_errno(getitimer(arg1, &value)); 9661 if (!is_error(ret) && arg2) { 9662 if (copy_to_user_timeval(arg2, 9663 &value.it_interval) 9664 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 9665 &value.it_value)) 9666 goto efault; 9667 } 9668 } 9669 break; 9670 #ifdef TARGET_NR_stat 9671 case TARGET_NR_stat: 9672 if (!(p = lock_user_string(arg1))) 9673 goto efault; 9674 ret = get_errno(stat(path(p), &st)); 9675 unlock_user(p, arg1, 0); 9676 goto do_stat; 9677 #endif 9678 #ifdef TARGET_NR_lstat 9679 case TARGET_NR_lstat: 9680 if (!(p = lock_user_string(arg1))) 9681 goto efault; 9682 ret = get_errno(lstat(path(p), &st)); 9683 unlock_user(p, arg1, 0); 9684 goto do_stat; 9685 #endif 9686 case TARGET_NR_fstat: 9687 { 9688 ret = get_errno(fstat(arg1, &st)); 9689 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 9690 do_stat: 9691 #endif 9692 if (!is_error(ret)) { 9693 struct target_stat *target_st; 9694 9695 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 9696 goto efault; 9697 memset(target_st, 0, sizeof(*target_st)); 9698 __put_user(st.st_dev, &target_st->st_dev); 9699 __put_user(st.st_ino, &target_st->st_ino); 9700 __put_user(st.st_mode, &target_st->st_mode); 9701 __put_user(st.st_uid, &target_st->st_uid); 9702 __put_user(st.st_gid, &target_st->st_gid); 9703 __put_user(st.st_nlink, &target_st->st_nlink); 9704 __put_user(st.st_rdev, 
&target_st->st_rdev); 9705 __put_user(st.st_size, &target_st->st_size); 9706 __put_user(st.st_blksize, &target_st->st_blksize); 9707 __put_user(st.st_blocks, &target_st->st_blocks); 9708 __put_user(st.st_atime, &target_st->target_st_atime); 9709 __put_user(st.st_mtime, &target_st->target_st_mtime); 9710 __put_user(st.st_ctime, &target_st->target_st_ctime); 9711 unlock_user_struct(target_st, arg2, 1); 9712 } 9713 } 9714 break; 9715 #ifdef TARGET_NR_olduname 9716 case TARGET_NR_olduname: 9717 goto unimplemented; 9718 #endif 9719 #ifdef TARGET_NR_iopl 9720 case TARGET_NR_iopl: 9721 goto unimplemented; 9722 #endif 9723 case TARGET_NR_vhangup: 9724 ret = get_errno(vhangup()); 9725 break; 9726 #ifdef TARGET_NR_idle 9727 case TARGET_NR_idle: 9728 goto unimplemented; 9729 #endif 9730 #ifdef TARGET_NR_syscall 9731 case TARGET_NR_syscall: 9732 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 9733 arg6, arg7, arg8, 0); 9734 break; 9735 #endif 9736 case TARGET_NR_wait4: 9737 { 9738 int status; 9739 abi_long status_ptr = arg2; 9740 struct rusage rusage, *rusage_ptr; 9741 abi_ulong target_rusage = arg4; 9742 abi_long rusage_err; 9743 if (target_rusage) 9744 rusage_ptr = &rusage; 9745 else 9746 rusage_ptr = NULL; 9747 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 9748 if (!is_error(ret)) { 9749 if (status_ptr && ret) { 9750 status = host_to_target_waitstatus(status); 9751 if (put_user_s32(status, status_ptr)) 9752 goto efault; 9753 } 9754 if (target_rusage) { 9755 rusage_err = host_to_target_rusage(target_rusage, &rusage); 9756 if (rusage_err) { 9757 ret = rusage_err; 9758 } 9759 } 9760 } 9761 } 9762 break; 9763 #ifdef TARGET_NR_swapoff 9764 case TARGET_NR_swapoff: 9765 if (!(p = lock_user_string(arg1))) 9766 goto efault; 9767 ret = get_errno(swapoff(p)); 9768 unlock_user(p, arg1, 0); 9769 break; 9770 #endif 9771 case TARGET_NR_sysinfo: 9772 { 9773 struct target_sysinfo *target_value; 9774 struct sysinfo value; 9775 ret = 
get_errno(sysinfo(&value)); 9776 if (!is_error(ret) && arg1) 9777 { 9778 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 9779 goto efault; 9780 __put_user(value.uptime, &target_value->uptime); 9781 __put_user(value.loads[0], &target_value->loads[0]); 9782 __put_user(value.loads[1], &target_value->loads[1]); 9783 __put_user(value.loads[2], &target_value->loads[2]); 9784 __put_user(value.totalram, &target_value->totalram); 9785 __put_user(value.freeram, &target_value->freeram); 9786 __put_user(value.sharedram, &target_value->sharedram); 9787 __put_user(value.bufferram, &target_value->bufferram); 9788 __put_user(value.totalswap, &target_value->totalswap); 9789 __put_user(value.freeswap, &target_value->freeswap); 9790 __put_user(value.procs, &target_value->procs); 9791 __put_user(value.totalhigh, &target_value->totalhigh); 9792 __put_user(value.freehigh, &target_value->freehigh); 9793 __put_user(value.mem_unit, &target_value->mem_unit); 9794 unlock_user_struct(target_value, arg1, 1); 9795 } 9796 } 9797 break; 9798 #ifdef TARGET_NR_ipc 9799 case TARGET_NR_ipc: 9800 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 9801 break; 9802 #endif 9803 #ifdef TARGET_NR_semget 9804 case TARGET_NR_semget: 9805 ret = get_errno(semget(arg1, arg2, arg3)); 9806 break; 9807 #endif 9808 #ifdef TARGET_NR_semop 9809 case TARGET_NR_semop: 9810 ret = do_semop(arg1, arg2, arg3); 9811 break; 9812 #endif 9813 #ifdef TARGET_NR_semctl 9814 case TARGET_NR_semctl: 9815 ret = do_semctl(arg1, arg2, arg3, arg4); 9816 break; 9817 #endif 9818 #ifdef TARGET_NR_msgctl 9819 case TARGET_NR_msgctl: 9820 ret = do_msgctl(arg1, arg2, arg3); 9821 break; 9822 #endif 9823 #ifdef TARGET_NR_msgget 9824 case TARGET_NR_msgget: 9825 ret = get_errno(msgget(arg1, arg2)); 9826 break; 9827 #endif 9828 #ifdef TARGET_NR_msgrcv 9829 case TARGET_NR_msgrcv: 9830 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 9831 break; 9832 #endif 9833 #ifdef TARGET_NR_msgsnd 9834 case TARGET_NR_msgsnd: 9835 ret = 
do_msgsnd(arg1, arg2, arg3, arg4); 9836 break; 9837 #endif 9838 #ifdef TARGET_NR_shmget 9839 case TARGET_NR_shmget: 9840 ret = get_errno(shmget(arg1, arg2, arg3)); 9841 break; 9842 #endif 9843 #ifdef TARGET_NR_shmctl 9844 case TARGET_NR_shmctl: 9845 ret = do_shmctl(arg1, arg2, arg3); 9846 break; 9847 #endif 9848 #ifdef TARGET_NR_shmat 9849 case TARGET_NR_shmat: 9850 ret = do_shmat(cpu_env, arg1, arg2, arg3); 9851 break; 9852 #endif 9853 #ifdef TARGET_NR_shmdt 9854 case TARGET_NR_shmdt: 9855 ret = do_shmdt(arg1); 9856 break; 9857 #endif 9858 case TARGET_NR_fsync: 9859 ret = get_errno(fsync(arg1)); 9860 break; 9861 case TARGET_NR_clone: 9862 /* Linux manages to have three different orderings for its 9863 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 9864 * match the kernel's CONFIG_CLONE_* settings. 9865 * Microblaze is further special in that it uses a sixth 9866 * implicit argument to clone for the TLS pointer. 9867 */ 9868 #if defined(TARGET_MICROBLAZE) 9869 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 9870 #elif defined(TARGET_CLONE_BACKWARDS) 9871 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 9872 #elif defined(TARGET_CLONE_BACKWARDS2) 9873 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 9874 #else 9875 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 9876 #endif 9877 break; 9878 #ifdef __NR_exit_group 9879 /* new thread calls */ 9880 case TARGET_NR_exit_group: 9881 #ifdef TARGET_GPROF 9882 _mcleanup(); 9883 #endif 9884 gdb_exit(cpu_env, arg1); 9885 ret = get_errno(exit_group(arg1)); 9886 break; 9887 #endif 9888 case TARGET_NR_setdomainname: 9889 if (!(p = lock_user_string(arg1))) 9890 goto efault; 9891 ret = get_errno(setdomainname(p, arg2)); 9892 unlock_user(p, arg1, 0); 9893 break; 9894 case TARGET_NR_uname: 9895 /* no need to transcode because we use the linux syscall */ 9896 { 9897 struct new_utsname * buf; 9898 9899 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 9900 
goto efault; 9901 ret = get_errno(sys_uname(buf)); 9902 if (!is_error(ret)) { 9903 /* Overwrite the native machine name with whatever is being 9904 emulated. */ 9905 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 9906 /* Allow the user to override the reported release. */ 9907 if (qemu_uname_release && *qemu_uname_release) { 9908 g_strlcpy(buf->release, qemu_uname_release, 9909 sizeof(buf->release)); 9910 } 9911 } 9912 unlock_user_struct(buf, arg1, 1); 9913 } 9914 break; 9915 #ifdef TARGET_I386 9916 case TARGET_NR_modify_ldt: 9917 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 9918 break; 9919 #if !defined(TARGET_X86_64) 9920 case TARGET_NR_vm86old: 9921 goto unimplemented; 9922 case TARGET_NR_vm86: 9923 ret = do_vm86(cpu_env, arg1, arg2); 9924 break; 9925 #endif 9926 #endif 9927 case TARGET_NR_adjtimex: 9928 { 9929 struct timex host_buf; 9930 9931 if (target_to_host_timex(&host_buf, arg1) != 0) { 9932 goto efault; 9933 } 9934 ret = get_errno(adjtimex(&host_buf)); 9935 if (!is_error(ret)) { 9936 if (host_to_target_timex(arg1, &host_buf) != 0) { 9937 goto efault; 9938 } 9939 } 9940 } 9941 break; 9942 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 9943 case TARGET_NR_clock_adjtime: 9944 { 9945 struct timex htx, *phtx = &htx; 9946 9947 if (target_to_host_timex(phtx, arg2) != 0) { 9948 goto efault; 9949 } 9950 ret = get_errno(clock_adjtime(arg1, phtx)); 9951 if (!is_error(ret) && phtx) { 9952 if (host_to_target_timex(arg2, phtx) != 0) { 9953 goto efault; 9954 } 9955 } 9956 } 9957 break; 9958 #endif 9959 #ifdef TARGET_NR_create_module 9960 case TARGET_NR_create_module: 9961 #endif 9962 case TARGET_NR_init_module: 9963 case TARGET_NR_delete_module: 9964 #ifdef TARGET_NR_get_kernel_syms 9965 case TARGET_NR_get_kernel_syms: 9966 #endif 9967 goto unimplemented; 9968 case TARGET_NR_quotactl: 9969 goto unimplemented; 9970 case TARGET_NR_getpgid: 9971 ret = get_errno(getpgid(arg1)); 9972 break; 9973 case TARGET_NR_fchdir: 9974 ret = 
get_errno(fchdir(arg1)); 9975 break; 9976 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 9977 case TARGET_NR_bdflush: 9978 goto unimplemented; 9979 #endif 9980 #ifdef TARGET_NR_sysfs 9981 case TARGET_NR_sysfs: 9982 goto unimplemented; 9983 #endif 9984 case TARGET_NR_personality: 9985 ret = get_errno(personality(arg1)); 9986 break; 9987 #ifdef TARGET_NR_afs_syscall 9988 case TARGET_NR_afs_syscall: 9989 goto unimplemented; 9990 #endif 9991 #ifdef TARGET_NR__llseek /* Not on alpha */ 9992 case TARGET_NR__llseek: 9993 { 9994 int64_t res; 9995 #if !defined(__NR_llseek) 9996 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 9997 if (res == -1) { 9998 ret = get_errno(res); 9999 } else { 10000 ret = 0; 10001 } 10002 #else 10003 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10004 #endif 10005 if ((ret == 0) && put_user_s64(res, arg4)) { 10006 goto efault; 10007 } 10008 } 10009 break; 10010 #endif 10011 #ifdef TARGET_NR_getdents 10012 case TARGET_NR_getdents: 10013 #ifdef __NR_getdents 10014 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 10015 { 10016 struct target_dirent *target_dirp; 10017 struct linux_dirent *dirp; 10018 abi_long count = arg3; 10019 10020 dirp = g_try_malloc(count); 10021 if (!dirp) { 10022 ret = -TARGET_ENOMEM; 10023 goto fail; 10024 } 10025 10026 ret = get_errno(sys_getdents(arg1, dirp, count)); 10027 if (!is_error(ret)) { 10028 struct linux_dirent *de; 10029 struct target_dirent *tde; 10030 int len = ret; 10031 int reclen, treclen; 10032 int count1, tnamelen; 10033 10034 count1 = 0; 10035 de = dirp; 10036 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10037 goto efault; 10038 tde = target_dirp; 10039 while (len > 0) { 10040 reclen = de->d_reclen; 10041 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 10042 assert(tnamelen >= 0); 10043 treclen = tnamelen + offsetof(struct target_dirent, d_name); 10044 assert(count1 + treclen <= count); 10045 tde->d_reclen = tswap16(treclen); 10046 tde->d_ino = 
tswapal(de->d_ino); 10047 tde->d_off = tswapal(de->d_off); 10048 memcpy(tde->d_name, de->d_name, tnamelen); 10049 de = (struct linux_dirent *)((char *)de + reclen); 10050 len -= reclen; 10051 tde = (struct target_dirent *)((char *)tde + treclen); 10052 count1 += treclen; 10053 } 10054 ret = count1; 10055 unlock_user(target_dirp, arg2, ret); 10056 } 10057 g_free(dirp); 10058 } 10059 #else 10060 { 10061 struct linux_dirent *dirp; 10062 abi_long count = arg3; 10063 10064 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10065 goto efault; 10066 ret = get_errno(sys_getdents(arg1, dirp, count)); 10067 if (!is_error(ret)) { 10068 struct linux_dirent *de; 10069 int len = ret; 10070 int reclen; 10071 de = dirp; 10072 while (len > 0) { 10073 reclen = de->d_reclen; 10074 if (reclen > len) 10075 break; 10076 de->d_reclen = tswap16(reclen); 10077 tswapls(&de->d_ino); 10078 tswapls(&de->d_off); 10079 de = (struct linux_dirent *)((char *)de + reclen); 10080 len -= reclen; 10081 } 10082 } 10083 unlock_user(dirp, arg2, ret); 10084 } 10085 #endif 10086 #else 10087 /* Implement getdents in terms of getdents64 */ 10088 { 10089 struct linux_dirent64 *dirp; 10090 abi_long count = arg3; 10091 10092 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 10093 if (!dirp) { 10094 goto efault; 10095 } 10096 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10097 if (!is_error(ret)) { 10098 /* Convert the dirent64 structs to target dirent. We do this 10099 * in-place, since we can guarantee that a target_dirent is no 10100 * larger than a dirent64; however this means we have to be 10101 * careful to read everything before writing in the new format. 
10102 */ 10103 struct linux_dirent64 *de; 10104 struct target_dirent *tde; 10105 int len = ret; 10106 int tlen = 0; 10107 10108 de = dirp; 10109 tde = (struct target_dirent *)dirp; 10110 while (len > 0) { 10111 int namelen, treclen; 10112 int reclen = de->d_reclen; 10113 uint64_t ino = de->d_ino; 10114 int64_t off = de->d_off; 10115 uint8_t type = de->d_type; 10116 10117 namelen = strlen(de->d_name); 10118 treclen = offsetof(struct target_dirent, d_name) 10119 + namelen + 2; 10120 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 10121 10122 memmove(tde->d_name, de->d_name, namelen + 1); 10123 tde->d_ino = tswapal(ino); 10124 tde->d_off = tswapal(off); 10125 tde->d_reclen = tswap16(treclen); 10126 /* The target_dirent type is in what was formerly a padding 10127 * byte at the end of the structure: 10128 */ 10129 *(((char *)tde) + treclen - 1) = type; 10130 10131 de = (struct linux_dirent64 *)((char *)de + reclen); 10132 tde = (struct target_dirent *)((char *)tde + treclen); 10133 len -= reclen; 10134 tlen += treclen; 10135 } 10136 ret = tlen; 10137 } 10138 unlock_user(dirp, arg2, ret); 10139 } 10140 #endif 10141 break; 10142 #endif /* TARGET_NR_getdents */ 10143 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10144 case TARGET_NR_getdents64: 10145 { 10146 struct linux_dirent64 *dirp; 10147 abi_long count = arg3; 10148 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10149 goto efault; 10150 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10151 if (!is_error(ret)) { 10152 struct linux_dirent64 *de; 10153 int len = ret; 10154 int reclen; 10155 de = dirp; 10156 while (len > 0) { 10157 reclen = de->d_reclen; 10158 if (reclen > len) 10159 break; 10160 de->d_reclen = tswap16(reclen); 10161 tswap64s((uint64_t *)&de->d_ino); 10162 tswap64s((uint64_t *)&de->d_off); 10163 de = (struct linux_dirent64 *)((char *)de + reclen); 10164 len -= reclen; 10165 } 10166 } 10167 unlock_user(dirp, arg2, ret); 10168 } 10169 break; 10170 #endif /* 
TARGET_NR_getdents64 */ 10171 #if defined(TARGET_NR__newselect) 10172 case TARGET_NR__newselect: 10173 ret = do_select(arg1, arg2, arg3, arg4, arg5); 10174 break; 10175 #endif 10176 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 10177 # ifdef TARGET_NR_poll 10178 case TARGET_NR_poll: 10179 # endif 10180 # ifdef TARGET_NR_ppoll 10181 case TARGET_NR_ppoll: 10182 # endif 10183 { 10184 struct target_pollfd *target_pfd; 10185 unsigned int nfds = arg2; 10186 struct pollfd *pfd; 10187 unsigned int i; 10188 10189 pfd = NULL; 10190 target_pfd = NULL; 10191 if (nfds) { 10192 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) { 10193 ret = -TARGET_EINVAL; 10194 break; 10195 } 10196 10197 target_pfd = lock_user(VERIFY_WRITE, arg1, 10198 sizeof(struct target_pollfd) * nfds, 1); 10199 if (!target_pfd) { 10200 goto efault; 10201 } 10202 10203 pfd = alloca(sizeof(struct pollfd) * nfds); 10204 for (i = 0; i < nfds; i++) { 10205 pfd[i].fd = tswap32(target_pfd[i].fd); 10206 pfd[i].events = tswap16(target_pfd[i].events); 10207 } 10208 } 10209 10210 switch (num) { 10211 # ifdef TARGET_NR_ppoll 10212 case TARGET_NR_ppoll: 10213 { 10214 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 10215 target_sigset_t *target_set; 10216 sigset_t _set, *set = &_set; 10217 10218 if (arg3) { 10219 if (target_to_host_timespec(timeout_ts, arg3)) { 10220 unlock_user(target_pfd, arg1, 0); 10221 goto efault; 10222 } 10223 } else { 10224 timeout_ts = NULL; 10225 } 10226 10227 if (arg4) { 10228 if (arg5 != sizeof(target_sigset_t)) { 10229 unlock_user(target_pfd, arg1, 0); 10230 ret = -TARGET_EINVAL; 10231 break; 10232 } 10233 10234 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 10235 if (!target_set) { 10236 unlock_user(target_pfd, arg1, 0); 10237 goto efault; 10238 } 10239 target_to_host_sigset(set, target_set); 10240 } else { 10241 set = NULL; 10242 } 10243 10244 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts, 10245 set, SIGSET_T_SIZE)); 10246 10247 if 
(!is_error(ret) && arg3) { 10248 host_to_target_timespec(arg3, timeout_ts); 10249 } 10250 if (arg4) { 10251 unlock_user(target_set, arg4, 0); 10252 } 10253 break; 10254 } 10255 # endif 10256 # ifdef TARGET_NR_poll 10257 case TARGET_NR_poll: 10258 { 10259 struct timespec ts, *pts; 10260 10261 if (arg3 >= 0) { 10262 /* Convert ms to secs, ns */ 10263 ts.tv_sec = arg3 / 1000; 10264 ts.tv_nsec = (arg3 % 1000) * 1000000LL; 10265 pts = &ts; 10266 } else { 10267 /* -ve poll() timeout means "infinite" */ 10268 pts = NULL; 10269 } 10270 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0)); 10271 break; 10272 } 10273 # endif 10274 default: 10275 g_assert_not_reached(); 10276 } 10277 10278 if (!is_error(ret)) { 10279 for(i = 0; i < nfds; i++) { 10280 target_pfd[i].revents = tswap16(pfd[i].revents); 10281 } 10282 } 10283 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 10284 } 10285 break; 10286 #endif 10287 case TARGET_NR_flock: 10288 /* NOTE: the flock constant seems to be the same for every 10289 Linux platform */ 10290 ret = get_errno(safe_flock(arg1, arg2)); 10291 break; 10292 case TARGET_NR_readv: 10293 { 10294 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10295 if (vec != NULL) { 10296 ret = get_errno(safe_readv(arg1, vec, arg3)); 10297 unlock_iovec(vec, arg2, arg3, 1); 10298 } else { 10299 ret = -host_to_target_errno(errno); 10300 } 10301 } 10302 break; 10303 case TARGET_NR_writev: 10304 { 10305 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10306 if (vec != NULL) { 10307 ret = get_errno(safe_writev(arg1, vec, arg3)); 10308 unlock_iovec(vec, arg2, arg3, 0); 10309 } else { 10310 ret = -host_to_target_errno(errno); 10311 } 10312 } 10313 break; 10314 #if defined(TARGET_NR_preadv) 10315 case TARGET_NR_preadv: 10316 { 10317 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10318 if (vec != NULL) { 10319 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5)); 10320 unlock_iovec(vec, arg2, arg3, 1); 10321 } else { 
10322 ret = -host_to_target_errno(errno); 10323 } 10324 } 10325 break; 10326 #endif 10327 #if defined(TARGET_NR_pwritev) 10328 case TARGET_NR_pwritev: 10329 { 10330 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10331 if (vec != NULL) { 10332 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5)); 10333 unlock_iovec(vec, arg2, arg3, 0); 10334 } else { 10335 ret = -host_to_target_errno(errno); 10336 } 10337 } 10338 break; 10339 #endif 10340 case TARGET_NR_getsid: 10341 ret = get_errno(getsid(arg1)); 10342 break; 10343 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10344 case TARGET_NR_fdatasync: 10345 ret = get_errno(fdatasync(arg1)); 10346 break; 10347 #endif 10348 #ifdef TARGET_NR__sysctl 10349 case TARGET_NR__sysctl: 10350 /* We don't implement this, but ENOTDIR is always a safe 10351 return value. */ 10352 ret = -TARGET_ENOTDIR; 10353 break; 10354 #endif 10355 case TARGET_NR_sched_getaffinity: 10356 { 10357 unsigned int mask_size; 10358 unsigned long *mask; 10359 10360 /* 10361 * sched_getaffinity needs multiples of ulong, so need to take 10362 * care of mismatches between target ulong and host ulong sizes. 10363 */ 10364 if (arg2 & (sizeof(abi_ulong) - 1)) { 10365 ret = -TARGET_EINVAL; 10366 break; 10367 } 10368 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10369 10370 mask = alloca(mask_size); 10371 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10372 10373 if (!is_error(ret)) { 10374 if (ret > arg2) { 10375 /* More data returned than the caller's buffer will fit. 10376 * This only happens if sizeof(abi_long) < sizeof(long) 10377 * and the caller passed us a buffer holding an odd number 10378 * of abi_longs. If the host kernel is actually using the 10379 * extra 4 bytes then fail EINVAL; otherwise we can just 10380 * ignore them and only copy the interesting part. 
10381 */ 10382 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10383 if (numcpus > arg2 * 8) { 10384 ret = -TARGET_EINVAL; 10385 break; 10386 } 10387 ret = arg2; 10388 } 10389 10390 if (copy_to_user(arg3, mask, ret)) { 10391 goto efault; 10392 } 10393 } 10394 } 10395 break; 10396 case TARGET_NR_sched_setaffinity: 10397 { 10398 unsigned int mask_size; 10399 unsigned long *mask; 10400 10401 /* 10402 * sched_setaffinity needs multiples of ulong, so need to take 10403 * care of mismatches between target ulong and host ulong sizes. 10404 */ 10405 if (arg2 & (sizeof(abi_ulong) - 1)) { 10406 ret = -TARGET_EINVAL; 10407 break; 10408 } 10409 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10410 10411 mask = alloca(mask_size); 10412 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 10413 goto efault; 10414 } 10415 memcpy(mask, p, arg2); 10416 unlock_user_struct(p, arg2, 0); 10417 10418 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 10419 } 10420 break; 10421 case TARGET_NR_sched_setparam: 10422 { 10423 struct sched_param *target_schp; 10424 struct sched_param schp; 10425 10426 if (arg2 == 0) { 10427 return -TARGET_EINVAL; 10428 } 10429 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 10430 goto efault; 10431 schp.sched_priority = tswap32(target_schp->sched_priority); 10432 unlock_user_struct(target_schp, arg2, 0); 10433 ret = get_errno(sched_setparam(arg1, &schp)); 10434 } 10435 break; 10436 case TARGET_NR_sched_getparam: 10437 { 10438 struct sched_param *target_schp; 10439 struct sched_param schp; 10440 10441 if (arg2 == 0) { 10442 return -TARGET_EINVAL; 10443 } 10444 ret = get_errno(sched_getparam(arg1, &schp)); 10445 if (!is_error(ret)) { 10446 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 10447 goto efault; 10448 target_schp->sched_priority = tswap32(schp.sched_priority); 10449 unlock_user_struct(target_schp, arg2, 1); 10450 } 10451 } 10452 break; 10453 case TARGET_NR_sched_setscheduler: 10454 { 10455 struct sched_param 
*target_schp; 10456 struct sched_param schp; 10457 if (arg3 == 0) { 10458 return -TARGET_EINVAL; 10459 } 10460 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 10461 goto efault; 10462 schp.sched_priority = tswap32(target_schp->sched_priority); 10463 unlock_user_struct(target_schp, arg3, 0); 10464 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 10465 } 10466 break; 10467 case TARGET_NR_sched_getscheduler: 10468 ret = get_errno(sched_getscheduler(arg1)); 10469 break; 10470 case TARGET_NR_sched_yield: 10471 ret = get_errno(sched_yield()); 10472 break; 10473 case TARGET_NR_sched_get_priority_max: 10474 ret = get_errno(sched_get_priority_max(arg1)); 10475 break; 10476 case TARGET_NR_sched_get_priority_min: 10477 ret = get_errno(sched_get_priority_min(arg1)); 10478 break; 10479 case TARGET_NR_sched_rr_get_interval: 10480 { 10481 struct timespec ts; 10482 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10483 if (!is_error(ret)) { 10484 ret = host_to_target_timespec(arg2, &ts); 10485 } 10486 } 10487 break; 10488 case TARGET_NR_nanosleep: 10489 { 10490 struct timespec req, rem; 10491 target_to_host_timespec(&req, arg1); 10492 ret = get_errno(safe_nanosleep(&req, &rem)); 10493 if (is_error(ret) && arg2) { 10494 host_to_target_timespec(arg2, &rem); 10495 } 10496 } 10497 break; 10498 #ifdef TARGET_NR_query_module 10499 case TARGET_NR_query_module: 10500 goto unimplemented; 10501 #endif 10502 #ifdef TARGET_NR_nfsservctl 10503 case TARGET_NR_nfsservctl: 10504 goto unimplemented; 10505 #endif 10506 case TARGET_NR_prctl: 10507 switch (arg1) { 10508 case PR_GET_PDEATHSIG: 10509 { 10510 int deathsig; 10511 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 10512 if (!is_error(ret) && arg2 10513 && put_user_ual(deathsig, arg2)) { 10514 goto efault; 10515 } 10516 break; 10517 } 10518 #ifdef PR_GET_NAME 10519 case PR_GET_NAME: 10520 { 10521 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 10522 if (!name) { 10523 goto efault; 10524 } 10525 ret = 
get_errno(prctl(arg1, (unsigned long)name, 10526 arg3, arg4, arg5)); 10527 unlock_user(name, arg2, 16); 10528 break; 10529 } 10530 case PR_SET_NAME: 10531 { 10532 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 10533 if (!name) { 10534 goto efault; 10535 } 10536 ret = get_errno(prctl(arg1, (unsigned long)name, 10537 arg3, arg4, arg5)); 10538 unlock_user(name, arg2, 0); 10539 break; 10540 } 10541 #endif 10542 case PR_GET_SECCOMP: 10543 case PR_SET_SECCOMP: 10544 /* Disable seccomp to prevent the target disabling syscalls we 10545 * need. */ 10546 ret = -TARGET_EINVAL; 10547 break; 10548 default: 10549 /* Most prctl options have no pointer arguments */ 10550 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 10551 break; 10552 } 10553 break; 10554 #ifdef TARGET_NR_arch_prctl 10555 case TARGET_NR_arch_prctl: 10556 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 10557 ret = do_arch_prctl(cpu_env, arg1, arg2); 10558 break; 10559 #else 10560 goto unimplemented; 10561 #endif 10562 #endif 10563 #ifdef TARGET_NR_pread64 10564 case TARGET_NR_pread64: 10565 if (regpairs_aligned(cpu_env, num)) { 10566 arg4 = arg5; 10567 arg5 = arg6; 10568 } 10569 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 10570 goto efault; 10571 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 10572 unlock_user(p, arg2, ret); 10573 break; 10574 case TARGET_NR_pwrite64: 10575 if (regpairs_aligned(cpu_env, num)) { 10576 arg4 = arg5; 10577 arg5 = arg6; 10578 } 10579 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 10580 goto efault; 10581 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 10582 unlock_user(p, arg2, 0); 10583 break; 10584 #endif 10585 case TARGET_NR_getcwd: 10586 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 10587 goto efault; 10588 ret = get_errno(sys_getcwd1(p, arg2)); 10589 unlock_user(p, arg1, ret); 10590 break; 10591 case TARGET_NR_capget: 10592 case TARGET_NR_capset: 10593 { 10594 struct target_user_cap_header *target_header; 
10595 struct target_user_cap_data *target_data = NULL; 10596 struct __user_cap_header_struct header; 10597 struct __user_cap_data_struct data[2]; 10598 struct __user_cap_data_struct *dataptr = NULL; 10599 int i, target_datalen; 10600 int data_items = 1; 10601 10602 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 10603 goto efault; 10604 } 10605 header.version = tswap32(target_header->version); 10606 header.pid = tswap32(target_header->pid); 10607 10608 if (header.version != _LINUX_CAPABILITY_VERSION) { 10609 /* Version 2 and up takes pointer to two user_data structs */ 10610 data_items = 2; 10611 } 10612 10613 target_datalen = sizeof(*target_data) * data_items; 10614 10615 if (arg2) { 10616 if (num == TARGET_NR_capget) { 10617 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 10618 } else { 10619 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 10620 } 10621 if (!target_data) { 10622 unlock_user_struct(target_header, arg1, 0); 10623 goto efault; 10624 } 10625 10626 if (num == TARGET_NR_capset) { 10627 for (i = 0; i < data_items; i++) { 10628 data[i].effective = tswap32(target_data[i].effective); 10629 data[i].permitted = tswap32(target_data[i].permitted); 10630 data[i].inheritable = tswap32(target_data[i].inheritable); 10631 } 10632 } 10633 10634 dataptr = data; 10635 } 10636 10637 if (num == TARGET_NR_capget) { 10638 ret = get_errno(capget(&header, dataptr)); 10639 } else { 10640 ret = get_errno(capset(&header, dataptr)); 10641 } 10642 10643 /* The kernel always updates version for both capget and capset */ 10644 target_header->version = tswap32(header.version); 10645 unlock_user_struct(target_header, arg1, 1); 10646 10647 if (arg2) { 10648 if (num == TARGET_NR_capget) { 10649 for (i = 0; i < data_items; i++) { 10650 target_data[i].effective = tswap32(data[i].effective); 10651 target_data[i].permitted = tswap32(data[i].permitted); 10652 target_data[i].inheritable = tswap32(data[i].inheritable); 10653 } 10654 
unlock_user(target_data, arg2, target_datalen); 10655 } else { 10656 unlock_user(target_data, arg2, 0); 10657 } 10658 } 10659 break; 10660 } 10661 case TARGET_NR_sigaltstack: 10662 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 10663 break; 10664 10665 #ifdef CONFIG_SENDFILE 10666 case TARGET_NR_sendfile: 10667 { 10668 off_t *offp = NULL; 10669 off_t off; 10670 if (arg3) { 10671 ret = get_user_sal(off, arg3); 10672 if (is_error(ret)) { 10673 break; 10674 } 10675 offp = &off; 10676 } 10677 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 10678 if (!is_error(ret) && arg3) { 10679 abi_long ret2 = put_user_sal(off, arg3); 10680 if (is_error(ret2)) { 10681 ret = ret2; 10682 } 10683 } 10684 break; 10685 } 10686 #ifdef TARGET_NR_sendfile64 10687 case TARGET_NR_sendfile64: 10688 { 10689 off_t *offp = NULL; 10690 off_t off; 10691 if (arg3) { 10692 ret = get_user_s64(off, arg3); 10693 if (is_error(ret)) { 10694 break; 10695 } 10696 offp = &off; 10697 } 10698 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 10699 if (!is_error(ret) && arg3) { 10700 abi_long ret2 = put_user_s64(off, arg3); 10701 if (is_error(ret2)) { 10702 ret = ret2; 10703 } 10704 } 10705 break; 10706 } 10707 #endif 10708 #else 10709 case TARGET_NR_sendfile: 10710 #ifdef TARGET_NR_sendfile64 10711 case TARGET_NR_sendfile64: 10712 #endif 10713 goto unimplemented; 10714 #endif 10715 10716 #ifdef TARGET_NR_getpmsg 10717 case TARGET_NR_getpmsg: 10718 goto unimplemented; 10719 #endif 10720 #ifdef TARGET_NR_putpmsg 10721 case TARGET_NR_putpmsg: 10722 goto unimplemented; 10723 #endif 10724 #ifdef TARGET_NR_vfork 10725 case TARGET_NR_vfork: 10726 ret = get_errno(do_fork(cpu_env, 10727 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 10728 0, 0, 0, 0)); 10729 break; 10730 #endif 10731 #ifdef TARGET_NR_ugetrlimit 10732 case TARGET_NR_ugetrlimit: 10733 { 10734 struct rlimit rlim; 10735 int resource = target_to_host_resource(arg1); 10736 ret = get_errno(getrlimit(resource, &rlim)); 10737 if 
(!is_error(ret)) { 10738 struct target_rlimit *target_rlim; 10739 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 10740 goto efault; 10741 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 10742 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 10743 unlock_user_struct(target_rlim, arg2, 1); 10744 } 10745 break; 10746 } 10747 #endif 10748 #ifdef TARGET_NR_truncate64 10749 case TARGET_NR_truncate64: 10750 if (!(p = lock_user_string(arg1))) 10751 goto efault; 10752 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 10753 unlock_user(p, arg1, 0); 10754 break; 10755 #endif 10756 #ifdef TARGET_NR_ftruncate64 10757 case TARGET_NR_ftruncate64: 10758 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 10759 break; 10760 #endif 10761 #ifdef TARGET_NR_stat64 10762 case TARGET_NR_stat64: 10763 if (!(p = lock_user_string(arg1))) 10764 goto efault; 10765 ret = get_errno(stat(path(p), &st)); 10766 unlock_user(p, arg1, 0); 10767 if (!is_error(ret)) 10768 ret = host_to_target_stat64(cpu_env, arg2, &st); 10769 break; 10770 #endif 10771 #ifdef TARGET_NR_lstat64 10772 case TARGET_NR_lstat64: 10773 if (!(p = lock_user_string(arg1))) 10774 goto efault; 10775 ret = get_errno(lstat(path(p), &st)); 10776 unlock_user(p, arg1, 0); 10777 if (!is_error(ret)) 10778 ret = host_to_target_stat64(cpu_env, arg2, &st); 10779 break; 10780 #endif 10781 #ifdef TARGET_NR_fstat64 10782 case TARGET_NR_fstat64: 10783 ret = get_errno(fstat(arg1, &st)); 10784 if (!is_error(ret)) 10785 ret = host_to_target_stat64(cpu_env, arg2, &st); 10786 break; 10787 #endif 10788 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 10789 #ifdef TARGET_NR_fstatat64 10790 case TARGET_NR_fstatat64: 10791 #endif 10792 #ifdef TARGET_NR_newfstatat 10793 case TARGET_NR_newfstatat: 10794 #endif 10795 if (!(p = lock_user_string(arg2))) 10796 goto efault; 10797 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 10798 if (!is_error(ret)) 10799 ret = 
host_to_target_stat64(cpu_env, arg3, &st); 10800 break; 10801 #endif 10802 #ifdef TARGET_NR_lchown 10803 case TARGET_NR_lchown: 10804 if (!(p = lock_user_string(arg1))) 10805 goto efault; 10806 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 10807 unlock_user(p, arg1, 0); 10808 break; 10809 #endif 10810 #ifdef TARGET_NR_getuid 10811 case TARGET_NR_getuid: 10812 ret = get_errno(high2lowuid(getuid())); 10813 break; 10814 #endif 10815 #ifdef TARGET_NR_getgid 10816 case TARGET_NR_getgid: 10817 ret = get_errno(high2lowgid(getgid())); 10818 break; 10819 #endif 10820 #ifdef TARGET_NR_geteuid 10821 case TARGET_NR_geteuid: 10822 ret = get_errno(high2lowuid(geteuid())); 10823 break; 10824 #endif 10825 #ifdef TARGET_NR_getegid 10826 case TARGET_NR_getegid: 10827 ret = get_errno(high2lowgid(getegid())); 10828 break; 10829 #endif 10830 case TARGET_NR_setreuid: 10831 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 10832 break; 10833 case TARGET_NR_setregid: 10834 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 10835 break; 10836 case TARGET_NR_getgroups: 10837 { 10838 int gidsetsize = arg1; 10839 target_id *target_grouplist; 10840 gid_t *grouplist; 10841 int i; 10842 10843 grouplist = alloca(gidsetsize * sizeof(gid_t)); 10844 ret = get_errno(getgroups(gidsetsize, grouplist)); 10845 if (gidsetsize == 0) 10846 break; 10847 if (!is_error(ret)) { 10848 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 10849 if (!target_grouplist) 10850 goto efault; 10851 for(i = 0;i < ret; i++) 10852 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 10853 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 10854 } 10855 } 10856 break; 10857 case TARGET_NR_setgroups: 10858 { 10859 int gidsetsize = arg1; 10860 target_id *target_grouplist; 10861 gid_t *grouplist = NULL; 10862 int i; 10863 if (gidsetsize) { 10864 grouplist = alloca(gidsetsize * sizeof(gid_t)); 10865 target_grouplist = 
lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 10866 if (!target_grouplist) { 10867 ret = -TARGET_EFAULT; 10868 goto fail; 10869 } 10870 for (i = 0; i < gidsetsize; i++) { 10871 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 10872 } 10873 unlock_user(target_grouplist, arg2, 0); 10874 } 10875 ret = get_errno(setgroups(gidsetsize, grouplist)); 10876 } 10877 break; 10878 case TARGET_NR_fchown: 10879 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 10880 break; 10881 #if defined(TARGET_NR_fchownat) 10882 case TARGET_NR_fchownat: 10883 if (!(p = lock_user_string(arg2))) 10884 goto efault; 10885 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 10886 low2highgid(arg4), arg5)); 10887 unlock_user(p, arg2, 0); 10888 break; 10889 #endif 10890 #ifdef TARGET_NR_setresuid 10891 case TARGET_NR_setresuid: 10892 ret = get_errno(sys_setresuid(low2highuid(arg1), 10893 low2highuid(arg2), 10894 low2highuid(arg3))); 10895 break; 10896 #endif 10897 #ifdef TARGET_NR_getresuid 10898 case TARGET_NR_getresuid: 10899 { 10900 uid_t ruid, euid, suid; 10901 ret = get_errno(getresuid(&ruid, &euid, &suid)); 10902 if (!is_error(ret)) { 10903 if (put_user_id(high2lowuid(ruid), arg1) 10904 || put_user_id(high2lowuid(euid), arg2) 10905 || put_user_id(high2lowuid(suid), arg3)) 10906 goto efault; 10907 } 10908 } 10909 break; 10910 #endif 10911 #ifdef TARGET_NR_getresgid 10912 case TARGET_NR_setresgid: 10913 ret = get_errno(sys_setresgid(low2highgid(arg1), 10914 low2highgid(arg2), 10915 low2highgid(arg3))); 10916 break; 10917 #endif 10918 #ifdef TARGET_NR_getresgid 10919 case TARGET_NR_getresgid: 10920 { 10921 gid_t rgid, egid, sgid; 10922 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 10923 if (!is_error(ret)) { 10924 if (put_user_id(high2lowgid(rgid), arg1) 10925 || put_user_id(high2lowgid(egid), arg2) 10926 || put_user_id(high2lowgid(sgid), arg3)) 10927 goto efault; 10928 } 10929 } 10930 break; 10931 #endif 10932 #ifdef TARGET_NR_chown 10933 case 
TARGET_NR_chown: 10934 if (!(p = lock_user_string(arg1))) 10935 goto efault; 10936 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 10937 unlock_user(p, arg1, 0); 10938 break; 10939 #endif 10940 case TARGET_NR_setuid: 10941 ret = get_errno(sys_setuid(low2highuid(arg1))); 10942 break; 10943 case TARGET_NR_setgid: 10944 ret = get_errno(sys_setgid(low2highgid(arg1))); 10945 break; 10946 case TARGET_NR_setfsuid: 10947 ret = get_errno(setfsuid(arg1)); 10948 break; 10949 case TARGET_NR_setfsgid: 10950 ret = get_errno(setfsgid(arg1)); 10951 break; 10952 10953 #ifdef TARGET_NR_lchown32 10954 case TARGET_NR_lchown32: 10955 if (!(p = lock_user_string(arg1))) 10956 goto efault; 10957 ret = get_errno(lchown(p, arg2, arg3)); 10958 unlock_user(p, arg1, 0); 10959 break; 10960 #endif 10961 #ifdef TARGET_NR_getuid32 10962 case TARGET_NR_getuid32: 10963 ret = get_errno(getuid()); 10964 break; 10965 #endif 10966 10967 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 10968 /* Alpha specific */ 10969 case TARGET_NR_getxuid: 10970 { 10971 uid_t euid; 10972 euid=geteuid(); 10973 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 10974 } 10975 ret = get_errno(getuid()); 10976 break; 10977 #endif 10978 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 10979 /* Alpha specific */ 10980 case TARGET_NR_getxgid: 10981 { 10982 uid_t egid; 10983 egid=getegid(); 10984 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 10985 } 10986 ret = get_errno(getgid()); 10987 break; 10988 #endif 10989 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 10990 /* Alpha specific */ 10991 case TARGET_NR_osf_getsysinfo: 10992 ret = -TARGET_EOPNOTSUPP; 10993 switch (arg1) { 10994 case TARGET_GSI_IEEE_FP_CONTROL: 10995 { 10996 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 10997 10998 /* Copied from linux ieee_fpcr_to_swcr. 
*/ 10999 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 11000 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 11001 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 11002 | SWCR_TRAP_ENABLE_DZE 11003 | SWCR_TRAP_ENABLE_OVF); 11004 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 11005 | SWCR_TRAP_ENABLE_INE); 11006 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 11007 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 11008 11009 if (put_user_u64 (swcr, arg2)) 11010 goto efault; 11011 ret = 0; 11012 } 11013 break; 11014 11015 /* case GSI_IEEE_STATE_AT_SIGNAL: 11016 -- Not implemented in linux kernel. 11017 case GSI_UACPROC: 11018 -- Retrieves current unaligned access state; not much used. 11019 case GSI_PROC_TYPE: 11020 -- Retrieves implver information; surely not used. 11021 case GSI_GET_HWRPB: 11022 -- Grabs a copy of the HWRPB; surely not used. 11023 */ 11024 } 11025 break; 11026 #endif 11027 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11028 /* Alpha specific */ 11029 case TARGET_NR_osf_setsysinfo: 11030 ret = -TARGET_EOPNOTSUPP; 11031 switch (arg1) { 11032 case TARGET_SSI_IEEE_FP_CONTROL: 11033 { 11034 uint64_t swcr, fpcr, orig_fpcr; 11035 11036 if (get_user_u64 (swcr, arg2)) { 11037 goto efault; 11038 } 11039 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11040 fpcr = orig_fpcr & FPCR_DYN_MASK; 11041 11042 /* Copied from linux ieee_swcr_to_fpcr. */ 11043 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 11044 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 11045 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 11046 | SWCR_TRAP_ENABLE_DZE 11047 | SWCR_TRAP_ENABLE_OVF)) << 48; 11048 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 11049 | SWCR_TRAP_ENABLE_INE)) << 57; 11050 fpcr |= (swcr & SWCR_MAP_UMZ ? 
FPCR_UNDZ | FPCR_UNFD : 0); 11051 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 11052 11053 cpu_alpha_store_fpcr(cpu_env, fpcr); 11054 ret = 0; 11055 } 11056 break; 11057 11058 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11059 { 11060 uint64_t exc, fpcr, orig_fpcr; 11061 int si_code; 11062 11063 if (get_user_u64(exc, arg2)) { 11064 goto efault; 11065 } 11066 11067 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11068 11069 /* We only add to the exception status here. */ 11070 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 11071 11072 cpu_alpha_store_fpcr(cpu_env, fpcr); 11073 ret = 0; 11074 11075 /* Old exceptions are not signaled. */ 11076 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 11077 11078 /* If any exceptions set by this call, 11079 and are unmasked, send a signal. */ 11080 si_code = 0; 11081 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 11082 si_code = TARGET_FPE_FLTRES; 11083 } 11084 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 11085 si_code = TARGET_FPE_FLTUND; 11086 } 11087 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 11088 si_code = TARGET_FPE_FLTOVF; 11089 } 11090 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 11091 si_code = TARGET_FPE_FLTDIV; 11092 } 11093 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 11094 si_code = TARGET_FPE_FLTINV; 11095 } 11096 if (si_code != 0) { 11097 target_siginfo_t info; 11098 info.si_signo = SIGFPE; 11099 info.si_errno = 0; 11100 info.si_code = si_code; 11101 info._sifields._sigfault._addr 11102 = ((CPUArchState *)cpu_env)->pc; 11103 queue_signal((CPUArchState *)cpu_env, info.si_signo, 11104 QEMU_SI_FAULT, &info); 11105 } 11106 } 11107 break; 11108 11109 /* case SSI_NVPAIRS: 11110 -- Used with SSIN_UACPROC to enable unaligned accesses. 11111 case SSI_IEEE_STATE_AT_SIGNAL: 11112 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11113 -- Not implemented in linux kernel 11114 */ 11115 } 11116 break; 11117 #endif 11118 #ifdef TARGET_NR_osf_sigprocmask 11119 /* Alpha specific. 
*/ 11120 case TARGET_NR_osf_sigprocmask: 11121 { 11122 abi_ulong mask; 11123 int how; 11124 sigset_t set, oldset; 11125 11126 switch(arg1) { 11127 case TARGET_SIG_BLOCK: 11128 how = SIG_BLOCK; 11129 break; 11130 case TARGET_SIG_UNBLOCK: 11131 how = SIG_UNBLOCK; 11132 break; 11133 case TARGET_SIG_SETMASK: 11134 how = SIG_SETMASK; 11135 break; 11136 default: 11137 ret = -TARGET_EINVAL; 11138 goto fail; 11139 } 11140 mask = arg2; 11141 target_to_host_old_sigset(&set, &mask); 11142 ret = do_sigprocmask(how, &set, &oldset); 11143 if (!ret) { 11144 host_to_target_old_sigset(&mask, &oldset); 11145 ret = mask; 11146 } 11147 } 11148 break; 11149 #endif 11150 11151 #ifdef TARGET_NR_getgid32 11152 case TARGET_NR_getgid32: 11153 ret = get_errno(getgid()); 11154 break; 11155 #endif 11156 #ifdef TARGET_NR_geteuid32 11157 case TARGET_NR_geteuid32: 11158 ret = get_errno(geteuid()); 11159 break; 11160 #endif 11161 #ifdef TARGET_NR_getegid32 11162 case TARGET_NR_getegid32: 11163 ret = get_errno(getegid()); 11164 break; 11165 #endif 11166 #ifdef TARGET_NR_setreuid32 11167 case TARGET_NR_setreuid32: 11168 ret = get_errno(setreuid(arg1, arg2)); 11169 break; 11170 #endif 11171 #ifdef TARGET_NR_setregid32 11172 case TARGET_NR_setregid32: 11173 ret = get_errno(setregid(arg1, arg2)); 11174 break; 11175 #endif 11176 #ifdef TARGET_NR_getgroups32 11177 case TARGET_NR_getgroups32: 11178 { 11179 int gidsetsize = arg1; 11180 uint32_t *target_grouplist; 11181 gid_t *grouplist; 11182 int i; 11183 11184 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11185 ret = get_errno(getgroups(gidsetsize, grouplist)); 11186 if (gidsetsize == 0) 11187 break; 11188 if (!is_error(ret)) { 11189 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 11190 if (!target_grouplist) { 11191 ret = -TARGET_EFAULT; 11192 goto fail; 11193 } 11194 for(i = 0;i < ret; i++) 11195 target_grouplist[i] = tswap32(grouplist[i]); 11196 unlock_user(target_grouplist, arg2, gidsetsize * 4); 11197 } 11198 } 11199 break; 
11200 #endif 11201 #ifdef TARGET_NR_setgroups32 11202 case TARGET_NR_setgroups32: 11203 { 11204 int gidsetsize = arg1; 11205 uint32_t *target_grouplist; 11206 gid_t *grouplist; 11207 int i; 11208 11209 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11210 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 11211 if (!target_grouplist) { 11212 ret = -TARGET_EFAULT; 11213 goto fail; 11214 } 11215 for(i = 0;i < gidsetsize; i++) 11216 grouplist[i] = tswap32(target_grouplist[i]); 11217 unlock_user(target_grouplist, arg2, 0); 11218 ret = get_errno(setgroups(gidsetsize, grouplist)); 11219 } 11220 break; 11221 #endif 11222 #ifdef TARGET_NR_fchown32 11223 case TARGET_NR_fchown32: 11224 ret = get_errno(fchown(arg1, arg2, arg3)); 11225 break; 11226 #endif 11227 #ifdef TARGET_NR_setresuid32 11228 case TARGET_NR_setresuid32: 11229 ret = get_errno(sys_setresuid(arg1, arg2, arg3)); 11230 break; 11231 #endif 11232 #ifdef TARGET_NR_getresuid32 11233 case TARGET_NR_getresuid32: 11234 { 11235 uid_t ruid, euid, suid; 11236 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11237 if (!is_error(ret)) { 11238 if (put_user_u32(ruid, arg1) 11239 || put_user_u32(euid, arg2) 11240 || put_user_u32(suid, arg3)) 11241 goto efault; 11242 } 11243 } 11244 break; 11245 #endif 11246 #ifdef TARGET_NR_setresgid32 11247 case TARGET_NR_setresgid32: 11248 ret = get_errno(sys_setresgid(arg1, arg2, arg3)); 11249 break; 11250 #endif 11251 #ifdef TARGET_NR_getresgid32 11252 case TARGET_NR_getresgid32: 11253 { 11254 gid_t rgid, egid, sgid; 11255 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11256 if (!is_error(ret)) { 11257 if (put_user_u32(rgid, arg1) 11258 || put_user_u32(egid, arg2) 11259 || put_user_u32(sgid, arg3)) 11260 goto efault; 11261 } 11262 } 11263 break; 11264 #endif 11265 #ifdef TARGET_NR_chown32 11266 case TARGET_NR_chown32: 11267 if (!(p = lock_user_string(arg1))) 11268 goto efault; 11269 ret = get_errno(chown(p, arg2, arg3)); 11270 unlock_user(p, arg1, 0); 11271 break; 11272 
#endif 11273 #ifdef TARGET_NR_setuid32 11274 case TARGET_NR_setuid32: 11275 ret = get_errno(sys_setuid(arg1)); 11276 break; 11277 #endif 11278 #ifdef TARGET_NR_setgid32 11279 case TARGET_NR_setgid32: 11280 ret = get_errno(sys_setgid(arg1)); 11281 break; 11282 #endif 11283 #ifdef TARGET_NR_setfsuid32 11284 case TARGET_NR_setfsuid32: 11285 ret = get_errno(setfsuid(arg1)); 11286 break; 11287 #endif 11288 #ifdef TARGET_NR_setfsgid32 11289 case TARGET_NR_setfsgid32: 11290 ret = get_errno(setfsgid(arg1)); 11291 break; 11292 #endif 11293 11294 case TARGET_NR_pivot_root: 11295 goto unimplemented; 11296 #ifdef TARGET_NR_mincore 11297 case TARGET_NR_mincore: 11298 { 11299 void *a; 11300 ret = -TARGET_ENOMEM; 11301 a = lock_user(VERIFY_READ, arg1, arg2, 0); 11302 if (!a) { 11303 goto fail; 11304 } 11305 ret = -TARGET_EFAULT; 11306 p = lock_user_string(arg3); 11307 if (!p) { 11308 goto mincore_fail; 11309 } 11310 ret = get_errno(mincore(a, arg2, p)); 11311 unlock_user(p, arg3, ret); 11312 mincore_fail: 11313 unlock_user(a, arg1, 0); 11314 } 11315 break; 11316 #endif 11317 #ifdef TARGET_NR_arm_fadvise64_64 11318 case TARGET_NR_arm_fadvise64_64: 11319 /* arm_fadvise64_64 looks like fadvise64_64 but 11320 * with different argument order: fd, advice, offset, len 11321 * rather than the usual fd, offset, len, advice. 11322 * Note that offset and len are both 64-bit so appear as 11323 * pairs of 32-bit registers. 
11324 */ 11325 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 11326 target_offset64(arg5, arg6), arg2); 11327 ret = -host_to_target_errno(ret); 11328 break; 11329 #endif 11330 11331 #if TARGET_ABI_BITS == 32 11332 11333 #ifdef TARGET_NR_fadvise64_64 11334 case TARGET_NR_fadvise64_64: 11335 #if defined(TARGET_PPC) 11336 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 11337 ret = arg2; 11338 arg2 = arg3; 11339 arg3 = arg4; 11340 arg4 = arg5; 11341 arg5 = arg6; 11342 arg6 = ret; 11343 #else 11344 /* 6 args: fd, offset (high, low), len (high, low), advice */ 11345 if (regpairs_aligned(cpu_env, num)) { 11346 /* offset is in (3,4), len in (5,6) and advice in 7 */ 11347 arg2 = arg3; 11348 arg3 = arg4; 11349 arg4 = arg5; 11350 arg5 = arg6; 11351 arg6 = arg7; 11352 } 11353 #endif 11354 ret = -host_to_target_errno(posix_fadvise(arg1, 11355 target_offset64(arg2, arg3), 11356 target_offset64(arg4, arg5), 11357 arg6)); 11358 break; 11359 #endif 11360 11361 #ifdef TARGET_NR_fadvise64 11362 case TARGET_NR_fadvise64: 11363 /* 5 args: fd, offset (high, low), len, advice */ 11364 if (regpairs_aligned(cpu_env, num)) { 11365 /* offset is in (3,4), len in 5 and advice in 6 */ 11366 arg2 = arg3; 11367 arg3 = arg4; 11368 arg4 = arg5; 11369 arg5 = arg6; 11370 } 11371 ret = -host_to_target_errno(posix_fadvise(arg1, 11372 target_offset64(arg2, arg3), 11373 arg4, arg5)); 11374 break; 11375 #endif 11376 11377 #else /* not a 32-bit ABI */ 11378 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 11379 #ifdef TARGET_NR_fadvise64_64 11380 case TARGET_NR_fadvise64_64: 11381 #endif 11382 #ifdef TARGET_NR_fadvise64 11383 case TARGET_NR_fadvise64: 11384 #endif 11385 #ifdef TARGET_S390X 11386 switch (arg4) { 11387 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 11388 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 11389 case 6: arg4 = POSIX_FADV_DONTNEED; break; 11390 case 7: arg4 = POSIX_FADV_NOREUSE; break; 11391 
default: break; 11392 } 11393 #endif 11394 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 11395 break; 11396 #endif 11397 #endif /* end of 64-bit ABI fadvise handling */ 11398 11399 #ifdef TARGET_NR_madvise 11400 case TARGET_NR_madvise: 11401 /* A straight passthrough may not be safe because qemu sometimes 11402 turns private file-backed mappings into anonymous mappings. 11403 This will break MADV_DONTNEED. 11404 This is a hint, so ignoring and returning success is ok. */ 11405 ret = get_errno(0); 11406 break; 11407 #endif 11408 #if TARGET_ABI_BITS == 32 11409 case TARGET_NR_fcntl64: 11410 { 11411 int cmd; 11412 struct flock64 fl; 11413 from_flock64_fn *copyfrom = copy_from_user_flock64; 11414 to_flock64_fn *copyto = copy_to_user_flock64; 11415 11416 #ifdef TARGET_ARM 11417 if (((CPUARMState *)cpu_env)->eabi) { 11418 copyfrom = copy_from_user_eabi_flock64; 11419 copyto = copy_to_user_eabi_flock64; 11420 } 11421 #endif 11422 11423 cmd = target_to_host_fcntl_cmd(arg2); 11424 if (cmd == -TARGET_EINVAL) { 11425 ret = cmd; 11426 break; 11427 } 11428 11429 switch(arg2) { 11430 case TARGET_F_GETLK64: 11431 ret = copyfrom(&fl, arg3); 11432 if (ret) { 11433 break; 11434 } 11435 ret = get_errno(fcntl(arg1, cmd, &fl)); 11436 if (ret == 0) { 11437 ret = copyto(arg3, &fl); 11438 } 11439 break; 11440 11441 case TARGET_F_SETLK64: 11442 case TARGET_F_SETLKW64: 11443 ret = copyfrom(&fl, arg3); 11444 if (ret) { 11445 break; 11446 } 11447 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11448 break; 11449 default: 11450 ret = do_fcntl(arg1, arg2, arg3); 11451 break; 11452 } 11453 break; 11454 } 11455 #endif 11456 #ifdef TARGET_NR_cacheflush 11457 case TARGET_NR_cacheflush: 11458 /* self-modifying code is handled automatically, so nothing needed */ 11459 ret = 0; 11460 break; 11461 #endif 11462 #ifdef TARGET_NR_security 11463 case TARGET_NR_security: 11464 goto unimplemented; 11465 #endif 11466 #ifdef TARGET_NR_getpagesize 11467 case TARGET_NR_getpagesize: 11468 ret = 
TARGET_PAGE_SIZE; 11469 break; 11470 #endif 11471 case TARGET_NR_gettid: 11472 ret = get_errno(gettid()); 11473 break; 11474 #ifdef TARGET_NR_readahead 11475 case TARGET_NR_readahead: 11476 #if TARGET_ABI_BITS == 32 11477 if (regpairs_aligned(cpu_env, num)) { 11478 arg2 = arg3; 11479 arg3 = arg4; 11480 arg4 = arg5; 11481 } 11482 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4)); 11483 #else 11484 ret = get_errno(readahead(arg1, arg2, arg3)); 11485 #endif 11486 break; 11487 #endif 11488 #ifdef CONFIG_ATTR 11489 #ifdef TARGET_NR_setxattr 11490 case TARGET_NR_listxattr: 11491 case TARGET_NR_llistxattr: 11492 { 11493 void *p, *b = 0; 11494 if (arg2) { 11495 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11496 if (!b) { 11497 ret = -TARGET_EFAULT; 11498 break; 11499 } 11500 } 11501 p = lock_user_string(arg1); 11502 if (p) { 11503 if (num == TARGET_NR_listxattr) { 11504 ret = get_errno(listxattr(p, b, arg3)); 11505 } else { 11506 ret = get_errno(llistxattr(p, b, arg3)); 11507 } 11508 } else { 11509 ret = -TARGET_EFAULT; 11510 } 11511 unlock_user(p, arg1, 0); 11512 unlock_user(b, arg2, arg3); 11513 break; 11514 } 11515 case TARGET_NR_flistxattr: 11516 { 11517 void *b = 0; 11518 if (arg2) { 11519 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11520 if (!b) { 11521 ret = -TARGET_EFAULT; 11522 break; 11523 } 11524 } 11525 ret = get_errno(flistxattr(arg1, b, arg3)); 11526 unlock_user(b, arg2, arg3); 11527 break; 11528 } 11529 case TARGET_NR_setxattr: 11530 case TARGET_NR_lsetxattr: 11531 { 11532 void *p, *n, *v = 0; 11533 if (arg3) { 11534 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11535 if (!v) { 11536 ret = -TARGET_EFAULT; 11537 break; 11538 } 11539 } 11540 p = lock_user_string(arg1); 11541 n = lock_user_string(arg2); 11542 if (p && n) { 11543 if (num == TARGET_NR_setxattr) { 11544 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 11545 } else { 11546 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 11547 } 11548 } else { 11549 ret = -TARGET_EFAULT; 11550 } 11551 
unlock_user(p, arg1, 0); 11552 unlock_user(n, arg2, 0); 11553 unlock_user(v, arg3, 0); 11554 } 11555 break; 11556 case TARGET_NR_fsetxattr: 11557 { 11558 void *n, *v = 0; 11559 if (arg3) { 11560 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11561 if (!v) { 11562 ret = -TARGET_EFAULT; 11563 break; 11564 } 11565 } 11566 n = lock_user_string(arg2); 11567 if (n) { 11568 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 11569 } else { 11570 ret = -TARGET_EFAULT; 11571 } 11572 unlock_user(n, arg2, 0); 11573 unlock_user(v, arg3, 0); 11574 } 11575 break; 11576 case TARGET_NR_getxattr: 11577 case TARGET_NR_lgetxattr: 11578 { 11579 void *p, *n, *v = 0; 11580 if (arg3) { 11581 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11582 if (!v) { 11583 ret = -TARGET_EFAULT; 11584 break; 11585 } 11586 } 11587 p = lock_user_string(arg1); 11588 n = lock_user_string(arg2); 11589 if (p && n) { 11590 if (num == TARGET_NR_getxattr) { 11591 ret = get_errno(getxattr(p, n, v, arg4)); 11592 } else { 11593 ret = get_errno(lgetxattr(p, n, v, arg4)); 11594 } 11595 } else { 11596 ret = -TARGET_EFAULT; 11597 } 11598 unlock_user(p, arg1, 0); 11599 unlock_user(n, arg2, 0); 11600 unlock_user(v, arg3, arg4); 11601 } 11602 break; 11603 case TARGET_NR_fgetxattr: 11604 { 11605 void *n, *v = 0; 11606 if (arg3) { 11607 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11608 if (!v) { 11609 ret = -TARGET_EFAULT; 11610 break; 11611 } 11612 } 11613 n = lock_user_string(arg2); 11614 if (n) { 11615 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 11616 } else { 11617 ret = -TARGET_EFAULT; 11618 } 11619 unlock_user(n, arg2, 0); 11620 unlock_user(v, arg3, arg4); 11621 } 11622 break; 11623 case TARGET_NR_removexattr: 11624 case TARGET_NR_lremovexattr: 11625 { 11626 void *p, *n; 11627 p = lock_user_string(arg1); 11628 n = lock_user_string(arg2); 11629 if (p && n) { 11630 if (num == TARGET_NR_removexattr) { 11631 ret = get_errno(removexattr(p, n)); 11632 } else { 11633 ret = get_errno(lremovexattr(p, n)); 11634 } 11635 } else { 
11636 ret = -TARGET_EFAULT; 11637 } 11638 unlock_user(p, arg1, 0); 11639 unlock_user(n, arg2, 0); 11640 } 11641 break; 11642 case TARGET_NR_fremovexattr: 11643 { 11644 void *n; 11645 n = lock_user_string(arg2); 11646 if (n) { 11647 ret = get_errno(fremovexattr(arg1, n)); 11648 } else { 11649 ret = -TARGET_EFAULT; 11650 } 11651 unlock_user(n, arg2, 0); 11652 } 11653 break; 11654 #endif 11655 #endif /* CONFIG_ATTR */ 11656 #ifdef TARGET_NR_set_thread_area 11657 case TARGET_NR_set_thread_area: 11658 #if defined(TARGET_MIPS) 11659 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; 11660 ret = 0; 11661 break; 11662 #elif defined(TARGET_CRIS) 11663 if (arg1 & 0xff) 11664 ret = -TARGET_EINVAL; 11665 else { 11666 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 11667 ret = 0; 11668 } 11669 break; 11670 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 11671 ret = do_set_thread_area(cpu_env, arg1); 11672 break; 11673 #elif defined(TARGET_M68K) 11674 { 11675 TaskState *ts = cpu->opaque; 11676 ts->tp_value = arg1; 11677 ret = 0; 11678 break; 11679 } 11680 #else 11681 goto unimplemented_nowarn; 11682 #endif 11683 #endif 11684 #ifdef TARGET_NR_get_thread_area 11685 case TARGET_NR_get_thread_area: 11686 #if defined(TARGET_I386) && defined(TARGET_ABI32) 11687 ret = do_get_thread_area(cpu_env, arg1); 11688 break; 11689 #elif defined(TARGET_M68K) 11690 { 11691 TaskState *ts = cpu->opaque; 11692 ret = ts->tp_value; 11693 break; 11694 } 11695 #else 11696 goto unimplemented_nowarn; 11697 #endif 11698 #endif 11699 #ifdef TARGET_NR_getdomainname 11700 case TARGET_NR_getdomainname: 11701 goto unimplemented_nowarn; 11702 #endif 11703 11704 #ifdef TARGET_NR_clock_gettime 11705 case TARGET_NR_clock_gettime: 11706 { 11707 struct timespec ts; 11708 ret = get_errno(clock_gettime(arg1, &ts)); 11709 if (!is_error(ret)) { 11710 host_to_target_timespec(arg2, &ts); 11711 } 11712 break; 11713 } 11714 #endif 11715 #ifdef TARGET_NR_clock_getres 11716 case TARGET_NR_clock_getres: 11717 { 
11718 struct timespec ts; 11719 ret = get_errno(clock_getres(arg1, &ts)); 11720 if (!is_error(ret)) { 11721 host_to_target_timespec(arg2, &ts); 11722 } 11723 break; 11724 } 11725 #endif 11726 #ifdef TARGET_NR_clock_nanosleep 11727 case TARGET_NR_clock_nanosleep: 11728 { 11729 struct timespec ts; 11730 target_to_host_timespec(&ts, arg3); 11731 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 11732 &ts, arg4 ? &ts : NULL)); 11733 if (arg4) 11734 host_to_target_timespec(arg4, &ts); 11735 11736 #if defined(TARGET_PPC) 11737 /* clock_nanosleep is odd in that it returns positive errno values. 11738 * On PPC, CR0 bit 3 should be set in such a situation. */ 11739 if (ret && ret != -TARGET_ERESTARTSYS) { 11740 ((CPUPPCState *)cpu_env)->crf[0] |= 1; 11741 } 11742 #endif 11743 break; 11744 } 11745 #endif 11746 11747 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 11748 case TARGET_NR_set_tid_address: 11749 ret = get_errno(set_tid_address((int *)g2h(arg1))); 11750 break; 11751 #endif 11752 11753 case TARGET_NR_tkill: 11754 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 11755 break; 11756 11757 case TARGET_NR_tgkill: 11758 ret = get_errno(safe_tgkill((int)arg1, (int)arg2, 11759 target_to_host_signal(arg3))); 11760 break; 11761 11762 #ifdef TARGET_NR_set_robust_list 11763 case TARGET_NR_set_robust_list: 11764 case TARGET_NR_get_robust_list: 11765 /* The ABI for supporting robust futexes has userspace pass 11766 * the kernel a pointer to a linked list which is updated by 11767 * userspace after the syscall; the list is walked by the kernel 11768 * when the thread exits. Since the linked list in QEMU guest 11769 * memory isn't a valid linked list for the host and we have 11770 * no way to reliably intercept the thread-death event, we can't 11771 * support these. 
Silently return ENOSYS so that guest userspace 11772 * falls back to a non-robust futex implementation (which should 11773 * be OK except in the corner case of the guest crashing while 11774 * holding a mutex that is shared with another process via 11775 * shared memory). 11776 */ 11777 goto unimplemented_nowarn; 11778 #endif 11779 11780 #if defined(TARGET_NR_utimensat) 11781 case TARGET_NR_utimensat: 11782 { 11783 struct timespec *tsp, ts[2]; 11784 if (!arg3) { 11785 tsp = NULL; 11786 } else { 11787 target_to_host_timespec(ts, arg3); 11788 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 11789 tsp = ts; 11790 } 11791 if (!arg2) 11792 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 11793 else { 11794 if (!(p = lock_user_string(arg2))) { 11795 ret = -TARGET_EFAULT; 11796 goto fail; 11797 } 11798 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 11799 unlock_user(p, arg2, 0); 11800 } 11801 } 11802 break; 11803 #endif 11804 case TARGET_NR_futex: 11805 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 11806 break; 11807 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 11808 case TARGET_NR_inotify_init: 11809 ret = get_errno(sys_inotify_init()); 11810 if (ret >= 0) { 11811 fd_trans_register(ret, &target_inotify_trans); 11812 } 11813 break; 11814 #endif 11815 #ifdef CONFIG_INOTIFY1 11816 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 11817 case TARGET_NR_inotify_init1: 11818 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1, 11819 fcntl_flags_tbl))); 11820 if (ret >= 0) { 11821 fd_trans_register(ret, &target_inotify_trans); 11822 } 11823 break; 11824 #endif 11825 #endif 11826 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 11827 case TARGET_NR_inotify_add_watch: 11828 p = lock_user_string(arg2); 11829 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 11830 unlock_user(p, arg2, 0); 11831 break; 11832 #endif 11833 #if 
defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 11834 case TARGET_NR_inotify_rm_watch: 11835 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 11836 break; 11837 #endif 11838 11839 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 11840 case TARGET_NR_mq_open: 11841 { 11842 struct mq_attr posix_mq_attr; 11843 struct mq_attr *pposix_mq_attr; 11844 int host_flags; 11845 11846 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 11847 pposix_mq_attr = NULL; 11848 if (arg4) { 11849 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 11850 goto efault; 11851 } 11852 pposix_mq_attr = &posix_mq_attr; 11853 } 11854 p = lock_user_string(arg1 - 1); 11855 if (!p) { 11856 goto efault; 11857 } 11858 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 11859 unlock_user (p, arg1, 0); 11860 } 11861 break; 11862 11863 case TARGET_NR_mq_unlink: 11864 p = lock_user_string(arg1 - 1); 11865 if (!p) { 11866 ret = -TARGET_EFAULT; 11867 break; 11868 } 11869 ret = get_errno(mq_unlink(p)); 11870 unlock_user (p, arg1, 0); 11871 break; 11872 11873 case TARGET_NR_mq_timedsend: 11874 { 11875 struct timespec ts; 11876 11877 p = lock_user (VERIFY_READ, arg2, arg3, 1); 11878 if (arg5 != 0) { 11879 target_to_host_timespec(&ts, arg5); 11880 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 11881 host_to_target_timespec(arg5, &ts); 11882 } else { 11883 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 11884 } 11885 unlock_user (p, arg2, arg3); 11886 } 11887 break; 11888 11889 case TARGET_NR_mq_timedreceive: 11890 { 11891 struct timespec ts; 11892 unsigned int prio; 11893 11894 p = lock_user (VERIFY_READ, arg2, arg3, 1); 11895 if (arg5 != 0) { 11896 target_to_host_timespec(&ts, arg5); 11897 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 11898 &prio, &ts)); 11899 host_to_target_timespec(arg5, &ts); 11900 } else { 11901 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 11902 &prio, NULL)); 11903 } 11904 unlock_user (p, 
arg2, arg3); 11905 if (arg4 != 0) 11906 put_user_u32(prio, arg4); 11907 } 11908 break; 11909 11910 /* Not implemented for now... */ 11911 /* case TARGET_NR_mq_notify: */ 11912 /* break; */ 11913 11914 case TARGET_NR_mq_getsetattr: 11915 { 11916 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 11917 ret = 0; 11918 if (arg3 != 0) { 11919 ret = mq_getattr(arg1, &posix_mq_attr_out); 11920 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 11921 } 11922 if (arg2 != 0) { 11923 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 11924 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 11925 } 11926 11927 } 11928 break; 11929 #endif 11930 11931 #ifdef CONFIG_SPLICE 11932 #ifdef TARGET_NR_tee 11933 case TARGET_NR_tee: 11934 { 11935 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 11936 } 11937 break; 11938 #endif 11939 #ifdef TARGET_NR_splice 11940 case TARGET_NR_splice: 11941 { 11942 loff_t loff_in, loff_out; 11943 loff_t *ploff_in = NULL, *ploff_out = NULL; 11944 if (arg2) { 11945 if (get_user_u64(loff_in, arg2)) { 11946 goto efault; 11947 } 11948 ploff_in = &loff_in; 11949 } 11950 if (arg4) { 11951 if (get_user_u64(loff_out, arg4)) { 11952 goto efault; 11953 } 11954 ploff_out = &loff_out; 11955 } 11956 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 11957 if (arg2) { 11958 if (put_user_u64(loff_in, arg2)) { 11959 goto efault; 11960 } 11961 } 11962 if (arg4) { 11963 if (put_user_u64(loff_out, arg4)) { 11964 goto efault; 11965 } 11966 } 11967 } 11968 break; 11969 #endif 11970 #ifdef TARGET_NR_vmsplice 11971 case TARGET_NR_vmsplice: 11972 { 11973 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 11974 if (vec != NULL) { 11975 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 11976 unlock_iovec(vec, arg2, arg3, 0); 11977 } else { 11978 ret = -host_to_target_errno(errno); 11979 } 11980 } 11981 break; 11982 #endif 11983 #endif /* CONFIG_SPLICE */ 11984 #ifdef CONFIG_EVENTFD 11985 #if defined(TARGET_NR_eventfd) 11986 case TARGET_NR_eventfd: 
11987 ret = get_errno(eventfd(arg1, 0)); 11988 if (ret >= 0) { 11989 fd_trans_register(ret, &target_eventfd_trans); 11990 } 11991 break; 11992 #endif 11993 #if defined(TARGET_NR_eventfd2) 11994 case TARGET_NR_eventfd2: 11995 { 11996 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 11997 if (arg2 & TARGET_O_NONBLOCK) { 11998 host_flags |= O_NONBLOCK; 11999 } 12000 if (arg2 & TARGET_O_CLOEXEC) { 12001 host_flags |= O_CLOEXEC; 12002 } 12003 ret = get_errno(eventfd(arg1, host_flags)); 12004 if (ret >= 0) { 12005 fd_trans_register(ret, &target_eventfd_trans); 12006 } 12007 break; 12008 } 12009 #endif 12010 #endif /* CONFIG_EVENTFD */ 12011 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12012 case TARGET_NR_fallocate: 12013 #if TARGET_ABI_BITS == 32 12014 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12015 target_offset64(arg5, arg6))); 12016 #else 12017 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12018 #endif 12019 break; 12020 #endif 12021 #if defined(CONFIG_SYNC_FILE_RANGE) 12022 #if defined(TARGET_NR_sync_file_range) 12023 case TARGET_NR_sync_file_range: 12024 #if TARGET_ABI_BITS == 32 12025 #if defined(TARGET_MIPS) 12026 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12027 target_offset64(arg5, arg6), arg7)); 12028 #else 12029 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12030 target_offset64(arg4, arg5), arg6)); 12031 #endif /* !TARGET_MIPS */ 12032 #else 12033 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 12034 #endif 12035 break; 12036 #endif 12037 #if defined(TARGET_NR_sync_file_range2) 12038 case TARGET_NR_sync_file_range2: 12039 /* This is like sync_file_range but the arguments are reordered */ 12040 #if TARGET_ABI_BITS == 32 12041 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12042 target_offset64(arg5, arg6), arg2)); 12043 #else 12044 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 12045 #endif 12046 break; 12047 
#endif 12048 #endif 12049 #if defined(TARGET_NR_signalfd4) 12050 case TARGET_NR_signalfd4: 12051 ret = do_signalfd4(arg1, arg2, arg4); 12052 break; 12053 #endif 12054 #if defined(TARGET_NR_signalfd) 12055 case TARGET_NR_signalfd: 12056 ret = do_signalfd4(arg1, arg2, 0); 12057 break; 12058 #endif 12059 #if defined(CONFIG_EPOLL) 12060 #if defined(TARGET_NR_epoll_create) 12061 case TARGET_NR_epoll_create: 12062 ret = get_errno(epoll_create(arg1)); 12063 break; 12064 #endif 12065 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 12066 case TARGET_NR_epoll_create1: 12067 ret = get_errno(epoll_create1(arg1)); 12068 break; 12069 #endif 12070 #if defined(TARGET_NR_epoll_ctl) 12071 case TARGET_NR_epoll_ctl: 12072 { 12073 struct epoll_event ep; 12074 struct epoll_event *epp = 0; 12075 if (arg4) { 12076 struct target_epoll_event *target_ep; 12077 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 12078 goto efault; 12079 } 12080 ep.events = tswap32(target_ep->events); 12081 /* The epoll_data_t union is just opaque data to the kernel, 12082 * so we transfer all 64 bits across and need not worry what 12083 * actual data type it is. 
12084 */ 12085 ep.data.u64 = tswap64(target_ep->data.u64); 12086 unlock_user_struct(target_ep, arg4, 0); 12087 epp = &ep; 12088 } 12089 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 12090 break; 12091 } 12092 #endif 12093 12094 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 12095 #if defined(TARGET_NR_epoll_wait) 12096 case TARGET_NR_epoll_wait: 12097 #endif 12098 #if defined(TARGET_NR_epoll_pwait) 12099 case TARGET_NR_epoll_pwait: 12100 #endif 12101 { 12102 struct target_epoll_event *target_ep; 12103 struct epoll_event *ep; 12104 int epfd = arg1; 12105 int maxevents = arg3; 12106 int timeout = arg4; 12107 12108 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 12109 ret = -TARGET_EINVAL; 12110 break; 12111 } 12112 12113 target_ep = lock_user(VERIFY_WRITE, arg2, 12114 maxevents * sizeof(struct target_epoll_event), 1); 12115 if (!target_ep) { 12116 goto efault; 12117 } 12118 12119 ep = g_try_new(struct epoll_event, maxevents); 12120 if (!ep) { 12121 unlock_user(target_ep, arg2, 0); 12122 ret = -TARGET_ENOMEM; 12123 break; 12124 } 12125 12126 switch (num) { 12127 #if defined(TARGET_NR_epoll_pwait) 12128 case TARGET_NR_epoll_pwait: 12129 { 12130 target_sigset_t *target_set; 12131 sigset_t _set, *set = &_set; 12132 12133 if (arg5) { 12134 if (arg6 != sizeof(target_sigset_t)) { 12135 ret = -TARGET_EINVAL; 12136 break; 12137 } 12138 12139 target_set = lock_user(VERIFY_READ, arg5, 12140 sizeof(target_sigset_t), 1); 12141 if (!target_set) { 12142 ret = -TARGET_EFAULT; 12143 break; 12144 } 12145 target_to_host_sigset(set, target_set); 12146 unlock_user(target_set, arg5, 0); 12147 } else { 12148 set = NULL; 12149 } 12150 12151 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12152 set, SIGSET_T_SIZE)); 12153 break; 12154 } 12155 #endif 12156 #if defined(TARGET_NR_epoll_wait) 12157 case TARGET_NR_epoll_wait: 12158 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12159 NULL, 0)); 12160 break; 12161 #endif 12162 
/*
 * NOTE(review): this excerpt is the tail of do_syscall()'s main dispatch
 * switch; the function header and the earlier cases lie above the visible
 * region.  The stray decimal numbers fused into the code below are
 * line-number artifacts from the file extraction, not C tokens of the
 * original source.
 *
 * The first fragment finishes an epoll wait case: on success each host
 * epoll event is byte-swapped (tswap32/tswap64) back into the guest's
 * target_epoll_event array before guest memory is unlocked; on error the
 * buffer is unlocked without writeback.  TARGET_NR_prlimit64 then converts
 * the guest's new rlimit (tswap64) before calling sys_prlimit64().
 *
 * NOTE(review): in the TARGET_NR_atomic_cmpxchg_32 case further down, a
 * faulting get_user_u32() queues SIGSEGV and sets ret = 0xdeadbeef, but
 * control still falls through and ret is overwritten from the (then
 * uninitialized) mem_value -- looks suspicious; confirm against upstream.
 */
default: 12163 ret = -TARGET_ENOSYS; 12164 } 12165 if (!is_error(ret)) { 12166 int i; 12167 for (i = 0; i < ret; i++) { 12168 target_ep[i].events = tswap32(ep[i].events); 12169 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 12170 } 12171 unlock_user(target_ep, arg2, 12172 ret * sizeof(struct target_epoll_event)); 12173 } else { 12174 unlock_user(target_ep, arg2, 0); 12175 } 12176 g_free(ep); 12177 break; 12178 } 12179 #endif 12180 #endif 12181 #ifdef TARGET_NR_prlimit64 12182 case TARGET_NR_prlimit64: 12183 { 12184 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 12185 struct target_rlimit64 *target_rnew, *target_rold; 12186 struct host_rlimit64 rnew, rold, *rnewp = 0; 12187 int resource = target_to_host_resource(arg2); 12188 if (arg3) { 12189 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 12190 goto efault; 12191 } 12192 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 12193 rnew.rlim_max = tswap64(target_rnew->rlim_max); 12194 unlock_user_struct(target_rnew, arg3, 0); 12195 rnewp = &rnew; 12196 } 12197 12198 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ?
/* mid-?:-expression: arg4 selects whether the old limit is fetched back */
&rold : 0)); 12199 if (!is_error(ret) && arg4) { 12200 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 12201 goto efault; 12202 } 12203 target_rold->rlim_cur = tswap64(rold.rlim_cur); 12204 target_rold->rlim_max = tswap64(rold.rlim_max); 12205 unlock_user_struct(target_rold, arg4, 1); 12206 } 12207 break; 12208 } 12209 #endif 12210 #ifdef TARGET_NR_gethostname 12211 case TARGET_NR_gethostname: 12212 { 12213 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 12214 if (name) { 12215 ret = get_errno(gethostname(name, arg2)); 12216 unlock_user(name, arg1, arg2); 12217 } else { 12218 ret = -TARGET_EFAULT; 12219 } 12220 break; 12221 } 12222 #endif 12223 #ifdef TARGET_NR_atomic_cmpxchg_32 12224 case TARGET_NR_atomic_cmpxchg_32: 12225 { 12226 /* should use start_exclusive from main.c */ 12227 abi_ulong mem_value; 12228 if (get_user_u32(mem_value, arg6)) { 12229 target_siginfo_t info; 12230 info.si_signo = SIGSEGV; 12231 info.si_errno = 0; 12232 info.si_code = TARGET_SEGV_MAPERR; 12233 info._sifields._sigfault._addr = arg6; 12234 queue_signal((CPUArchState *)cpu_env, info.si_signo, 12235 QEMU_SI_FAULT, &info); 12236 ret = 0xdeadbeef; 12237 12238 } 12239 if (mem_value == arg2) 12240 put_user_u32(arg1, arg6); 12241 ret = mem_value; 12242 break; 12243 } 12244 #endif 12245 #ifdef TARGET_NR_atomic_barrier 12246 case TARGET_NR_atomic_barrier: 12247 { 12248 /* Like the kernel implementation and the qemu arm barrier, no-op this?
*/ 12249 ret = 0; 12250 break; 12251 } 12252 #endif 12253 12254 #ifdef TARGET_NR_timer_create 12255 case TARGET_NR_timer_create: 12256 { 12257 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 12258 12259 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 12260 12261 int clkid = arg1; 12262 int timer_index = next_free_host_timer(); 12263 12264 if (timer_index < 0) { 12265 ret = -TARGET_EAGAIN; 12266 } else { 12267 timer_t *phtimer = g_posix_timers + timer_index; 12268 12269 if (arg2) { 12270 phost_sevp = &host_sevp; 12271 ret = target_to_host_sigevent(phost_sevp, arg2); 12272 if (ret != 0) { 12273 break; 12274 } 12275 } 12276 12277 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 12278 if (ret) { 12279 phtimer = NULL; 12280 } else { 12281 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 12282 goto efault; 12283 } 12284 } 12285 } 12286 break; 12287 } 12288 #endif 12289 12290 #ifdef TARGET_NR_timer_settime 12291 case TARGET_NR_timer_settime: 12292 { 12293 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 12294 * struct itimerspec * old_value */ 12295 target_timer_t timerid = get_timer_id(arg1); 12296 12297 if (timerid < 0) { 12298 ret = timerid; 12299 } else if (arg3 == 0) { 12300 ret = -TARGET_EINVAL; 12301 } else { 12302 timer_t htimer = g_posix_timers[timerid]; 12303 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 12304 12305 if (target_to_host_itimerspec(&hspec_new, arg3)) { 12306 goto efault; 12307 } 12308 ret = get_errno( 12309 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 12310 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) { 12311 goto efault; 12312 } 12313 } 12314 break; 12315 } 12316 #endif 12317 12318 #ifdef TARGET_NR_timer_gettime 12319 case TARGET_NR_timer_gettime: 12320 { 12321 /* args: timer_t timerid, struct itimerspec *curr_value */ 12322 target_timer_t timerid = get_timer_id(arg1); 12323 12324 if (timerid < 0) { 12325 ret = timerid; 12326 } else
/*
 * timer_gettime continues: the host itimerspec is read back and converted
 * to guest layout.  These timer_* cases translate a guest timer handle
 * (TIMER_MAGIC | index, written by timer_create above) into the host
 * timer_t stored in g_posix_timers[] via get_timer_id().
 */
if (!arg2) { 12327 ret = -TARGET_EFAULT; 12328 } else { 12329 timer_t htimer = g_posix_timers[timerid]; 12330 struct itimerspec hspec; 12331 ret = get_errno(timer_gettime(htimer, &hspec)); 12332 12333 if (host_to_target_itimerspec(arg2, &hspec)) { 12334 ret = -TARGET_EFAULT; 12335 } 12336 } 12337 break; 12338 } 12339 #endif 12340 12341 #ifdef TARGET_NR_timer_getoverrun 12342 case TARGET_NR_timer_getoverrun: 12343 { 12344 /* args: timer_t timerid */ 12345 target_timer_t timerid = get_timer_id(arg1); 12346 12347 if (timerid < 0) { 12348 ret = timerid; 12349 } else { 12350 timer_t htimer = g_posix_timers[timerid]; 12351 ret = get_errno(timer_getoverrun(htimer)); 12352 } 12353 fd_trans_unregister(ret); 12354 break; 12355 } 12356 #endif 12357 12358 #ifdef TARGET_NR_timer_delete 12359 case TARGET_NR_timer_delete: 12360 { 12361 /* args: timer_t timerid */ 12362 target_timer_t timerid = get_timer_id(arg1); 12363 12364 if (timerid < 0) { 12365 ret = timerid; 12366 } else { 12367 timer_t htimer = g_posix_timers[timerid]; 12368 ret = get_errno(timer_delete(htimer)); 12369 g_posix_timers[timerid] = 0; 12370 } 12371 break; 12372 } 12373 #endif 12374 12375 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 12376 case TARGET_NR_timerfd_create: 12377 ret = get_errno(timerfd_create(arg1, 12378 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 12379 break; 12380 #endif 12381 12382 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 12383 case TARGET_NR_timerfd_gettime: 12384 { 12385 struct itimerspec its_curr; 12386 12387 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 12388 12389 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 12390 goto efault; 12391 } 12392 } 12393 break; 12394 #endif 12395 12396 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 12397 case TARGET_NR_timerfd_settime: 12398 { 12399 struct itimerspec its_new, its_old, *p_new; 12400 12401 if (arg3) { 12402 if (target_to_host_itimerspec(&its_new, arg3)) { 12403
/*
 * timerfd_settime continues: a conversion failure of the new/old value
 * takes the common efault path.  The remaining cases are thin wrappers
 * (ioprio_get/set, setns, unshare, kcmp); then comes the default
 * "Unsupported syscall" branch and the shared fail:/efault: exit labels
 * that optionally log/strace-print, trace, and return ret.
 */
goto efault; 12404 } 12405 p_new = &its_new; 12406 } else { 12407 p_new = NULL; 12408 } 12409 12410 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 12411 12412 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 12413 goto efault; 12414 } 12415 } 12416 break; 12417 #endif 12418 12419 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 12420 case TARGET_NR_ioprio_get: 12421 ret = get_errno(ioprio_get(arg1, arg2)); 12422 break; 12423 #endif 12424 12425 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 12426 case TARGET_NR_ioprio_set: 12427 ret = get_errno(ioprio_set(arg1, arg2, arg3)); 12428 break; 12429 #endif 12430 12431 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 12432 case TARGET_NR_setns: 12433 ret = get_errno(setns(arg1, arg2)); 12434 break; 12435 #endif 12436 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 12437 case TARGET_NR_unshare: 12438 ret = get_errno(unshare(arg1)); 12439 break; 12440 #endif 12441 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 12442 case TARGET_NR_kcmp: 12443 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5)); 12444 break; 12445 #endif 12446 12447 default: 12448 unimplemented: 12449 gemu_log("qemu: Unsupported syscall: %d\n", num); 12450 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 12451 unimplemented_nowarn: 12452 #endif 12453 ret = -TARGET_ENOSYS; 12454 break; 12455 } 12456 fail: 12457 #ifdef DEBUG 12458 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 12459 #endif 12460 if(do_strace) 12461 print_syscall_ret(num, ret); 12462 trace_guest_user_syscall_ret(cpu, num, ret); 12463 return ret; 12464 efault: 12465 ret = -TARGET_EFAULT; 12466 goto fail; 12467 } 12468
/* end of do_syscall() */