1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/path.h" 23 #include <elf.h> 24 #include <endian.h> 25 #include <grp.h> 26 #include <sys/ipc.h> 27 #include <sys/msg.h> 28 #include <sys/wait.h> 29 #include <sys/mount.h> 30 #include <sys/file.h> 31 #include <sys/fsuid.h> 32 #include <sys/personality.h> 33 #include <sys/prctl.h> 34 #include <sys/resource.h> 35 #include <sys/swap.h> 36 #include <linux/capability.h> 37 #include <sched.h> 38 #include <sys/timex.h> 39 #ifdef __ia64__ 40 int __clone2(int (*fn)(void *), void *child_stack_base, 41 size_t stack_size, int flags, void *arg, ...); 42 #endif 43 #include <sys/socket.h> 44 #include <sys/un.h> 45 #include <sys/uio.h> 46 #include <poll.h> 47 #include <sys/times.h> 48 #include <sys/shm.h> 49 #include <sys/sem.h> 50 #include <sys/statfs.h> 51 #include <time.h> 52 #include <utime.h> 53 #include <sys/sysinfo.h> 54 #include <sys/signalfd.h> 55 //#include <sys/user.h> 56 #include <netinet/ip.h> 57 #include <netinet/tcp.h> 58 #include <linux/wireless.h> 59 #include <linux/icmp.h> 60 #include <linux/icmpv6.h> 61 #include <linux/errqueue.h> 62 #include <linux/random.h> 63 #include "qemu-common.h" 64 #ifdef CONFIG_TIMERFD 65 #include <sys/timerfd.h> 
66 #endif 67 #ifdef TARGET_GPROF 68 #include <sys/gmon.h> 69 #endif 70 #ifdef CONFIG_EVENTFD 71 #include <sys/eventfd.h> 72 #endif 73 #ifdef CONFIG_EPOLL 74 #include <sys/epoll.h> 75 #endif 76 #ifdef CONFIG_ATTR 77 #include "qemu/xattr.h" 78 #endif 79 #ifdef CONFIG_SENDFILE 80 #include <sys/sendfile.h> 81 #endif 82 83 #define termios host_termios 84 #define winsize host_winsize 85 #define termio host_termio 86 #define sgttyb host_sgttyb /* same as target */ 87 #define tchars host_tchars /* same as target */ 88 #define ltchars host_ltchars /* same as target */ 89 90 #include <linux/termios.h> 91 #include <linux/unistd.h> 92 #include <linux/cdrom.h> 93 #include <linux/hdreg.h> 94 #include <linux/soundcard.h> 95 #include <linux/kd.h> 96 #include <linux/mtio.h> 97 #include <linux/fs.h> 98 #if defined(CONFIG_FIEMAP) 99 #include <linux/fiemap.h> 100 #endif 101 #include <linux/fb.h> 102 #include <linux/vt.h> 103 #include <linux/dm-ioctl.h> 104 #include <linux/reboot.h> 105 #include <linux/route.h> 106 #include <linux/filter.h> 107 #include <linux/blkpg.h> 108 #include <netpacket/packet.h> 109 #include <linux/netlink.h> 110 #ifdef CONFIG_RTNETLINK 111 #include <linux/rtnetlink.h> 112 #include <linux/if_bridge.h> 113 #endif 114 #include <linux/audit.h> 115 #include "linux_loop.h" 116 #include "uname.h" 117 118 #include "qemu.h" 119 120 #ifndef CLONE_IO 121 #define CLONE_IO 0x80000000 /* Clone io context */ 122 #endif 123 124 /* We can't directly call the host clone syscall, because this will 125 * badly confuse libc (breaking mutexes, for example). So we must 126 * divide clone flags into: 127 * * flag combinations that look like pthread_create() 128 * * flag combinations that look like fork() 129 * * flags we can implement within QEMU itself 130 * * flags we can't support and will return an error for 131 */ 132 /* For thread creation, all these flags must be present; for 133 * fork, none must be present. 
134 */ 135 #define CLONE_THREAD_FLAGS \ 136 (CLONE_VM | CLONE_FS | CLONE_FILES | \ 137 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM) 138 139 /* These flags are ignored: 140 * CLONE_DETACHED is now ignored by the kernel; 141 * CLONE_IO is just an optimisation hint to the I/O scheduler 142 */ 143 #define CLONE_IGNORED_FLAGS \ 144 (CLONE_DETACHED | CLONE_IO) 145 146 /* Flags for fork which we can implement within QEMU itself */ 147 #define CLONE_OPTIONAL_FORK_FLAGS \ 148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) 150 151 /* Flags for thread creation which we can implement within QEMU itself */ 152 #define CLONE_OPTIONAL_THREAD_FLAGS \ 153 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 154 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT) 155 156 #define CLONE_INVALID_FORK_FLAGS \ 157 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS)) 158 159 #define CLONE_INVALID_THREAD_FLAGS \ 160 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \ 161 CLONE_IGNORED_FLAGS)) 162 163 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits 164 * have almost all been allocated. We cannot support any of 165 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC, 166 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED. 167 * The checks against the invalid thread masks above will catch these. 168 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.) 169 */ 170 171 //#define DEBUG 172 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted 173 * once. This exercises the codepaths for restart. 
174 */ 175 //#define DEBUG_ERESTARTSYS 176 177 //#include <linux/msdos_fs.h> 178 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 179 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 180 181 #undef _syscall0 182 #undef _syscall1 183 #undef _syscall2 184 #undef _syscall3 185 #undef _syscall4 186 #undef _syscall5 187 #undef _syscall6 188 189 #define _syscall0(type,name) \ 190 static type name (void) \ 191 { \ 192 return syscall(__NR_##name); \ 193 } 194 195 #define _syscall1(type,name,type1,arg1) \ 196 static type name (type1 arg1) \ 197 { \ 198 return syscall(__NR_##name, arg1); \ 199 } 200 201 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 202 static type name (type1 arg1,type2 arg2) \ 203 { \ 204 return syscall(__NR_##name, arg1, arg2); \ 205 } 206 207 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 208 static type name (type1 arg1,type2 arg2,type3 arg3) \ 209 { \ 210 return syscall(__NR_##name, arg1, arg2, arg3); \ 211 } 212 213 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 215 { \ 216 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 217 } 218 219 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 220 type5,arg5) \ 221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 222 { \ 223 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 224 } 225 226 227 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 228 type5,arg5,type6,arg6) \ 229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 230 type6 arg6) \ 231 { \ 232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 233 } 234 235 236 #define __NR_sys_uname __NR_uname 237 #define __NR_sys_getcwd1 __NR_getcwd 238 #define __NR_sys_getdents __NR_getdents 239 #define __NR_sys_getdents64 __NR_getdents64 240 #define __NR_sys_getpriority 
__NR_getpriority 241 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 242 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo 243 #define __NR_sys_syslog __NR_syslog 244 #define __NR_sys_futex __NR_futex 245 #define __NR_sys_inotify_init __NR_inotify_init 246 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 247 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 248 249 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 250 defined(__s390x__) 251 #define __NR__llseek __NR_lseek 252 #endif 253 254 /* Newer kernel ports have llseek() instead of _llseek() */ 255 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 256 #define TARGET_NR__llseek TARGET_NR_llseek 257 #endif 258 259 #ifdef __NR_gettid 260 _syscall0(int, gettid) 261 #else 262 /* This is a replacement for the host gettid() and must return a host 263 errno. */ 264 static int gettid(void) { 265 return -ENOSYS; 266 } 267 #endif 268 #if defined(TARGET_NR_getdents) && defined(__NR_getdents) 269 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 270 #endif 271 #if !defined(__NR_getdents) || \ 272 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 273 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 274 #endif 275 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 276 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 277 loff_t *, res, uint, wh); 278 #endif 279 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo) 280 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig, 281 siginfo_t *, uinfo) 282 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 283 #ifdef __NR_exit_group 284 _syscall1(int,exit_group,int,error_code) 285 #endif 286 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 287 _syscall1(int,set_tid_address,int *,tidptr) 288 #endif 289 #if defined(TARGET_NR_futex) && defined(__NR_futex) 290 
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 291 const struct timespec *,timeout,int *,uaddr2,int,val3) 292 #endif 293 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 294 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 295 unsigned long *, user_mask_ptr); 296 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 297 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 298 unsigned long *, user_mask_ptr); 299 #define __NR_sys_getcpu __NR_getcpu 300 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache); 301 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 302 void *, arg); 303 _syscall2(int, capget, struct __user_cap_header_struct *, header, 304 struct __user_cap_data_struct *, data); 305 _syscall2(int, capset, struct __user_cap_header_struct *, header, 306 struct __user_cap_data_struct *, data); 307 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 308 _syscall2(int, ioprio_get, int, which, int, who) 309 #endif 310 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 311 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio) 312 #endif 313 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 314 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags) 315 #endif 316 317 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 318 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type, 319 unsigned long, idx1, unsigned long, idx2) 320 #endif 321 322 static bitmask_transtbl fcntl_flags_tbl[] = { 323 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 324 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 325 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 326 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 327 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 328 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 329 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 330 { 
TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 331 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 332 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 333 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 334 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 335 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 336 #if defined(O_DIRECT) 337 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 338 #endif 339 #if defined(O_NOATIME) 340 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 341 #endif 342 #if defined(O_CLOEXEC) 343 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 344 #endif 345 #if defined(O_PATH) 346 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 347 #endif 348 #if defined(O_TMPFILE) 349 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE }, 350 #endif 351 /* Don't terminate the list prematurely on 64-bit host+guest. */ 352 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 353 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 354 #endif 355 { 0, 0, 0, 0 } 356 }; 357 358 enum { 359 QEMU_IFLA_BR_UNSPEC, 360 QEMU_IFLA_BR_FORWARD_DELAY, 361 QEMU_IFLA_BR_HELLO_TIME, 362 QEMU_IFLA_BR_MAX_AGE, 363 QEMU_IFLA_BR_AGEING_TIME, 364 QEMU_IFLA_BR_STP_STATE, 365 QEMU_IFLA_BR_PRIORITY, 366 QEMU_IFLA_BR_VLAN_FILTERING, 367 QEMU_IFLA_BR_VLAN_PROTOCOL, 368 QEMU_IFLA_BR_GROUP_FWD_MASK, 369 QEMU_IFLA_BR_ROOT_ID, 370 QEMU_IFLA_BR_BRIDGE_ID, 371 QEMU_IFLA_BR_ROOT_PORT, 372 QEMU_IFLA_BR_ROOT_PATH_COST, 373 QEMU_IFLA_BR_TOPOLOGY_CHANGE, 374 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED, 375 QEMU_IFLA_BR_HELLO_TIMER, 376 QEMU_IFLA_BR_TCN_TIMER, 377 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER, 378 QEMU_IFLA_BR_GC_TIMER, 379 QEMU_IFLA_BR_GROUP_ADDR, 380 QEMU_IFLA_BR_FDB_FLUSH, 381 QEMU_IFLA_BR_MCAST_ROUTER, 382 QEMU_IFLA_BR_MCAST_SNOOPING, 383 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR, 384 QEMU_IFLA_BR_MCAST_QUERIER, 385 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY, 386 
QEMU_IFLA_BR_MCAST_HASH_MAX, 387 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT, 388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT, 389 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL, 390 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL, 391 QEMU_IFLA_BR_MCAST_QUERIER_INTVL, 392 QEMU_IFLA_BR_MCAST_QUERY_INTVL, 393 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, 394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL, 395 QEMU_IFLA_BR_NF_CALL_IPTABLES, 396 QEMU_IFLA_BR_NF_CALL_IP6TABLES, 397 QEMU_IFLA_BR_NF_CALL_ARPTABLES, 398 QEMU_IFLA_BR_VLAN_DEFAULT_PVID, 399 QEMU_IFLA_BR_PAD, 400 QEMU_IFLA_BR_VLAN_STATS_ENABLED, 401 QEMU_IFLA_BR_MCAST_STATS_ENABLED, 402 QEMU___IFLA_BR_MAX, 403 }; 404 405 enum { 406 QEMU_IFLA_UNSPEC, 407 QEMU_IFLA_ADDRESS, 408 QEMU_IFLA_BROADCAST, 409 QEMU_IFLA_IFNAME, 410 QEMU_IFLA_MTU, 411 QEMU_IFLA_LINK, 412 QEMU_IFLA_QDISC, 413 QEMU_IFLA_STATS, 414 QEMU_IFLA_COST, 415 QEMU_IFLA_PRIORITY, 416 QEMU_IFLA_MASTER, 417 QEMU_IFLA_WIRELESS, 418 QEMU_IFLA_PROTINFO, 419 QEMU_IFLA_TXQLEN, 420 QEMU_IFLA_MAP, 421 QEMU_IFLA_WEIGHT, 422 QEMU_IFLA_OPERSTATE, 423 QEMU_IFLA_LINKMODE, 424 QEMU_IFLA_LINKINFO, 425 QEMU_IFLA_NET_NS_PID, 426 QEMU_IFLA_IFALIAS, 427 QEMU_IFLA_NUM_VF, 428 QEMU_IFLA_VFINFO_LIST, 429 QEMU_IFLA_STATS64, 430 QEMU_IFLA_VF_PORTS, 431 QEMU_IFLA_PORT_SELF, 432 QEMU_IFLA_AF_SPEC, 433 QEMU_IFLA_GROUP, 434 QEMU_IFLA_NET_NS_FD, 435 QEMU_IFLA_EXT_MASK, 436 QEMU_IFLA_PROMISCUITY, 437 QEMU_IFLA_NUM_TX_QUEUES, 438 QEMU_IFLA_NUM_RX_QUEUES, 439 QEMU_IFLA_CARRIER, 440 QEMU_IFLA_PHYS_PORT_ID, 441 QEMU_IFLA_CARRIER_CHANGES, 442 QEMU_IFLA_PHYS_SWITCH_ID, 443 QEMU_IFLA_LINK_NETNSID, 444 QEMU_IFLA_PHYS_PORT_NAME, 445 QEMU_IFLA_PROTO_DOWN, 446 QEMU_IFLA_GSO_MAX_SEGS, 447 QEMU_IFLA_GSO_MAX_SIZE, 448 QEMU_IFLA_PAD, 449 QEMU_IFLA_XDP, 450 QEMU___IFLA_MAX 451 }; 452 453 enum { 454 QEMU_IFLA_BRPORT_UNSPEC, 455 QEMU_IFLA_BRPORT_STATE, 456 QEMU_IFLA_BRPORT_PRIORITY, 457 QEMU_IFLA_BRPORT_COST, 458 QEMU_IFLA_BRPORT_MODE, 459 QEMU_IFLA_BRPORT_GUARD, 460 QEMU_IFLA_BRPORT_PROTECT, 461 QEMU_IFLA_BRPORT_FAST_LEAVE, 462 
QEMU_IFLA_BRPORT_LEARNING, 463 QEMU_IFLA_BRPORT_UNICAST_FLOOD, 464 QEMU_IFLA_BRPORT_PROXYARP, 465 QEMU_IFLA_BRPORT_LEARNING_SYNC, 466 QEMU_IFLA_BRPORT_PROXYARP_WIFI, 467 QEMU_IFLA_BRPORT_ROOT_ID, 468 QEMU_IFLA_BRPORT_BRIDGE_ID, 469 QEMU_IFLA_BRPORT_DESIGNATED_PORT, 470 QEMU_IFLA_BRPORT_DESIGNATED_COST, 471 QEMU_IFLA_BRPORT_ID, 472 QEMU_IFLA_BRPORT_NO, 473 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, 474 QEMU_IFLA_BRPORT_CONFIG_PENDING, 475 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER, 476 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER, 477 QEMU_IFLA_BRPORT_HOLD_TIMER, 478 QEMU_IFLA_BRPORT_FLUSH, 479 QEMU_IFLA_BRPORT_MULTICAST_ROUTER, 480 QEMU_IFLA_BRPORT_PAD, 481 QEMU___IFLA_BRPORT_MAX 482 }; 483 484 enum { 485 QEMU_IFLA_INFO_UNSPEC, 486 QEMU_IFLA_INFO_KIND, 487 QEMU_IFLA_INFO_DATA, 488 QEMU_IFLA_INFO_XSTATS, 489 QEMU_IFLA_INFO_SLAVE_KIND, 490 QEMU_IFLA_INFO_SLAVE_DATA, 491 QEMU___IFLA_INFO_MAX, 492 }; 493 494 enum { 495 QEMU_IFLA_INET_UNSPEC, 496 QEMU_IFLA_INET_CONF, 497 QEMU___IFLA_INET_MAX, 498 }; 499 500 enum { 501 QEMU_IFLA_INET6_UNSPEC, 502 QEMU_IFLA_INET6_FLAGS, 503 QEMU_IFLA_INET6_CONF, 504 QEMU_IFLA_INET6_STATS, 505 QEMU_IFLA_INET6_MCAST, 506 QEMU_IFLA_INET6_CACHEINFO, 507 QEMU_IFLA_INET6_ICMP6STATS, 508 QEMU_IFLA_INET6_TOKEN, 509 QEMU_IFLA_INET6_ADDR_GEN_MODE, 510 QEMU___IFLA_INET6_MAX 511 }; 512 513 typedef abi_long (*TargetFdDataFunc)(void *, size_t); 514 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t); 515 typedef struct TargetFdTrans { 516 TargetFdDataFunc host_to_target_data; 517 TargetFdDataFunc target_to_host_data; 518 TargetFdAddrFunc target_to_host_addr; 519 } TargetFdTrans; 520 521 static TargetFdTrans **target_fd_trans; 522 523 static unsigned int target_fd_max; 524 525 static TargetFdDataFunc fd_trans_target_to_host_data(int fd) 526 { 527 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 528 return target_fd_trans[fd]->target_to_host_data; 529 } 530 return NULL; 531 } 532 533 static TargetFdDataFunc fd_trans_host_to_target_data(int fd) 534 { 
535 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 536 return target_fd_trans[fd]->host_to_target_data; 537 } 538 return NULL; 539 } 540 541 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd) 542 { 543 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 544 return target_fd_trans[fd]->target_to_host_addr; 545 } 546 return NULL; 547 } 548 549 static void fd_trans_register(int fd, TargetFdTrans *trans) 550 { 551 unsigned int oldmax; 552 553 if (fd >= target_fd_max) { 554 oldmax = target_fd_max; 555 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */ 556 target_fd_trans = g_renew(TargetFdTrans *, 557 target_fd_trans, target_fd_max); 558 memset((void *)(target_fd_trans + oldmax), 0, 559 (target_fd_max - oldmax) * sizeof(TargetFdTrans *)); 560 } 561 target_fd_trans[fd] = trans; 562 } 563 564 static void fd_trans_unregister(int fd) 565 { 566 if (fd >= 0 && fd < target_fd_max) { 567 target_fd_trans[fd] = NULL; 568 } 569 } 570 571 static void fd_trans_dup(int oldfd, int newfd) 572 { 573 fd_trans_unregister(newfd); 574 if (oldfd < target_fd_max && target_fd_trans[oldfd]) { 575 fd_trans_register(newfd, target_fd_trans[oldfd]); 576 } 577 } 578 579 static int sys_getcwd1(char *buf, size_t size) 580 { 581 if (getcwd(buf, size) == NULL) { 582 /* getcwd() sets errno */ 583 return (-1); 584 } 585 return strlen(buf)+1; 586 } 587 588 #ifdef TARGET_NR_utimensat 589 #if defined(__NR_utimensat) 590 #define __NR_sys_utimensat __NR_utimensat 591 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 592 const struct timespec *,tsp,int,flags) 593 #else 594 static int sys_utimensat(int dirfd, const char *pathname, 595 const struct timespec times[2], int flags) 596 { 597 errno = ENOSYS; 598 return -1; 599 } 600 #endif 601 #endif /* TARGET_NR_utimensat */ 602 603 #ifdef CONFIG_INOTIFY 604 #include <sys/inotify.h> 605 606 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 607 static int sys_inotify_init(void) 608 { 609 return 
(inotify_init()); 610 } 611 #endif 612 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 613 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 614 { 615 return (inotify_add_watch(fd, pathname, mask)); 616 } 617 #endif 618 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 619 static int sys_inotify_rm_watch(int fd, int32_t wd) 620 { 621 return (inotify_rm_watch(fd, wd)); 622 } 623 #endif 624 #ifdef CONFIG_INOTIFY1 625 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 626 static int sys_inotify_init1(int flags) 627 { 628 return (inotify_init1(flags)); 629 } 630 #endif 631 #endif 632 #else 633 /* Userspace can usually survive runtime without inotify */ 634 #undef TARGET_NR_inotify_init 635 #undef TARGET_NR_inotify_init1 636 #undef TARGET_NR_inotify_add_watch 637 #undef TARGET_NR_inotify_rm_watch 638 #endif /* CONFIG_INOTIFY */ 639 640 #if defined(TARGET_NR_prlimit64) 641 #ifndef __NR_prlimit64 642 # define __NR_prlimit64 -1 643 #endif 644 #define __NR_sys_prlimit64 __NR_prlimit64 645 /* The glibc rlimit structure may not be that used by the underlying syscall */ 646 struct host_rlimit64 { 647 uint64_t rlim_cur; 648 uint64_t rlim_max; 649 }; 650 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 651 const struct host_rlimit64 *, new_limit, 652 struct host_rlimit64 *, old_limit) 653 #endif 654 655 656 #if defined(TARGET_NR_timer_create) 657 /* Maxiumum of 32 active POSIX timers allowed at any one time. */ 658 static timer_t g_posix_timers[32] = { 0, } ; 659 660 static inline int next_free_host_timer(void) 661 { 662 int k ; 663 /* FIXME: Does finding the next free slot require a lock? 
*/ 664 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) { 665 if (g_posix_timers[k] == 0) { 666 g_posix_timers[k] = (timer_t) 1; 667 return k; 668 } 669 } 670 return -1; 671 } 672 #endif 673 674 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ 675 #ifdef TARGET_ARM 676 static inline int regpairs_aligned(void *cpu_env, int num) 677 { 678 return ((((CPUARMState *)cpu_env)->eabi) == 1) ; 679 } 680 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32) 681 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } 682 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64) 683 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs 684 * of registers which translates to the same as ARM/MIPS, because we start with 685 * r3 as arg1 */ 686 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } 687 #elif defined(TARGET_SH4) 688 /* SH4 doesn't align register pairs, except for p{read,write}64 */ 689 static inline int regpairs_aligned(void *cpu_env, int num) 690 { 691 switch (num) { 692 case TARGET_NR_pread64: 693 case TARGET_NR_pwrite64: 694 return 1; 695 696 default: 697 return 0; 698 } 699 } 700 #else 701 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; } 702 #endif 703 704 #define ERRNO_TABLE_SIZE 1200 705 706 /* target_to_host_errno_table[] is initialized from 707 * host_to_target_errno_table[] in syscall_init(). */ 708 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 709 }; 710 711 /* 712 * This list is the union of errno values overridden in asm-<arch>/errno.h 713 * minus the errnos that are not actually generic to all archs. 
 */
/* Sparse table: entries not listed stay zero, and the lookup helpers below
 * fall back to passing the errno value through unchanged in that case. */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
[EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 776 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 777 [EADDRINUSE] = TARGET_EADDRINUSE, 778 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 779 [ENETDOWN] = TARGET_ENETDOWN, 780 [ENETUNREACH] = TARGET_ENETUNREACH, 781 [ENETRESET] = TARGET_ENETRESET, 782 [ECONNABORTED] = TARGET_ECONNABORTED, 783 [ECONNRESET] = TARGET_ECONNRESET, 784 [ENOBUFS] = TARGET_ENOBUFS, 785 [EISCONN] = TARGET_EISCONN, 786 [ENOTCONN] = TARGET_ENOTCONN, 787 [EUCLEAN] = TARGET_EUCLEAN, 788 [ENOTNAM] = TARGET_ENOTNAM, 789 [ENAVAIL] = TARGET_ENAVAIL, 790 [EISNAM] = TARGET_EISNAM, 791 [EREMOTEIO] = TARGET_EREMOTEIO, 792 [EDQUOT] = TARGET_EDQUOT, 793 [ESHUTDOWN] = TARGET_ESHUTDOWN, 794 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 795 [ETIMEDOUT] = TARGET_ETIMEDOUT, 796 [ECONNREFUSED] = TARGET_ECONNREFUSED, 797 [EHOSTDOWN] = TARGET_EHOSTDOWN, 798 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 799 [EALREADY] = TARGET_EALREADY, 800 [EINPROGRESS] = TARGET_EINPROGRESS, 801 [ESTALE] = TARGET_ESTALE, 802 [ECANCELED] = TARGET_ECANCELED, 803 [ENOMEDIUM] = TARGET_ENOMEDIUM, 804 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 805 #ifdef ENOKEY 806 [ENOKEY] = TARGET_ENOKEY, 807 #endif 808 #ifdef EKEYEXPIRED 809 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 810 #endif 811 #ifdef EKEYREVOKED 812 [EKEYREVOKED] = TARGET_EKEYREVOKED, 813 #endif 814 #ifdef EKEYREJECTED 815 [EKEYREJECTED] = TARGET_EKEYREJECTED, 816 #endif 817 #ifdef EOWNERDEAD 818 [EOWNERDEAD] = TARGET_EOWNERDEAD, 819 #endif 820 #ifdef ENOTRECOVERABLE 821 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 822 #endif 823 #ifdef ENOMSG 824 [ENOMSG] = TARGET_ENOMSG, 825 #endif 826 #ifdef ERKFILL 827 [ERFKILL] = TARGET_ERFKILL, 828 #endif 829 #ifdef EHWPOISON 830 [EHWPOISON] = TARGET_EHWPOISON, 831 #endif 832 }; 833 834 static inline int host_to_target_errno(int err) 835 { 836 if (err >= 0 && err < ERRNO_TABLE_SIZE && 837 host_to_target_errno_table[err]) { 838 return host_to_target_errno_table[err]; 839 } 840 return err; 841 } 842 843 static inline int target_to_host_errno(int 
err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

/* Convert a host syscall-style result (-1 with errno set) into the
 * guest convention of returning a negative target errno. */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

/* True when ret is in the negative-errno range, mirroring the kernel's
 * convention that values in (-4096, 0) encode errors. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

/* strerror() analogue for target errno values, with special text for
 * QEMU's two internal pseudo-errnos. */
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

/* safe_syscallN: generate a safe_<name>() wrapper that invokes the raw
 * host syscall via safe_syscall(), which guarantees correct interaction
 * with guest signal handling (restartable syscalls).  One macro per
 * argument count, as with _syscallN above. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name,
arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

/* Signal-safe wrappers for the blocking syscalls used by the emulator. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
    int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
    struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
    int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
    fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* NOTE: version 1 of the MSGRCV sub-call passes msgp and msgtype as
 * separate arguments rather than via an ipc_kludge struct. */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...)
safe_syscall(__NR_fcntl, __VA_ARGS__) 1032 #endif 1033 1034 static inline int host_to_target_sock_type(int host_type) 1035 { 1036 int target_type; 1037 1038 switch (host_type & 0xf /* SOCK_TYPE_MASK */) { 1039 case SOCK_DGRAM: 1040 target_type = TARGET_SOCK_DGRAM; 1041 break; 1042 case SOCK_STREAM: 1043 target_type = TARGET_SOCK_STREAM; 1044 break; 1045 default: 1046 target_type = host_type & 0xf /* SOCK_TYPE_MASK */; 1047 break; 1048 } 1049 1050 #if defined(SOCK_CLOEXEC) 1051 if (host_type & SOCK_CLOEXEC) { 1052 target_type |= TARGET_SOCK_CLOEXEC; 1053 } 1054 #endif 1055 1056 #if defined(SOCK_NONBLOCK) 1057 if (host_type & SOCK_NONBLOCK) { 1058 target_type |= TARGET_SOCK_NONBLOCK; 1059 } 1060 #endif 1061 1062 return target_type; 1063 } 1064 1065 static abi_ulong target_brk; 1066 static abi_ulong target_original_brk; 1067 static abi_ulong brk_page; 1068 1069 void target_set_brk(abi_ulong new_brk) 1070 { 1071 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); 1072 brk_page = HOST_PAGE_ALIGN(target_brk); 1073 } 1074 1075 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) 1076 #define DEBUGF_BRK(message, args...) 1077 1078 /* do_brk() must return target values and target errnos. */ 1079 abi_long do_brk(abi_ulong new_brk) 1080 { 1081 abi_long mapped_addr; 1082 abi_ulong new_alloc_size; 1083 1084 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk); 1085 1086 if (!new_brk) { 1087 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk); 1088 return target_brk; 1089 } 1090 if (new_brk < target_original_brk) { 1091 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n", 1092 target_brk); 1093 return target_brk; 1094 } 1095 1096 /* If the new brk is less than the highest page reserved to the 1097 * target heap allocation, set it and we're almost done... */ 1098 if (new_brk <= brk_page) { 1099 /* Heap contents are initialized to zero, as for anonymous 1100 * mapped pages. 
*/ 1101 if (new_brk > target_brk) { 1102 memset(g2h(target_brk), 0, new_brk - target_brk); 1103 } 1104 target_brk = new_brk; 1105 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk); 1106 return target_brk; 1107 } 1108 1109 /* We need to allocate more memory after the brk... Note that 1110 * we don't use MAP_FIXED because that will map over the top of 1111 * any existing mapping (like the one with the host libc or qemu 1112 * itself); instead we treat "mapped but at wrong address" as 1113 * a failure and unmap again. 1114 */ 1115 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page); 1116 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, 1117 PROT_READ|PROT_WRITE, 1118 MAP_ANON|MAP_PRIVATE, 0, 0)); 1119 1120 if (mapped_addr == brk_page) { 1121 /* Heap contents are initialized to zero, as for anonymous 1122 * mapped pages. Technically the new pages are already 1123 * initialized to zero since they *are* anonymous mapped 1124 * pages, however we have to take care with the contents that 1125 * come from the remaining part of the previous page: it may 1126 * contains garbage data due to a previous heap usage (grown 1127 * then shrunken). */ 1128 memset(g2h(target_brk), 0, brk_page - target_brk); 1129 1130 target_brk = new_brk; 1131 brk_page = HOST_PAGE_ALIGN(target_brk); 1132 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n", 1133 target_brk); 1134 return target_brk; 1135 } else if (mapped_addr != -1) { 1136 /* Mapped but at wrong address, meaning there wasn't actually 1137 * enough space for this brk. 1138 */ 1139 target_munmap(mapped_addr, new_alloc_size); 1140 mapped_addr = -1; 1141 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk); 1142 } 1143 else { 1144 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk); 1145 } 1146 1147 #if defined(TARGET_ALPHA) 1148 /* We (partially) emulate OSF/1 on Alpha, which requires we 1149 return a proper errno, not an unchanged brk value. 
*/ 1150 return -TARGET_ENOMEM; 1151 #endif 1152 /* For everything else, return the previous break. */ 1153 return target_brk; 1154 } 1155 1156 static inline abi_long copy_from_user_fdset(fd_set *fds, 1157 abi_ulong target_fds_addr, 1158 int n) 1159 { 1160 int i, nw, j, k; 1161 abi_ulong b, *target_fds; 1162 1163 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS); 1164 if (!(target_fds = lock_user(VERIFY_READ, 1165 target_fds_addr, 1166 sizeof(abi_ulong) * nw, 1167 1))) 1168 return -TARGET_EFAULT; 1169 1170 FD_ZERO(fds); 1171 k = 0; 1172 for (i = 0; i < nw; i++) { 1173 /* grab the abi_ulong */ 1174 __get_user(b, &target_fds[i]); 1175 for (j = 0; j < TARGET_ABI_BITS; j++) { 1176 /* check the bit inside the abi_ulong */ 1177 if ((b >> j) & 1) 1178 FD_SET(k, fds); 1179 k++; 1180 } 1181 } 1182 1183 unlock_user(target_fds, target_fds_addr, 0); 1184 1185 return 0; 1186 } 1187 1188 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr, 1189 abi_ulong target_fds_addr, 1190 int n) 1191 { 1192 if (target_fds_addr) { 1193 if (copy_from_user_fdset(fds, target_fds_addr, n)) 1194 return -TARGET_EFAULT; 1195 *fds_ptr = fds; 1196 } else { 1197 *fds_ptr = NULL; 1198 } 1199 return 0; 1200 } 1201 1202 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr, 1203 const fd_set *fds, 1204 int n) 1205 { 1206 int i, nw, j, k; 1207 abi_long v; 1208 abi_ulong *target_fds; 1209 1210 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS); 1211 if (!(target_fds = lock_user(VERIFY_WRITE, 1212 target_fds_addr, 1213 sizeof(abi_ulong) * nw, 1214 0))) 1215 return -TARGET_EFAULT; 1216 1217 k = 0; 1218 for (i = 0; i < nw; i++) { 1219 v = 0; 1220 for (j = 0; j < TARGET_ABI_BITS; j++) { 1221 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j); 1222 k++; 1223 } 1224 __put_user(v, &target_fds[i]); 1225 } 1226 1227 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw); 1228 1229 return 0; 1230 } 1231 1232 #if defined(__alpha__) 1233 #define HOST_HZ 1024 1234 #else 1235 #define HOST_HZ 100 1236 
#endif

/* Rescale a host clock_t tick count into target ticks (TARGET_HZ). */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Copy a host struct rusage out to guest memory at target_addr,
 * byte-swapping each field.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

/* Convert a guest rlimit value to the host rlim_t.  TARGET_RLIM_INFINITY,
 * and any value that does not survive the round trip through rlim_t,
 * becomes RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Truncated by the narrowing assignment above => treat as infinite. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

/* Convert a host rlim_t to the guest encoding; values that don't fit
 * in a (signed) abi_long are reported as TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

/* Map a target RLIMIT_* code to the host constant; codes we don't
 * recognise are passed through numerically unchanged.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/* Read a guest struct timeval into *tv.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/* Write *tv out as a guest struct timeval.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

/* Read a guest struct timezone into *tz.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into *attr.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write *attr out as a guest struct mq_attr.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 * Implemented on top of the host pselect6 (via safe_pselect6) with the
 * guest timeval converted to a timespec; on success the fd sets and the
 * remaining timeout are copied back to the guest.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: a single guest pointer to a struct packing all five
 * arguments; unpack it and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

/* Host pipe2(); returns -ENOSYS when the host has no pipe2 support. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

/* Emulate pipe()/pipe2() and deliver the two fds to the guest, honouring
 * per-target register conventions for the original pipe syscall.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/* Read a guest ip_mreq/ip_mreqn into *mreqn; the ifindex field is only
 * present (and swapped) when the guest passed the longer mreqn form.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

/* Read a guest sockaddr into *addr, fixing up the family field and any
 * family-specific fields that need byte-swapping.  A per-fd translation
 * hook, when registered, takes over entirely.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/* Write a host sockaddr out to guest memory at target_addr, swapping
 * the family field and family-specific fields as needed.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only touch sa_family if the buffer is big enough to hold it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
target_nl->nl_groups = tswap32(target_nl->nl_groups); 1659 } else if (addr->sa_family == AF_PACKET) { 1660 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr; 1661 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex); 1662 target_ll->sll_hatype = tswap16(target_ll->sll_hatype); 1663 } else if (addr->sa_family == AF_INET6 && 1664 len >= sizeof(struct target_sockaddr_in6)) { 1665 struct target_sockaddr_in6 *target_in6 = 1666 (struct target_sockaddr_in6 *)target_saddr; 1667 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id); 1668 } 1669 unlock_user(target_saddr, target_addr, len); 1670 1671 return 0; 1672 } 1673 1674 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1675 struct target_msghdr *target_msgh) 1676 { 1677 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1678 abi_long msg_controllen; 1679 abi_ulong target_cmsg_addr; 1680 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1681 socklen_t space = 0; 1682 1683 msg_controllen = tswapal(target_msgh->msg_controllen); 1684 if (msg_controllen < sizeof (struct target_cmsghdr)) 1685 goto the_end; 1686 target_cmsg_addr = tswapal(target_msgh->msg_control); 1687 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1688 target_cmsg_start = target_cmsg; 1689 if (!target_cmsg) 1690 return -TARGET_EFAULT; 1691 1692 while (cmsg && target_cmsg) { 1693 void *data = CMSG_DATA(cmsg); 1694 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1695 1696 int len = tswapal(target_cmsg->cmsg_len) 1697 - sizeof(struct target_cmsghdr); 1698 1699 space += CMSG_SPACE(len); 1700 if (space > msgh->msg_controllen) { 1701 space -= CMSG_SPACE(len); 1702 /* This is a QEMU bug, since we allocated the payload 1703 * area ourselves (unlike overflow in host-to-target 1704 * conversion, which is just the guest giving us a buffer 1705 * that's too small). 
It can't happen for the payload types 1706 * we currently support; if it becomes an issue in future 1707 * we would need to improve our allocation strategy to 1708 * something more intelligent than "twice the size of the 1709 * target buffer we're reading from". 1710 */ 1711 gemu_log("Host cmsg overflow\n"); 1712 break; 1713 } 1714 1715 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1716 cmsg->cmsg_level = SOL_SOCKET; 1717 } else { 1718 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1719 } 1720 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1721 cmsg->cmsg_len = CMSG_LEN(len); 1722 1723 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { 1724 int *fd = (int *)data; 1725 int *target_fd = (int *)target_data; 1726 int i, numfds = len / sizeof(int); 1727 1728 for (i = 0; i < numfds; i++) { 1729 __get_user(fd[i], target_fd + i); 1730 } 1731 } else if (cmsg->cmsg_level == SOL_SOCKET 1732 && cmsg->cmsg_type == SCM_CREDENTIALS) { 1733 struct ucred *cred = (struct ucred *)data; 1734 struct target_ucred *target_cred = 1735 (struct target_ucred *)target_data; 1736 1737 __get_user(cred->pid, &target_cred->pid); 1738 __get_user(cred->uid, &target_cred->uid); 1739 __get_user(cred->gid, &target_cred->gid); 1740 } else { 1741 gemu_log("Unsupported ancillary data: %d/%d\n", 1742 cmsg->cmsg_level, cmsg->cmsg_type); 1743 memcpy(data, target_data, len); 1744 } 1745 1746 cmsg = CMSG_NXTHDR(msgh, cmsg); 1747 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 1748 target_cmsg_start); 1749 } 1750 unlock_user(target_cmsg, target_cmsg_addr, 0); 1751 the_end: 1752 msgh->msg_controllen = space; 1753 return 0; 1754 } 1755 1756 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1757 struct msghdr *msgh) 1758 { 1759 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1760 abi_long msg_controllen; 1761 abi_ulong target_cmsg_addr; 1762 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1763 socklen_t space = 0; 1764 1765 
msg_controllen = tswapal(target_msgh->msg_controllen); 1766 if (msg_controllen < sizeof (struct target_cmsghdr)) 1767 goto the_end; 1768 target_cmsg_addr = tswapal(target_msgh->msg_control); 1769 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1770 target_cmsg_start = target_cmsg; 1771 if (!target_cmsg) 1772 return -TARGET_EFAULT; 1773 1774 while (cmsg && target_cmsg) { 1775 void *data = CMSG_DATA(cmsg); 1776 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1777 1778 int len = cmsg->cmsg_len - sizeof(struct cmsghdr); 1779 int tgt_len, tgt_space; 1780 1781 /* We never copy a half-header but may copy half-data; 1782 * this is Linux's behaviour in put_cmsg(). Note that 1783 * truncation here is a guest problem (which we report 1784 * to the guest via the CTRUNC bit), unlike truncation 1785 * in target_to_host_cmsg, which is a QEMU bug. 1786 */ 1787 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1788 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1789 break; 1790 } 1791 1792 if (cmsg->cmsg_level == SOL_SOCKET) { 1793 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1794 } else { 1795 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1796 } 1797 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1798 1799 /* Payload types which need a different size of payload on 1800 * the target must adjust tgt_len here. 1801 */ 1802 switch (cmsg->cmsg_level) { 1803 case SOL_SOCKET: 1804 switch (cmsg->cmsg_type) { 1805 case SO_TIMESTAMP: 1806 tgt_len = sizeof(struct target_timeval); 1807 break; 1808 default: 1809 break; 1810 } 1811 default: 1812 tgt_len = len; 1813 break; 1814 } 1815 1816 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 1817 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1818 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 1819 } 1820 1821 /* We must now copy-and-convert len bytes of payload 1822 * into tgt_len bytes of destination space. 
Bear in mind 1823 * that in both source and destination we may be dealing 1824 * with a truncated value! 1825 */ 1826 switch (cmsg->cmsg_level) { 1827 case SOL_SOCKET: 1828 switch (cmsg->cmsg_type) { 1829 case SCM_RIGHTS: 1830 { 1831 int *fd = (int *)data; 1832 int *target_fd = (int *)target_data; 1833 int i, numfds = tgt_len / sizeof(int); 1834 1835 for (i = 0; i < numfds; i++) { 1836 __put_user(fd[i], target_fd + i); 1837 } 1838 break; 1839 } 1840 case SO_TIMESTAMP: 1841 { 1842 struct timeval *tv = (struct timeval *)data; 1843 struct target_timeval *target_tv = 1844 (struct target_timeval *)target_data; 1845 1846 if (len != sizeof(struct timeval) || 1847 tgt_len != sizeof(struct target_timeval)) { 1848 goto unimplemented; 1849 } 1850 1851 /* copy struct timeval to target */ 1852 __put_user(tv->tv_sec, &target_tv->tv_sec); 1853 __put_user(tv->tv_usec, &target_tv->tv_usec); 1854 break; 1855 } 1856 case SCM_CREDENTIALS: 1857 { 1858 struct ucred *cred = (struct ucred *)data; 1859 struct target_ucred *target_cred = 1860 (struct target_ucred *)target_data; 1861 1862 __put_user(cred->pid, &target_cred->pid); 1863 __put_user(cred->uid, &target_cred->uid); 1864 __put_user(cred->gid, &target_cred->gid); 1865 break; 1866 } 1867 default: 1868 goto unimplemented; 1869 } 1870 break; 1871 1872 case SOL_IP: 1873 switch (cmsg->cmsg_type) { 1874 case IP_TTL: 1875 { 1876 uint32_t *v = (uint32_t *)data; 1877 uint32_t *t_int = (uint32_t *)target_data; 1878 1879 if (len != sizeof(uint32_t) || 1880 tgt_len != sizeof(uint32_t)) { 1881 goto unimplemented; 1882 } 1883 __put_user(*v, t_int); 1884 break; 1885 } 1886 case IP_RECVERR: 1887 { 1888 struct errhdr_t { 1889 struct sock_extended_err ee; 1890 struct sockaddr_in offender; 1891 }; 1892 struct errhdr_t *errh = (struct errhdr_t *)data; 1893 struct errhdr_t *target_errh = 1894 (struct errhdr_t *)target_data; 1895 1896 if (len != sizeof(struct errhdr_t) || 1897 tgt_len != sizeof(struct errhdr_t)) { 1898 goto unimplemented; 1899 } 1900 
__put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1901 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1902 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1903 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1904 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1905 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1906 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1907 host_to_target_sockaddr((unsigned long) &target_errh->offender, 1908 (void *) &errh->offender, sizeof(errh->offender)); 1909 break; 1910 } 1911 default: 1912 goto unimplemented; 1913 } 1914 break; 1915 1916 case SOL_IPV6: 1917 switch (cmsg->cmsg_type) { 1918 case IPV6_HOPLIMIT: 1919 { 1920 uint32_t *v = (uint32_t *)data; 1921 uint32_t *t_int = (uint32_t *)target_data; 1922 1923 if (len != sizeof(uint32_t) || 1924 tgt_len != sizeof(uint32_t)) { 1925 goto unimplemented; 1926 } 1927 __put_user(*v, t_int); 1928 break; 1929 } 1930 case IPV6_RECVERR: 1931 { 1932 struct errhdr6_t { 1933 struct sock_extended_err ee; 1934 struct sockaddr_in6 offender; 1935 }; 1936 struct errhdr6_t *errh = (struct errhdr6_t *)data; 1937 struct errhdr6_t *target_errh = 1938 (struct errhdr6_t *)target_data; 1939 1940 if (len != sizeof(struct errhdr6_t) || 1941 tgt_len != sizeof(struct errhdr6_t)) { 1942 goto unimplemented; 1943 } 1944 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1945 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1946 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1947 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1948 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1949 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1950 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1951 host_to_target_sockaddr((unsigned long) &target_errh->offender, 1952 (void *) &errh->offender, sizeof(errh->offender)); 1953 break; 1954 } 1955 default: 1956 goto unimplemented; 1957 } 1958 break; 1959 1960 default: 
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            /* Fallback: copy the payload through unconverted and
             * zero-fill any extra room on the target side. */
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        /* Never account for more space than the target buffer has left. */
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

/* Byte-swap every field of a netlink message header in place.
 * tswap* swap between host and target order, so this works for
 * both conversion directions. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}

/* Walk a buffer of host-order netlink messages, converting each
 * payload with host_to_target_nlmsg() and then swapping each header
 * to target order.  NLMSG_DONE and NLMSG_ERROR terminate the walk.
 * Returns 0 on success or the callback's negative error code. */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Save the length in host order before the header is swapped,
         * so the loop can still advance afterwards. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                /* Still swap the header so the target sees a
                 * consistent record before the error is reported. */
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}

/* Mirror of the above for the target->host direction: here the header
 * is swapped to host order FIRST, so nlmsg_type/nlmsg_len can be
 * examined natively, then the payload is converted. */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* nlmsg_len is already host order here (header swapped above). */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}

#ifdef CONFIG_RTNETLINK
/* Walk a host-order netlink attribute (nlattr) list, converting each
 * payload via the callback and then swapping the attribute header to
 * target order.  'context' is passed through to the callback. */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Keep the host-order length for advancing the cursor. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        /* Convert the payload while nla_type is still host order,
         * then swap the header. */
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}

/* Same walk for rtattr-based attribute lists (rtnetlink payloads). */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Keep the host-order length for advancing the cursor. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        /* Convert the payload first (callback needs host-order
         * rta_type/rta_len), then swap the attribute header. */
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}

/* Address of an nlattr's payload, immediately after its header. */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)

/* Byte-swap the payload of one IFLA_BR_* (bridge) attribute in place,
 * according to the width the kernel ABI defines for that attribute.
 * Unknown types are logged and passed through untouched. */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        /* single bytes need no swapping */
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* As above, but for bridge-port (IFLA_BRPORT_*) attributes. */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Accumulates the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings seen
 * while walking an IFLA_LINKINFO attribute, so that the nested
 * IFLA_INFO_DATA blobs can be dispatched by link kind. */
struct linkinfo_context {
    int len;          /* length of 'name' (kind string) */
    char *name;       /* points into the attribute buffer */
    int slave_len;
    char *slave_name;
};

/* Convert one nested IFLA_INFO_* attribute.  The KIND attributes are
 * recorded in 'context'; the DATA attributes are converted by the
 * kind-specific walker (only "bridge" is supported). */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case QEMU_IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case QEMU_IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case QEMU_IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case QEMU_IFLA_INFO_DATA:
        /* NOTE(review): relies on the kernel emitting INFO_KIND before
         * INFO_DATA; li_context->name would be NULL otherwise — confirm. */
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case QEMU_IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}

/* Convert nested IFLA_INET_* (per-device IPv4 config) attributes. */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    case QEMU_IFLA_INET_CONF:
        /* array of uint32_t config values */
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}

/* Convert nested IFLA_INET6_* (per-device IPv6 config) attributes. */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}

static abi_long
/* Dispatch an IFLA_AF_SPEC sub-attribute by address family; the
 * nla_type here is the AF_* value, and the payload is a nested
 * attribute list converted by the family-specific walker. */
host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Byte-swap the payload of one IFLA_* attribute of an RTM_*LINK
 * message in place, per the width/structure the kernel ABI defines. */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uin8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Byte-swap the payload of one IFA_* attribute (RTM_*ADDR messages). */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Byte-swap the payload of one RTA_* attribute (RTM_*ROUTE messages). */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Per-message-type convenience wrappers over the rtattr walker. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}

static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}

static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}

/* Convert the fixed payload header (ifinfomsg/ifaddrmsg/rtmsg) of one
 * rtnetlink message, then its trailing attribute list.  The message
 * header itself is swapped by the caller (for_each_nlmsg). */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* nlmsg_len is still host order at this point. */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}

static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}

/* Target->host rtattr walker: swap the attribute header to host order
 * first, then convert the payload with the callback. */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}

/* No IFLA_* attribute conversion is implemented for the target->host
 * direction yet; unknown types are logged and passed through. */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Convert one target-order IFA_* attribute payload to host order. */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Convert one target-order RTA_* attribute payload to host order. */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Per-message-type convenience wrappers over the rtattr walker. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}

static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}

static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}

/* Convert the fixed payload header and attributes of one rtnetlink
 * request from target to host order.  GET requests dump via the
 * attribute-less form, so their payloads are left untouched.  The
 * message header was already swapped by the caller. */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}

static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
#endif /* CONFIG_RTNETLINK */

/* No host audit message payloads are converted yet; anything the
 * kernel sends back is rejected with EINVAL. */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }
    return 0;
}

static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}

/* Accept user-space audit records (their payload is an ASCII string,
 * so no byte swapping is required); reject everything else. */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ...
         AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}

static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}

/* do_setsockopt() Must return target values and target errnos.
 *
 * Copies the option value from guest memory at optval_addr, converts
 * it to the host representation where the layouts differ (timevals,
 * BPF filter programs, multicast membership structs, byte order of
 * integer options), and forwards it to the host setsockopt(2). */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These options accept either an int or a single byte
             * from the application; widen a byte to an int here. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept both the short ip_mreq and the full ip_mreqn form. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* struct ip_mreq_source holds only in_addr fields (network
             * byte order), so the guest bytes can be passed through. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* ipi6_ifindex is host-endian; the address is already
             * network byte order and needs no conversion. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is an array of eight 32-bit bitmask words. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

            /* Shared tail for SO_RCVTIMEO and SO_SNDTIMEO: convert the
             * target timeval layout to the host's. */
set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            /* Rebuild the BPF program with host-endian instruction
             * fields (jt/jf are single bytes, no swap needed). */
            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            /* Copy into a NUL-terminated buffer: the guest string is
             * not guaranteed to be terminated within optlen bytes. */
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument: translate the target option
         * number and fall through to the common int handler below. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos.
*/ 3188 static abi_long do_getsockopt(int sockfd, int level, int optname, 3189 abi_ulong optval_addr, abi_ulong optlen) 3190 { 3191 abi_long ret; 3192 int len, val; 3193 socklen_t lv; 3194 3195 switch(level) { 3196 case TARGET_SOL_SOCKET: 3197 level = SOL_SOCKET; 3198 switch (optname) { 3199 /* These don't just return a single integer */ 3200 case TARGET_SO_LINGER: 3201 case TARGET_SO_RCVTIMEO: 3202 case TARGET_SO_SNDTIMEO: 3203 case TARGET_SO_PEERNAME: 3204 goto unimplemented; 3205 case TARGET_SO_PEERCRED: { 3206 struct ucred cr; 3207 socklen_t crlen; 3208 struct target_ucred *tcr; 3209 3210 if (get_user_u32(len, optlen)) { 3211 return -TARGET_EFAULT; 3212 } 3213 if (len < 0) { 3214 return -TARGET_EINVAL; 3215 } 3216 3217 crlen = sizeof(cr); 3218 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 3219 &cr, &crlen)); 3220 if (ret < 0) { 3221 return ret; 3222 } 3223 if (len > crlen) { 3224 len = crlen; 3225 } 3226 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 3227 return -TARGET_EFAULT; 3228 } 3229 __put_user(cr.pid, &tcr->pid); 3230 __put_user(cr.uid, &tcr->uid); 3231 __put_user(cr.gid, &tcr->gid); 3232 unlock_user_struct(tcr, optval_addr, 1); 3233 if (put_user_u32(len, optlen)) { 3234 return -TARGET_EFAULT; 3235 } 3236 break; 3237 } 3238 /* Options with 'int' argument. 
*/ 3239 case TARGET_SO_DEBUG: 3240 optname = SO_DEBUG; 3241 goto int_case; 3242 case TARGET_SO_REUSEADDR: 3243 optname = SO_REUSEADDR; 3244 goto int_case; 3245 case TARGET_SO_TYPE: 3246 optname = SO_TYPE; 3247 goto int_case; 3248 case TARGET_SO_ERROR: 3249 optname = SO_ERROR; 3250 goto int_case; 3251 case TARGET_SO_DONTROUTE: 3252 optname = SO_DONTROUTE; 3253 goto int_case; 3254 case TARGET_SO_BROADCAST: 3255 optname = SO_BROADCAST; 3256 goto int_case; 3257 case TARGET_SO_SNDBUF: 3258 optname = SO_SNDBUF; 3259 goto int_case; 3260 case TARGET_SO_RCVBUF: 3261 optname = SO_RCVBUF; 3262 goto int_case; 3263 case TARGET_SO_KEEPALIVE: 3264 optname = SO_KEEPALIVE; 3265 goto int_case; 3266 case TARGET_SO_OOBINLINE: 3267 optname = SO_OOBINLINE; 3268 goto int_case; 3269 case TARGET_SO_NO_CHECK: 3270 optname = SO_NO_CHECK; 3271 goto int_case; 3272 case TARGET_SO_PRIORITY: 3273 optname = SO_PRIORITY; 3274 goto int_case; 3275 #ifdef SO_BSDCOMPAT 3276 case TARGET_SO_BSDCOMPAT: 3277 optname = SO_BSDCOMPAT; 3278 goto int_case; 3279 #endif 3280 case TARGET_SO_PASSCRED: 3281 optname = SO_PASSCRED; 3282 goto int_case; 3283 case TARGET_SO_TIMESTAMP: 3284 optname = SO_TIMESTAMP; 3285 goto int_case; 3286 case TARGET_SO_RCVLOWAT: 3287 optname = SO_RCVLOWAT; 3288 goto int_case; 3289 case TARGET_SO_ACCEPTCONN: 3290 optname = SO_ACCEPTCONN; 3291 goto int_case; 3292 default: 3293 goto int_case; 3294 } 3295 break; 3296 case SOL_TCP: 3297 /* TCP options all take an 'int' value. 
*/ 3298 int_case: 3299 if (get_user_u32(len, optlen)) 3300 return -TARGET_EFAULT; 3301 if (len < 0) 3302 return -TARGET_EINVAL; 3303 lv = sizeof(lv); 3304 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 3305 if (ret < 0) 3306 return ret; 3307 if (optname == SO_TYPE) { 3308 val = host_to_target_sock_type(val); 3309 } 3310 if (len > lv) 3311 len = lv; 3312 if (len == 4) { 3313 if (put_user_u32(val, optval_addr)) 3314 return -TARGET_EFAULT; 3315 } else { 3316 if (put_user_u8(val, optval_addr)) 3317 return -TARGET_EFAULT; 3318 } 3319 if (put_user_u32(len, optlen)) 3320 return -TARGET_EFAULT; 3321 break; 3322 case SOL_IP: 3323 switch(optname) { 3324 case IP_TOS: 3325 case IP_TTL: 3326 case IP_HDRINCL: 3327 case IP_ROUTER_ALERT: 3328 case IP_RECVOPTS: 3329 case IP_RETOPTS: 3330 case IP_PKTINFO: 3331 case IP_MTU_DISCOVER: 3332 case IP_RECVERR: 3333 case IP_RECVTOS: 3334 #ifdef IP_FREEBIND 3335 case IP_FREEBIND: 3336 #endif 3337 case IP_MULTICAST_TTL: 3338 case IP_MULTICAST_LOOP: 3339 if (get_user_u32(len, optlen)) 3340 return -TARGET_EFAULT; 3341 if (len < 0) 3342 return -TARGET_EINVAL; 3343 lv = sizeof(lv); 3344 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 3345 if (ret < 0) 3346 return ret; 3347 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 3348 len = 1; 3349 if (put_user_u32(len, optlen) 3350 || put_user_u8(val, optval_addr)) 3351 return -TARGET_EFAULT; 3352 } else { 3353 if (len > sizeof(int)) 3354 len = sizeof(int); 3355 if (put_user_u32(len, optlen) 3356 || put_user_u32(val, optval_addr)) 3357 return -TARGET_EFAULT; 3358 } 3359 break; 3360 default: 3361 ret = -TARGET_ENOPROTOOPT; 3362 break; 3363 } 3364 break; 3365 default: 3366 unimplemented: 3367 gemu_log("getsockopt level=%d optname=%d not yet supported\n", 3368 level, optname); 3369 ret = -TARGET_EOPNOTSUPP; 3370 break; 3371 } 3372 return ret; 3373 } 3374 3375 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 3376 abi_ulong count, int copy) 3377 { 
3378 struct target_iovec *target_vec; 3379 struct iovec *vec; 3380 abi_ulong total_len, max_len; 3381 int i; 3382 int err = 0; 3383 bool bad_address = false; 3384 3385 if (count == 0) { 3386 errno = 0; 3387 return NULL; 3388 } 3389 if (count > IOV_MAX) { 3390 errno = EINVAL; 3391 return NULL; 3392 } 3393 3394 vec = g_try_new0(struct iovec, count); 3395 if (vec == NULL) { 3396 errno = ENOMEM; 3397 return NULL; 3398 } 3399 3400 target_vec = lock_user(VERIFY_READ, target_addr, 3401 count * sizeof(struct target_iovec), 1); 3402 if (target_vec == NULL) { 3403 err = EFAULT; 3404 goto fail2; 3405 } 3406 3407 /* ??? If host page size > target page size, this will result in a 3408 value larger than what we can actually support. */ 3409 max_len = 0x7fffffff & TARGET_PAGE_MASK; 3410 total_len = 0; 3411 3412 for (i = 0; i < count; i++) { 3413 abi_ulong base = tswapal(target_vec[i].iov_base); 3414 abi_long len = tswapal(target_vec[i].iov_len); 3415 3416 if (len < 0) { 3417 err = EINVAL; 3418 goto fail; 3419 } else if (len == 0) { 3420 /* Zero length pointer is ignored. */ 3421 vec[i].iov_base = 0; 3422 } else { 3423 vec[i].iov_base = lock_user(type, base, len, copy); 3424 /* If the first buffer pointer is bad, this is a fault. But 3425 * subsequent bad buffers will result in a partial write; this 3426 * is realized by filling the vector with null pointers and 3427 * zero lengths. 
*/ 3428 if (!vec[i].iov_base) { 3429 if (i == 0) { 3430 err = EFAULT; 3431 goto fail; 3432 } else { 3433 bad_address = true; 3434 } 3435 } 3436 if (bad_address) { 3437 len = 0; 3438 } 3439 if (len > max_len - total_len) { 3440 len = max_len - total_len; 3441 } 3442 } 3443 vec[i].iov_len = len; 3444 total_len += len; 3445 } 3446 3447 unlock_user(target_vec, target_addr, 0); 3448 return vec; 3449 3450 fail: 3451 while (--i >= 0) { 3452 if (tswapal(target_vec[i].iov_len) > 0) { 3453 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0); 3454 } 3455 } 3456 unlock_user(target_vec, target_addr, 0); 3457 fail2: 3458 g_free(vec); 3459 errno = err; 3460 return NULL; 3461 } 3462 3463 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 3464 abi_ulong count, int copy) 3465 { 3466 struct target_iovec *target_vec; 3467 int i; 3468 3469 target_vec = lock_user(VERIFY_READ, target_addr, 3470 count * sizeof(struct target_iovec), 1); 3471 if (target_vec) { 3472 for (i = 0; i < count; i++) { 3473 abi_ulong base = tswapal(target_vec[i].iov_base); 3474 abi_long len = tswapal(target_vec[i].iov_len); 3475 if (len < 0) { 3476 break; 3477 } 3478 unlock_user(vec[i].iov_base, base, copy ? 
vec[i].iov_len : 0); 3479 } 3480 unlock_user(target_vec, target_addr, 0); 3481 } 3482 3483 g_free(vec); 3484 } 3485 3486 static inline int target_to_host_sock_type(int *type) 3487 { 3488 int host_type = 0; 3489 int target_type = *type; 3490 3491 switch (target_type & TARGET_SOCK_TYPE_MASK) { 3492 case TARGET_SOCK_DGRAM: 3493 host_type = SOCK_DGRAM; 3494 break; 3495 case TARGET_SOCK_STREAM: 3496 host_type = SOCK_STREAM; 3497 break; 3498 default: 3499 host_type = target_type & TARGET_SOCK_TYPE_MASK; 3500 break; 3501 } 3502 if (target_type & TARGET_SOCK_CLOEXEC) { 3503 #if defined(SOCK_CLOEXEC) 3504 host_type |= SOCK_CLOEXEC; 3505 #else 3506 return -TARGET_EINVAL; 3507 #endif 3508 } 3509 if (target_type & TARGET_SOCK_NONBLOCK) { 3510 #if defined(SOCK_NONBLOCK) 3511 host_type |= SOCK_NONBLOCK; 3512 #elif !defined(O_NONBLOCK) 3513 return -TARGET_EINVAL; 3514 #endif 3515 } 3516 *type = host_type; 3517 return 0; 3518 } 3519 3520 /* Try to emulate socket type flags after socket creation. */ 3521 static int sock_flags_fixup(int fd, int target_type) 3522 { 3523 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 3524 if (target_type & TARGET_SOCK_NONBLOCK) { 3525 int flags = fcntl(fd, F_GETFL); 3526 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 3527 close(fd); 3528 return -TARGET_EINVAL; 3529 } 3530 } 3531 #endif 3532 return fd; 3533 } 3534 3535 static abi_long packet_target_to_host_sockaddr(void *host_addr, 3536 abi_ulong target_addr, 3537 socklen_t len) 3538 { 3539 struct sockaddr *addr = host_addr; 3540 struct target_sockaddr *target_saddr; 3541 3542 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 3543 if (!target_saddr) { 3544 return -TARGET_EFAULT; 3545 } 3546 3547 memcpy(addr, target_saddr, len); 3548 addr->sa_family = tswap16(target_saddr->sa_family); 3549 /* spkt_protocol is big-endian */ 3550 3551 unlock_user(target_saddr, target_addr, 0); 3552 return 0; 3553 } 3554 3555 static TargetFdTrans target_packet_trans = { 3556 .target_to_host_addr = 
packet_target_to_host_sockaddr, 3557 }; 3558 3559 #ifdef CONFIG_RTNETLINK 3560 static abi_long netlink_route_target_to_host(void *buf, size_t len) 3561 { 3562 abi_long ret; 3563 3564 ret = target_to_host_nlmsg_route(buf, len); 3565 if (ret < 0) { 3566 return ret; 3567 } 3568 3569 return len; 3570 } 3571 3572 static abi_long netlink_route_host_to_target(void *buf, size_t len) 3573 { 3574 abi_long ret; 3575 3576 ret = host_to_target_nlmsg_route(buf, len); 3577 if (ret < 0) { 3578 return ret; 3579 } 3580 3581 return len; 3582 } 3583 3584 static TargetFdTrans target_netlink_route_trans = { 3585 .target_to_host_data = netlink_route_target_to_host, 3586 .host_to_target_data = netlink_route_host_to_target, 3587 }; 3588 #endif /* CONFIG_RTNETLINK */ 3589 3590 static abi_long netlink_audit_target_to_host(void *buf, size_t len) 3591 { 3592 abi_long ret; 3593 3594 ret = target_to_host_nlmsg_audit(buf, len); 3595 if (ret < 0) { 3596 return ret; 3597 } 3598 3599 return len; 3600 } 3601 3602 static abi_long netlink_audit_host_to_target(void *buf, size_t len) 3603 { 3604 abi_long ret; 3605 3606 ret = host_to_target_nlmsg_audit(buf, len); 3607 if (ret < 0) { 3608 return ret; 3609 } 3610 3611 return len; 3612 } 3613 3614 static TargetFdTrans target_netlink_audit_trans = { 3615 .target_to_host_data = netlink_audit_target_to_host, 3616 .host_to_target_data = netlink_audit_host_to_target, 3617 }; 3618 3619 /* do_socket() Must return target values and target errnos. 
*/ 3620 static abi_long do_socket(int domain, int type, int protocol) 3621 { 3622 int target_type = type; 3623 int ret; 3624 3625 ret = target_to_host_sock_type(&type); 3626 if (ret) { 3627 return ret; 3628 } 3629 3630 if (domain == PF_NETLINK && !( 3631 #ifdef CONFIG_RTNETLINK 3632 protocol == NETLINK_ROUTE || 3633 #endif 3634 protocol == NETLINK_KOBJECT_UEVENT || 3635 protocol == NETLINK_AUDIT)) { 3636 return -EPFNOSUPPORT; 3637 } 3638 3639 if (domain == AF_PACKET || 3640 (domain == AF_INET && type == SOCK_PACKET)) { 3641 protocol = tswap16(protocol); 3642 } 3643 3644 ret = get_errno(socket(domain, type, protocol)); 3645 if (ret >= 0) { 3646 ret = sock_flags_fixup(ret, target_type); 3647 if (type == SOCK_PACKET) { 3648 /* Manage an obsolete case : 3649 * if socket type is SOCK_PACKET, bind by name 3650 */ 3651 fd_trans_register(ret, &target_packet_trans); 3652 } else if (domain == PF_NETLINK) { 3653 switch (protocol) { 3654 #ifdef CONFIG_RTNETLINK 3655 case NETLINK_ROUTE: 3656 fd_trans_register(ret, &target_netlink_route_trans); 3657 break; 3658 #endif 3659 case NETLINK_KOBJECT_UEVENT: 3660 /* nothing to do: messages are strings */ 3661 break; 3662 case NETLINK_AUDIT: 3663 fd_trans_register(ret, &target_netlink_audit_trans); 3664 break; 3665 default: 3666 g_assert_not_reached(); 3667 } 3668 } 3669 } 3670 return ret; 3671 } 3672 3673 /* do_bind() Must return target values and target errnos. */ 3674 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3675 socklen_t addrlen) 3676 { 3677 void *addr; 3678 abi_long ret; 3679 3680 if ((int)addrlen < 0) { 3681 return -TARGET_EINVAL; 3682 } 3683 3684 addr = alloca(addrlen+1); 3685 3686 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3687 if (ret) 3688 return ret; 3689 3690 return get_errno(bind(sockfd, addr, addrlen)); 3691 } 3692 3693 /* do_connect() Must return target values and target errnos. 
*/ 3694 static abi_long do_connect(int sockfd, abi_ulong target_addr, 3695 socklen_t addrlen) 3696 { 3697 void *addr; 3698 abi_long ret; 3699 3700 if ((int)addrlen < 0) { 3701 return -TARGET_EINVAL; 3702 } 3703 3704 addr = alloca(addrlen+1); 3705 3706 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3707 if (ret) 3708 return ret; 3709 3710 return get_errno(safe_connect(sockfd, addr, addrlen)); 3711 } 3712 3713 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 3714 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 3715 int flags, int send) 3716 { 3717 abi_long ret, len; 3718 struct msghdr msg; 3719 abi_ulong count; 3720 struct iovec *vec; 3721 abi_ulong target_vec; 3722 3723 if (msgp->msg_name) { 3724 msg.msg_namelen = tswap32(msgp->msg_namelen); 3725 msg.msg_name = alloca(msg.msg_namelen+1); 3726 ret = target_to_host_sockaddr(fd, msg.msg_name, 3727 tswapal(msgp->msg_name), 3728 msg.msg_namelen); 3729 if (ret == -TARGET_EFAULT) { 3730 /* For connected sockets msg_name and msg_namelen must 3731 * be ignored, so returning EFAULT immediately is wrong. 3732 * Instead, pass a bad msg_name to the host kernel, and 3733 * let it decide whether to return EFAULT or not. 3734 */ 3735 msg.msg_name = (void *)-1; 3736 } else if (ret) { 3737 goto out2; 3738 } 3739 } else { 3740 msg.msg_name = NULL; 3741 msg.msg_namelen = 0; 3742 } 3743 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 3744 msg.msg_control = alloca(msg.msg_controllen); 3745 msg.msg_flags = tswap32(msgp->msg_flags); 3746 3747 count = tswapal(msgp->msg_iovlen); 3748 target_vec = tswapal(msgp->msg_iov); 3749 3750 if (count > IOV_MAX) { 3751 /* sendrcvmsg returns a different errno for this condition than 3752 * readv/writev, so we must catch it here before lock_iovec() does. 3753 */ 3754 ret = -TARGET_EMSGSIZE; 3755 goto out2; 3756 } 3757 3758 vec = lock_iovec(send ? 
VERIFY_READ : VERIFY_WRITE, 3759 target_vec, count, send); 3760 if (vec == NULL) { 3761 ret = -host_to_target_errno(errno); 3762 goto out2; 3763 } 3764 msg.msg_iovlen = count; 3765 msg.msg_iov = vec; 3766 3767 if (send) { 3768 if (fd_trans_target_to_host_data(fd)) { 3769 void *host_msg; 3770 3771 host_msg = g_malloc(msg.msg_iov->iov_len); 3772 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len); 3773 ret = fd_trans_target_to_host_data(fd)(host_msg, 3774 msg.msg_iov->iov_len); 3775 if (ret >= 0) { 3776 msg.msg_iov->iov_base = host_msg; 3777 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3778 } 3779 g_free(host_msg); 3780 } else { 3781 ret = target_to_host_cmsg(&msg, msgp); 3782 if (ret == 0) { 3783 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3784 } 3785 } 3786 } else { 3787 ret = get_errno(safe_recvmsg(fd, &msg, flags)); 3788 if (!is_error(ret)) { 3789 len = ret; 3790 if (fd_trans_host_to_target_data(fd)) { 3791 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base, 3792 len); 3793 } else { 3794 ret = host_to_target_cmsg(msgp, &msg); 3795 } 3796 if (!is_error(ret)) { 3797 msgp->msg_namelen = tswap32(msg.msg_namelen); 3798 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) { 3799 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 3800 msg.msg_name, msg.msg_namelen); 3801 if (ret) { 3802 goto out; 3803 } 3804 } 3805 3806 ret = len; 3807 } 3808 } 3809 } 3810 3811 out: 3812 unlock_iovec(vec, target_vec, count, !send); 3813 out2: 3814 return ret; 3815 } 3816 3817 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 3818 int flags, int send) 3819 { 3820 abi_long ret; 3821 struct target_msghdr *msgp; 3822 3823 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 3824 msgp, 3825 target_msg, 3826 send ? 1 : 0)) { 3827 return -TARGET_EFAULT; 3828 } 3829 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 3830 unlock_user_struct(msgp, target_msg, send ? 
0 : 1); 3831 return ret; 3832 } 3833 3834 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 3835 * so it might not have this *mmsg-specific flag either. 3836 */ 3837 #ifndef MSG_WAITFORONE 3838 #define MSG_WAITFORONE 0x10000 3839 #endif 3840 3841 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec, 3842 unsigned int vlen, unsigned int flags, 3843 int send) 3844 { 3845 struct target_mmsghdr *mmsgp; 3846 abi_long ret = 0; 3847 int i; 3848 3849 if (vlen > UIO_MAXIOV) { 3850 vlen = UIO_MAXIOV; 3851 } 3852 3853 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1); 3854 if (!mmsgp) { 3855 return -TARGET_EFAULT; 3856 } 3857 3858 for (i = 0; i < vlen; i++) { 3859 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send); 3860 if (is_error(ret)) { 3861 break; 3862 } 3863 mmsgp[i].msg_len = tswap32(ret); 3864 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 3865 if (flags & MSG_WAITFORONE) { 3866 flags |= MSG_DONTWAIT; 3867 } 3868 } 3869 3870 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i); 3871 3872 /* Return number of datagrams sent if we sent any at all; 3873 * otherwise return the error. 3874 */ 3875 if (i) { 3876 return i; 3877 } 3878 return ret; 3879 } 3880 3881 /* do_accept4() Must return target values and target errnos. 
*/ 3882 static abi_long do_accept4(int fd, abi_ulong target_addr, 3883 abi_ulong target_addrlen_addr, int flags) 3884 { 3885 socklen_t addrlen; 3886 void *addr; 3887 abi_long ret; 3888 int host_flags; 3889 3890 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3891 3892 if (target_addr == 0) { 3893 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3894 } 3895 3896 /* linux returns EINVAL if addrlen pointer is invalid */ 3897 if (get_user_u32(addrlen, target_addrlen_addr)) 3898 return -TARGET_EINVAL; 3899 3900 if ((int)addrlen < 0) { 3901 return -TARGET_EINVAL; 3902 } 3903 3904 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3905 return -TARGET_EINVAL; 3906 3907 addr = alloca(addrlen); 3908 3909 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags)); 3910 if (!is_error(ret)) { 3911 host_to_target_sockaddr(target_addr, addr, addrlen); 3912 if (put_user_u32(addrlen, target_addrlen_addr)) 3913 ret = -TARGET_EFAULT; 3914 } 3915 return ret; 3916 } 3917 3918 /* do_getpeername() Must return target values and target errnos. */ 3919 static abi_long do_getpeername(int fd, abi_ulong target_addr, 3920 abi_ulong target_addrlen_addr) 3921 { 3922 socklen_t addrlen; 3923 void *addr; 3924 abi_long ret; 3925 3926 if (get_user_u32(addrlen, target_addrlen_addr)) 3927 return -TARGET_EFAULT; 3928 3929 if ((int)addrlen < 0) { 3930 return -TARGET_EINVAL; 3931 } 3932 3933 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3934 return -TARGET_EFAULT; 3935 3936 addr = alloca(addrlen); 3937 3938 ret = get_errno(getpeername(fd, addr, &addrlen)); 3939 if (!is_error(ret)) { 3940 host_to_target_sockaddr(target_addr, addr, addrlen); 3941 if (put_user_u32(addrlen, target_addrlen_addr)) 3942 ret = -TARGET_EFAULT; 3943 } 3944 return ret; 3945 } 3946 3947 /* do_getsockname() Must return target values and target errnos. 
*/ 3948 static abi_long do_getsockname(int fd, abi_ulong target_addr, 3949 abi_ulong target_addrlen_addr) 3950 { 3951 socklen_t addrlen; 3952 void *addr; 3953 abi_long ret; 3954 3955 if (get_user_u32(addrlen, target_addrlen_addr)) 3956 return -TARGET_EFAULT; 3957 3958 if ((int)addrlen < 0) { 3959 return -TARGET_EINVAL; 3960 } 3961 3962 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3963 return -TARGET_EFAULT; 3964 3965 addr = alloca(addrlen); 3966 3967 ret = get_errno(getsockname(fd, addr, &addrlen)); 3968 if (!is_error(ret)) { 3969 host_to_target_sockaddr(target_addr, addr, addrlen); 3970 if (put_user_u32(addrlen, target_addrlen_addr)) 3971 ret = -TARGET_EFAULT; 3972 } 3973 return ret; 3974 } 3975 3976 /* do_socketpair() Must return target values and target errnos. */ 3977 static abi_long do_socketpair(int domain, int type, int protocol, 3978 abi_ulong target_tab_addr) 3979 { 3980 int tab[2]; 3981 abi_long ret; 3982 3983 target_to_host_sock_type(&type); 3984 3985 ret = get_errno(socketpair(domain, type, protocol, tab)); 3986 if (!is_error(ret)) { 3987 if (put_user_s32(tab[0], target_tab_addr) 3988 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 3989 ret = -TARGET_EFAULT; 3990 } 3991 return ret; 3992 } 3993 3994 /* do_sendto() Must return target values and target errnos. 
*/ 3995 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 3996 abi_ulong target_addr, socklen_t addrlen) 3997 { 3998 void *addr; 3999 void *host_msg; 4000 void *copy_msg = NULL; 4001 abi_long ret; 4002 4003 if ((int)addrlen < 0) { 4004 return -TARGET_EINVAL; 4005 } 4006 4007 host_msg = lock_user(VERIFY_READ, msg, len, 1); 4008 if (!host_msg) 4009 return -TARGET_EFAULT; 4010 if (fd_trans_target_to_host_data(fd)) { 4011 copy_msg = host_msg; 4012 host_msg = g_malloc(len); 4013 memcpy(host_msg, copy_msg, len); 4014 ret = fd_trans_target_to_host_data(fd)(host_msg, len); 4015 if (ret < 0) { 4016 goto fail; 4017 } 4018 } 4019 if (target_addr) { 4020 addr = alloca(addrlen+1); 4021 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 4022 if (ret) { 4023 goto fail; 4024 } 4025 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen)); 4026 } else { 4027 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0)); 4028 } 4029 fail: 4030 if (copy_msg) { 4031 g_free(host_msg); 4032 host_msg = copy_msg; 4033 } 4034 unlock_user(host_msg, msg, 0); 4035 return ret; 4036 } 4037 4038 /* do_recvfrom() Must return target values and target errnos. */ 4039 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 4040 abi_ulong target_addr, 4041 abi_ulong target_addrlen) 4042 { 4043 socklen_t addrlen; 4044 void *addr; 4045 void *host_msg; 4046 abi_long ret; 4047 4048 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 4049 if (!host_msg) 4050 return -TARGET_EFAULT; 4051 if (target_addr) { 4052 if (get_user_u32(addrlen, target_addrlen)) { 4053 ret = -TARGET_EFAULT; 4054 goto fail; 4055 } 4056 if ((int)addrlen < 0) { 4057 ret = -TARGET_EINVAL; 4058 goto fail; 4059 } 4060 addr = alloca(addrlen); 4061 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, 4062 addr, &addrlen)); 4063 } else { 4064 addr = NULL; /* To keep compiler quiet. 
*/ 4065 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0)); 4066 } 4067 if (!is_error(ret)) { 4068 if (fd_trans_host_to_target_data(fd)) { 4069 ret = fd_trans_host_to_target_data(fd)(host_msg, ret); 4070 } 4071 if (target_addr) { 4072 host_to_target_sockaddr(target_addr, addr, addrlen); 4073 if (put_user_u32(addrlen, target_addrlen)) { 4074 ret = -TARGET_EFAULT; 4075 goto fail; 4076 } 4077 } 4078 unlock_user(host_msg, msg, len); 4079 } else { 4080 fail: 4081 unlock_user(host_msg, msg, 0); 4082 } 4083 return ret; 4084 } 4085 4086 #ifdef TARGET_NR_socketcall 4087 /* do_socketcall() must return target values and target errnos. */ 4088 static abi_long do_socketcall(int num, abi_ulong vptr) 4089 { 4090 static const unsigned nargs[] = { /* number of arguments per operation */ 4091 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */ 4092 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */ 4093 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */ 4094 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */ 4095 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */ 4096 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */ 4097 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */ 4098 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */ 4099 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */ 4100 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */ 4101 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */ 4102 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */ 4103 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */ 4104 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 4105 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 4106 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */ 4107 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */ 4108 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */ 4109 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */ 4110 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */ 4111 }; 
4112 abi_long a[6]; /* max 6 args */ 4113 unsigned i; 4114 4115 /* check the range of the first argument num */ 4116 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */ 4117 if (num < 1 || num > TARGET_SYS_SENDMMSG) { 4118 return -TARGET_EINVAL; 4119 } 4120 /* ensure we have space for args */ 4121 if (nargs[num] > ARRAY_SIZE(a)) { 4122 return -TARGET_EINVAL; 4123 } 4124 /* collect the arguments in a[] according to nargs[] */ 4125 for (i = 0; i < nargs[num]; ++i) { 4126 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 4127 return -TARGET_EFAULT; 4128 } 4129 } 4130 /* now when we have the args, invoke the appropriate underlying function */ 4131 switch (num) { 4132 case TARGET_SYS_SOCKET: /* domain, type, protocol */ 4133 return do_socket(a[0], a[1], a[2]); 4134 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */ 4135 return do_bind(a[0], a[1], a[2]); 4136 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */ 4137 return do_connect(a[0], a[1], a[2]); 4138 case TARGET_SYS_LISTEN: /* sockfd, backlog */ 4139 return get_errno(listen(a[0], a[1])); 4140 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */ 4141 return do_accept4(a[0], a[1], a[2], 0); 4142 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */ 4143 return do_getsockname(a[0], a[1], a[2]); 4144 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */ 4145 return do_getpeername(a[0], a[1], a[2]); 4146 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */ 4147 return do_socketpair(a[0], a[1], a[2], a[3]); 4148 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */ 4149 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 4150 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */ 4151 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 4152 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */ 4153 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 4154 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */ 4155 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], 
a[5]); 4156 case TARGET_SYS_SHUTDOWN: /* sockfd, how */ 4157 return get_errno(shutdown(a[0], a[1])); 4158 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 4159 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 4160 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 4161 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 4162 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */ 4163 return do_sendrecvmsg(a[0], a[1], a[2], 1); 4164 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */ 4165 return do_sendrecvmsg(a[0], a[1], a[2], 0); 4166 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */ 4167 return do_accept4(a[0], a[1], a[2], a[3]); 4168 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */ 4169 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0); 4170 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */ 4171 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1); 4172 default: 4173 gemu_log("Unsupported socketcall: %d\n", num); 4174 return -TARGET_EINVAL; 4175 } 4176 } 4177 #endif 4178 4179 #define N_SHM_REGIONS 32 4180 4181 static struct shm_region { 4182 abi_ulong start; 4183 abi_ulong size; 4184 bool in_use; 4185 } shm_regions[N_SHM_REGIONS]; 4186 4187 #ifndef TARGET_SEMID64_DS 4188 /* asm-generic version of this struct */ 4189 struct target_semid64_ds 4190 { 4191 struct target_ipc_perm sem_perm; 4192 abi_ulong sem_otime; 4193 #if TARGET_ABI_BITS == 32 4194 abi_ulong __unused1; 4195 #endif 4196 abi_ulong sem_ctime; 4197 #if TARGET_ABI_BITS == 32 4198 abi_ulong __unused2; 4199 #endif 4200 abi_ulong sem_nsems; 4201 abi_ulong __unused3; 4202 abi_ulong __unused4; 4203 }; 4204 #endif 4205 4206 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 4207 abi_ulong target_addr) 4208 { 4209 struct target_ipc_perm *target_ip; 4210 struct target_semid64_ds *target_sd; 4211 4212 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4213 return -TARGET_EFAULT; 4214 target_ip = 
&(target_sd->sem_perm); 4215 host_ip->__key = tswap32(target_ip->__key); 4216 host_ip->uid = tswap32(target_ip->uid); 4217 host_ip->gid = tswap32(target_ip->gid); 4218 host_ip->cuid = tswap32(target_ip->cuid); 4219 host_ip->cgid = tswap32(target_ip->cgid); 4220 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 4221 host_ip->mode = tswap32(target_ip->mode); 4222 #else 4223 host_ip->mode = tswap16(target_ip->mode); 4224 #endif 4225 #if defined(TARGET_PPC) 4226 host_ip->__seq = tswap32(target_ip->__seq); 4227 #else 4228 host_ip->__seq = tswap16(target_ip->__seq); 4229 #endif 4230 unlock_user_struct(target_sd, target_addr, 0); 4231 return 0; 4232 } 4233 4234 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 4235 struct ipc_perm *host_ip) 4236 { 4237 struct target_ipc_perm *target_ip; 4238 struct target_semid64_ds *target_sd; 4239 4240 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4241 return -TARGET_EFAULT; 4242 target_ip = &(target_sd->sem_perm); 4243 target_ip->__key = tswap32(host_ip->__key); 4244 target_ip->uid = tswap32(host_ip->uid); 4245 target_ip->gid = tswap32(host_ip->gid); 4246 target_ip->cuid = tswap32(host_ip->cuid); 4247 target_ip->cgid = tswap32(host_ip->cgid); 4248 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 4249 target_ip->mode = tswap32(host_ip->mode); 4250 #else 4251 target_ip->mode = tswap16(host_ip->mode); 4252 #endif 4253 #if defined(TARGET_PPC) 4254 target_ip->__seq = tswap32(host_ip->__seq); 4255 #else 4256 target_ip->__seq = tswap16(host_ip->__seq); 4257 #endif 4258 unlock_user_struct(target_sd, target_addr, 1); 4259 return 0; 4260 } 4261 4262 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 4263 abi_ulong target_addr) 4264 { 4265 struct target_semid64_ds *target_sd; 4266 4267 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4268 return -TARGET_EFAULT; 4269 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 
        return -TARGET_EFAULT;
    /* (tail of target_to_host_semid_ds: head is above this chunk) */
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Copy a host semid_ds out to a guest target_semid64_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    /* NOTE(review): on this failure path target_sd is left locked
     * (no unlock_user_struct before return) — confirm intended. */
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct seminfo (all plain ints, like the host's). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host seminfo out to guest memory field by field (byteswapping
 * via __put_user). Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}

/* Host-side semun: the (traditionally caller-declared) 4th arg of semctl. */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-side semun: pointer members are guest addresses (abi_ulong). */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};

/* Read a guest semaphore-value array (for SETALL) into a freshly
 * g_try_new'd host array; the segment's nsems is queried via IPC_STAT.
 * On success *host_array is owned by the caller (freed later by
 * host_to_target_semarray). Returns 0 or a target errno.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

/* Write the host semaphore-value array (after GETALL) back to guest
 * memory and free it. Returns 0 or -TARGET_EFAULT / target errno.
 * NOTE(review): on the lock_user failure path *host_array is not freed.
 */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}

/* Emulate semctl(2): translate the guest's semun (passed by value as a
 * guest word in target_arg) per command, call the host semctl, and copy
 * results back. Returns the host result or a target errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* copy-out also frees 'array' */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* commands that take no semun argument */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}

struct
target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

/* Read an array of nsops guest sembufs into the caller-provided host
 * array, byteswapping each field. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

/* Emulate semop(2) via the interruption-safe semtimedop wrapper.
 * NOTE(review): 'sops' is a VLA sized by guest-controlled nsops — large
 * values risk stack overflow; confirm a bound is enforced by the caller.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}

/* Guest-ABI msqid_ds; 32-bit targets carry padding words after the
 * time fields (matching the kernel's 32-bit layout).
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Read a guest msqid_ds into a host msqid_ds (for IPC_SET etc).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): target_md stays locked on this failure path. */
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Write a host msqid_ds back to guest memory (after IPC_STAT etc).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    /* NOTE(review): target_md stays locked on this failure path. */
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct msginfo. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host msginfo out to guest memory field by field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

/* Emulate msgctl(2): convert the guest argument per command, call the
 * host, and convert results back. Unknown commands return -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* kernel returns a msginfo through the msqid_ds* slot here */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}

/* Guest-ABI msgbuf header: mtype followed by the message bytes. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

/* Emulate msgsnd(2): copy the guest message into a host msgbuf
 * (byteswapping mtype) and send via the interruption-safe wrapper.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret =
get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

/* Emulate msgrcv(2): receive into a host buffer, then copy mtext and a
 * byteswapped mtype back to the guest msgbuf at msgp.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext lives one guest word past the start of the msgbuf */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* NOTE(review): target_mb is always non-NULL here (checked above),
     * so this guard is redundant but harmless. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}

/* Read a guest shmid_ds into a host shmid_ds (for IPC_SET etc).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): target_sd stays locked on this failure path. */
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Write a host shmid_ds back to guest memory (after IPC_STAT etc).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    /* NOTE(review): target_sd stays locked on this failure path. */
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct shminfo. */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/* Copy a host shminfo out to guest memory field by field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

/* Guest-ABI layout of struct shm_info (SHM_INFO result). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Copy a host shm_info out to guest memory field by field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

/* Emulate shmctl(2): convert the guest argument per command, call the
 * host, and convert results back. Unknown commands return -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* kernel returns shminfo through the shmid_ds* slot here */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which
case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

/* Emulate shmat(2): attach the segment at the guest-requested address
 * (after SHMLBA rounding/validation) or at a free VMA found by
 * mmap_find_vma, update the guest page flags, and record the mapping in
 * shm_regions[] so do_shmdt can size the detach. Returns the guest
 * address or a target errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            /* round down to the target's SHMLBA boundary */
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the VMA found above is already reserved */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* remember the region so shmdt can flush its page flags later;
     * NOTE(review): if all N_SHM_REGIONS slots are in use the attach
     * silently goes unrecorded. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}

/* Emulate shmdt(2): drop the recorded region's page flags (if found)
 * and detach on the host.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}

#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 * Demultiplexer for the legacy combined ipc(2) syscall: dispatches to
 * the individual do_sem*/do_msg*/do_shm* helpers above based on 'call'
 * (low 16 bits) and 'version' (high 16 bits).
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style: ptr points at a {msgp, msgtyp} pair */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* return address is stored through 'third', not in ret */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

/* First expansion of syscall_types.h: build the STRUCT_* enum. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk type-description array per struct. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Signature for per-ioctl custom conversion handlers. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;          /* guest ioctl request number */
    unsigned int host_cmd;   /* corresponding host request number */
    const char *name;
    int access;              /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;   /* custom handler, or NULL for generic path */
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed on-stack conversion buffer handed to handlers. */
#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* bound the guest-supplied extent count so outbufsz can't overflow */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif

/* Custom handler for SIOCGIFCONF: struct ifconf embeds a variable-size
 * ifreq array via a pointer, so the generic thunk path can't handle it.
 * Converts the guest ifconf/ifreq[] to host, runs the ioctl, and copies
 * both back out.
 */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_len = host_ifconf->ifc_len;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    /* size the host buffer from the guest's ifreq count */
    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        host_ifconf = malloc(outbufsz);
        if (!host_ifconf) {
            return -TARGET_ENOMEM;
        }
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
        free_buf = 1;
    }
    /* ifreq array lives immediately after the ifconf header */
    host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
	/* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

	/* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

	/* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

	/* copy ifreq[] to target user */
        /* NOTE(review): this lock_user result is not NULL-checked before
         * use, unlike the one above — confirm intended. */

        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        for (i = 0; i < nb_ifreq ; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        }
        unlock_user(argptr,
target_ifc_buf, target_ifc_len);
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}

/* Custom handler for device-mapper ioctls: struct dm_ioctl carries a
 * variable-size payload located via data_start/data_size, whose layout
 * depends on the command, so each command's payload is converted by hand
 * in both directions.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    /* NOTE(review): data_size is guest-controlled and unvalidated here;
     * the *2 headroom is a heuristic — confirm it cannot be outgrown. */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* convert the command-specific input payload guest -> host */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* leading u64 is the message sector number */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* payload: target_count dm_target_spec headers, each followed
         * by a parameter string and located via its 'next' offset */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* convert the command-specific output payload host -> guest */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* walk the dm_name_list chain, re-linking 'next' offsets for
             * the (differently sized) target-side records */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* dm_target_spec headers each followed by a result string */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* payload: u32 count (+4 bytes padding), then count u64 devs */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* walk the dm_target_versions chain, like DM_LIST_DEVICES */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |=
/*
 * Emulate the BLKPG ioctl (block device partition table manipulation).
 *
 * The guest's struct blkpg_ioctl_arg contains a guest pointer in its
 * 'data' field, so a plain struct conversion is not enough: we convert
 * the outer struct, then separately fetch and convert the pointed-to
 * struct blkpg_partition, and finally swizzle 'data' to point at our
 * host-side copy before issuing the host ioctl.
 *
 * Returns 0/host result on success, -TARGET_E* on failure.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg (skip the leading TYPE_PTR in arg_type) */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Only opcodes whose payload we know how to convert are allowed. */
    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (still a guest address at this point) */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
-TARGET_EFAULT; 5544 goto out; 5545 } 5546 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5547 unlock_user(argptr, arg, 0); 5548 5549 /* Swizzle the data pointer to our local copy and call! */ 5550 host_blkpg->data = &host_part; 5551 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5552 5553 out: 5554 return ret; 5555 } 5556 5557 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5558 int fd, int cmd, abi_long arg) 5559 { 5560 const argtype *arg_type = ie->arg_type; 5561 const StructEntry *se; 5562 const argtype *field_types; 5563 const int *dst_offsets, *src_offsets; 5564 int target_size; 5565 void *argptr; 5566 abi_ulong *target_rt_dev_ptr; 5567 unsigned long *host_rt_dev_ptr; 5568 abi_long ret; 5569 int i; 5570 5571 assert(ie->access == IOC_W); 5572 assert(*arg_type == TYPE_PTR); 5573 arg_type++; 5574 assert(*arg_type == TYPE_STRUCT); 5575 target_size = thunk_type_size(arg_type, 0); 5576 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5577 if (!argptr) { 5578 return -TARGET_EFAULT; 5579 } 5580 arg_type++; 5581 assert(*arg_type == (int)STRUCT_rtentry); 5582 se = struct_entries + *arg_type++; 5583 assert(se->convert[0] == NULL); 5584 /* convert struct here to be able to catch rt_dev string */ 5585 field_types = se->field_types; 5586 dst_offsets = se->field_offsets[THUNK_HOST]; 5587 src_offsets = se->field_offsets[THUNK_TARGET]; 5588 for (i = 0; i < se->nb_fields; i++) { 5589 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 5590 assert(*field_types == TYPE_PTRVOID); 5591 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 5592 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 5593 if (*target_rt_dev_ptr != 0) { 5594 *host_rt_dev_ptr = (unsigned long)lock_user_string( 5595 tswapal(*target_rt_dev_ptr)); 5596 if (!*host_rt_dev_ptr) { 5597 unlock_user(argptr, arg, 0); 5598 return -TARGET_EFAULT; 5599 } 5600 } else { 5601 *host_rt_dev_ptr = 0; 5602 } 5603 field_types++; 5604 continue; 5605 } 
5606 field_types = thunk_convert(buf_temp + dst_offsets[i], 5607 argptr + src_offsets[i], 5608 field_types, THUNK_HOST); 5609 } 5610 unlock_user(argptr, arg, 0); 5611 5612 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5613 if (*host_rt_dev_ptr != 0) { 5614 unlock_user((void *)*host_rt_dev_ptr, 5615 *target_rt_dev_ptr, 0); 5616 } 5617 return ret; 5618 } 5619 5620 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 5621 int fd, int cmd, abi_long arg) 5622 { 5623 int sig = target_to_host_signal(arg); 5624 return get_errno(safe_ioctl(fd, ie->host_cmd, sig)); 5625 } 5626 5627 static IOCTLEntry ioctl_entries[] = { 5628 #define IOCTL(cmd, access, ...) \ 5629 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 5630 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 5631 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 5632 #define IOCTL_IGNORE(cmd) \ 5633 { TARGET_ ## cmd, 0, #cmd }, 5634 #include "ioctls.h" 5635 { 0, 0, }, 5636 }; 5637 5638 /* ??? Implement proper locking for ioctls. */ 5639 /* do_ioctl() Must return target values and target errnos. */ 5640 static abi_long do_ioctl(int fd, int cmd, abi_long arg) 5641 { 5642 const IOCTLEntry *ie; 5643 const argtype *arg_type; 5644 abi_long ret; 5645 uint8_t buf_temp[MAX_STRUCT_SIZE]; 5646 int target_size; 5647 void *argptr; 5648 5649 ie = ioctl_entries; 5650 for(;;) { 5651 if (ie->target_cmd == 0) { 5652 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 5653 return -TARGET_ENOSYS; 5654 } 5655 if (ie->target_cmd == cmd) 5656 break; 5657 ie++; 5658 } 5659 arg_type = ie->arg_type; 5660 #if defined(DEBUG) 5661 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 5662 #endif 5663 if (ie->do_ioctl) { 5664 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 5665 } else if (!ie->host_cmd) { 5666 /* Some architectures define BSD ioctls in their headers 5667 that are not implemented in Linux. 
/* ??? Implement proper locking for ioctls.  */
/*
 * Generic ioctl dispatcher: do_ioctl() must return target values and
 * target errnos.
 *
 * Looks the target command up in ioctl_entries[] (linear scan,
 * terminated by a zero target_cmd).  Entries with a do_ioctl callback
 * are fully custom; entries with host_cmd == 0 are known-but-
 * unimplemented.  Otherwise the argument is converted according to the
 * entry's argtype description: scalar arguments pass through, pointer
 * arguments are thunk-converted into buf_temp in the direction(s)
 * given by ie->access (IOC_R = copy result out, IOC_W = copy argument
 * in, IOC_RW = both).
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* fully custom emulation for this command */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* scalar argument: pass through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* read-only result: run the ioctl, then copy out */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* write-only argument: copy in, then run the ioctl */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* copy in, run the ioctl, copy out on success */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Target <-> host translation table for termios c_oflag bits.
 * Each row is { target_mask, target_bits, host_mask, host_bits };
 * the delay-style fields (NLDLY, CRDLY, ...) are multi-bit and so
 * need one row per possible value. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }  /* terminator */
};
/* Target <-> host translation table for termios c_cflag bits.
 * CBAUD and CSIZE are multi-bit fields, so each possible baud rate /
 * character size gets its own row. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }  /* terminator */
};
/* Convert a guest struct target_termios (byte-swapped as needed) into
 * a host struct host_termios: flag words go through the bitmask
 * translation tables above, and the c_cc control-character array is
 * remapped index by index since target and host V* indices differ. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* zero first: host slots with no target equivalent stay cleared */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios: convert a host struct
 * host_termios into a guest struct target_termios, translating flag
 * bits through the tables and remapping the c_cc indices. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* zero first: target slots with no host equivalent stay cleared */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Emulate modify_ldt(0): copy the emulated LDT out to guest memory.
 * Returns the number of bytes copied (0 if no LDT has been allocated
 * yet), or -TARGET_EFAULT if the guest buffer is unwritable.
 * The copy is capped at min(bytecount, full LDT size). */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/* Emulate modify_ldt(1)/modify_ldt(0x11): install one LDT descriptor.
 *
 * Reads a struct target_modify_ldt_ldt_s from guest memory, unpacks
 * its flag bits, lazily allocates the emulated LDT on first use, and
 * encodes the entry into the two 32-bit descriptor words using the
 * same bit layout as the Linux kernel's write_ldt().
 *
 * oldmode selects the legacy modify_ldt(1) semantics (no 'useable'
 * bit, no call-gate entries).  Returns 0 on success or -TARGET_E*.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the packed flags word (same layout as the kernel's
     * struct user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT (lazily, on first write) */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* encode the descriptor words (x86 segment descriptor layout) */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
kernel */ 6006 /* Allow LDTs to be cleared by the user. */ 6007 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6008 if (oldmode || 6009 (contents == 0 && 6010 read_exec_only == 1 && 6011 seg_32bit == 0 && 6012 limit_in_pages == 0 && 6013 seg_not_present == 1 && 6014 useable == 0 )) { 6015 entry_1 = 0; 6016 entry_2 = 0; 6017 goto install; 6018 } 6019 } 6020 6021 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6022 (ldt_info.limit & 0x0ffff); 6023 entry_2 = (ldt_info.base_addr & 0xff000000) | 6024 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6025 (ldt_info.limit & 0xf0000) | 6026 ((read_exec_only ^ 1) << 9) | 6027 (contents << 10) | 6028 ((seg_not_present ^ 1) << 15) | 6029 (seg_32bit << 22) | 6030 (limit_in_pages << 23) | 6031 (lm << 21) | 6032 0x7000; 6033 if (!oldmode) 6034 entry_2 |= (useable << 20); 6035 6036 /* Install the new entry ... */ 6037 install: 6038 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 6039 lp[0] = tswap32(entry_1); 6040 lp[1] = tswap32(entry_2); 6041 return 0; 6042 } 6043 6044 /* specific and weird i386 syscalls */ 6045 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 6046 unsigned long bytecount) 6047 { 6048 abi_long ret; 6049 6050 switch (func) { 6051 case 0: 6052 ret = read_ldt(ptr, bytecount); 6053 break; 6054 case 1: 6055 ret = write_ldt(env, ptr, bytecount, 1); 6056 break; 6057 case 0x11: 6058 ret = write_ldt(env, ptr, bytecount, 0); 6059 break; 6060 default: 6061 ret = -TARGET_ENOSYS; 6062 break; 6063 } 6064 return ret; 6065 } 6066 6067 #if defined(TARGET_I386) && defined(TARGET_ABI32) 6068 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 6069 { 6070 uint64_t *gdt_table = g2h(env->gdt.base); 6071 struct target_modify_ldt_ldt_s ldt_info; 6072 struct target_modify_ldt_ldt_s *target_ldt_info; 6073 int seg_32bit, contents, read_exec_only, limit_in_pages; 6074 int seg_not_present, useable, lm; 6075 uint32_t *lp, entry_1, entry_2; 6076 int i; 6077 6078 
/* Emulate set_thread_area(): install a TLS descriptor in the emulated
 * GDT.
 *
 * If the guest passes entry_number == -1 we pick the first free TLS
 * slot and write the chosen index back into the guest struct (hence
 * VERIFY_WRITE on the lock).  The descriptor encoding mirrors the
 * kernel's; see also write_ldt() above.  Returns 0 or -TARGET_E*.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* allocate a free TLS slot and report it back to the guest */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* unpack the packed flags word (same layout as the kernel's
     * struct user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* encode the descriptor words (x86 segment descriptor layout) */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* Emulate get_thread_area(): read back a TLS descriptor from the
 * emulated GDT and decode it into the guest's
 * struct target_modify_ldt_ldt_s (the inverse of do_set_thread_area's
 * descriptor encoding).  Returns 0 or -TARGET_E*. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* decode the descriptor bits back into user_desc-style flags */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#ifdef TARGET_ABI32 6182 lm = 0; 6183 #else 6184 lm = (entry_2 >> 21) & 1; 6185 #endif 6186 flags = (seg_32bit << 0) | (contents << 1) | 6187 (read_exec_only << 3) | (limit_in_pages << 4) | 6188 (seg_not_present << 5) | (useable << 6) | (lm << 7); 6189 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 6190 base_addr = (entry_1 >> 16) | 6191 (entry_2 & 0xff000000) | 6192 ((entry_2 & 0xff) << 16); 6193 target_ldt_info->base_addr = tswapal(base_addr); 6194 target_ldt_info->limit = tswap32(limit); 6195 target_ldt_info->flags = tswap32(flags); 6196 unlock_user_struct(target_ldt_info, ptr, 1); 6197 return 0; 6198 } 6199 #endif /* TARGET_I386 && TARGET_ABI32 */ 6200 6201 #ifndef TARGET_ABI32 6202 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 6203 { 6204 abi_long ret = 0; 6205 abi_ulong val; 6206 int idx; 6207 6208 switch(code) { 6209 case TARGET_ARCH_SET_GS: 6210 case TARGET_ARCH_SET_FS: 6211 if (code == TARGET_ARCH_SET_GS) 6212 idx = R_GS; 6213 else 6214 idx = R_FS; 6215 cpu_x86_load_seg(env, idx, 0); 6216 env->segs[idx].base = addr; 6217 break; 6218 case TARGET_ARCH_GET_GS: 6219 case TARGET_ARCH_GET_FS: 6220 if (code == TARGET_ARCH_GET_GS) 6221 idx = R_GS; 6222 else 6223 idx = R_FS; 6224 val = env->segs[idx].base; 6225 if (put_user(val, addr, abi_ulong)) 6226 ret = -TARGET_EFAULT; 6227 break; 6228 default: 6229 ret = -TARGET_EINVAL; 6230 break; 6231 } 6232 return ret; 6233 } 6234 #endif 6235 6236 #endif /* defined(TARGET_I386) */ 6237 6238 #define NEW_STACK_SIZE 0x40000 6239 6240 6241 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 6242 typedef struct { 6243 CPUArchState *env; 6244 pthread_mutex_t mutex; 6245 pthread_cond_t cond; 6246 pthread_t thread; 6247 uint32_t tid; 6248 abi_ulong child_tidptr; 6249 abi_ulong parent_tidptr; 6250 sigset_t sigmask; 6251 } new_thread_info; 6252 6253 static void *clone_func(void *arg) 6254 { 6255 new_thread_info *info = arg; 6256 CPUArchState *env; 6257 CPUState *cpu; 6258 TaskState *ts; 6259 6260 
/* Entry point for threads created by do_fork() with CLONE_VM.
 *
 * Runs in the new host thread: registers with RCU/TCG, publishes the
 * thread id to the guest *tidptr locations, restores the signal mask
 * that do_fork() saved, signals the parent via info->cond that setup
 * is done, then waits on clone_lock for the parent to finish TLS
 * initialization before entering the guest CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone()/fork()/vfork().
 *
 * CLONE_VM requests are implemented as a pthread_create() of a new
 * guest CPU thread (see clone_func); anything else is implemented as
 * a host fork().  vfork() is degraded to fork().  Flag combinations
 * we cannot support return -TARGET_EINVAL.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* thread creation requires the full pthread-like flag set */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the pthread_attr_* return values below are
         * overwritten without being checked; only the final
         * pthread_create() result is honoured. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
 */
/* Translate a guest fcntl() command number to the host's.
 * Lock commands are always mapped to the 64-bit host variants (F_*LK64)
 * because the callers below always use a host struct flock64.
 * Returns -TARGET_EINVAL for commands we do not know how to forward.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        /* These command numbers are assumed identical on host and target. */
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* NOTE(review): unreachable -- every switch arm above returns,
     * including the default case. Kept for safety/diff-minimization. */
    return -TARGET_EINVAL;
}

/* Translation table for flock l_type values (F_RDLCK etc.) between
 * target and host representations. The -1 masks mean "match/copy the
 * whole value" rather than a bit subset. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};

/* Copy a guest struct target_flock (the "small" ABI layout) into a host
 * struct flock64, converting byte order and the l_type constant.
 * Returns 0 or -TARGET_EFAULT if the guest address is unmapped.
 */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Inverse of copy_from_user_flock: write a host struct flock64 back to a
 * guest struct target_flock. Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

/* Function pointer types for the flock64 copy helpers above/below, so
 * callers can select the plain or ARM-EABI layout at runtime. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI uses a differently-padded flock64 layout; copy from the guest
 * target_eabi_flock64 into a host struct flock64. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Write a host struct flock64 back to a guest target_eabi_flock64. */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif

/* Copy a guest struct target_flock64 into a host struct flock64. */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Write a host struct flock64 back to a guest struct target_flock64. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

/* Emulate the guest fcntl() syscall: translate the command and argument
 * (lock structs, flag bitmasks, f_owner_ex) to host form, call the host
 * fcntl, and convert any result back. Unknown commands are passed through
 * with the untranslated cmd. Returns the host result or a -TARGET_* errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* GETLK fills in the lock description; copy it back out. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Translate the returned open-file flags to target encoding. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* Plain integer argument; no conversion needed. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass the untranslated cmd straight through. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit ABI range (overflow maps to 65534,
 * the traditional "nobody"-style overflow id). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* Same clamping for gids. */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* Widen a 16-bit gid, preserving the -1 "no change" sentinel. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
/* Byte-swap an id at the 16-bit ABI width. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit uid ABI: all of these are identity/width-preserving. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread syscall wrappers (bypass libc; see comment above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)

/* One-time syscall layer initialisation: register thunk struct layouts,
 * build the errno reverse-mapping table, and patch/validate the ioctl
 * dispatch table sizes. Called once at startup. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit value from the two guest registers of a register
 * pair, honouring the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
/* Emulate truncate64: some ABIs align 64-bit register pairs, shifting the
 * argument registers by one; regpairs_aligned() detects that case. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64; same register-pair alignment handling as above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

/* Copy a guest struct timespec into a host one (byte-swapping fields).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

/* Copy a host struct timespec out to guest memory. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

/* Copy a guest struct itimerspec into a host one, swapping each field. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
        tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
        tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}

/* Copy a host struct itimerspec out to guest memory. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}

/* Copy a guest struct timex (adjtimex argument) into a host one,
 * field by field. Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

/* Copy a host struct timex back out to guest memory, field by field. */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}


/* Copy a guest struct sigevent into a host one, translating the signal
 * number and byte-swapping. Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif

/* Write a host struct stat out to guest memory in the target's stat64
 * layout (or the ARM EABI variant, or plain target_stat when the target
 * has no stat64). Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.
   However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
/* Emulate the guest futex() syscall on the guest address translated to a
 * host address via g2h(). Only the listed base operations are supported;
 * anything else returns -TARGET_ENOSYS. val/val3 are byte-swapped only
 * where the kernel compares them against guest memory. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /* NOTE(review): return value of target_to_host_timespec() is
             * not checked here -- a bad guest pointer yields an
             * uninitialized timeout rather than -TARGET_EFAULT. */
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler. We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest-supplied handle_bytes,
 * call the host syscall into a temporary host file_handle, then copy the
 * opaque handle plus byte-swapped header fields back to guest memory and
 * store the mount id. Returns the host result or -TARGET_EFAULT. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest struct is handle_bytes (s32). */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif

#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): rebuild a host file_handle from the
 * guest copy (swapping the header fields) and forward to the host with
 * translated open flags. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif

#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/* signalfd siginfo conversion */

/* Convert one signalfd_siginfo record from host to target byte order.
 * NOTE(review): the only caller (host_to_target_data_signalfd) passes the
 * same buffer as both tinfo and info, i.e. this is an in-place swap; the
 * reads of tinfo->ssi_signo/ssi_code below therefore see host-order
 * values, and the tswap32(tinfo->ssi_errno) line relies on the aliasing.
 * If tinfo and info ever point to distinct buffers, those three reads
 * would be wrong -- verify before reusing this helper. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}

/* fd_trans hook: convert, in place, every signalfd_siginfo record read
 * from a signalfd file descriptor. Returns len unchanged. */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}

static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};

/* Emulate signalfd4 (and signalfd with flags == 0): translate the guest
 * sigset and flags, create the host signalfd, and register the read-data
 * translator so records come back in target byte order. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.
*/ 7393 int host_to_target_waitstatus(int status) 7394 { 7395 if (WIFSIGNALED(status)) { 7396 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 7397 } 7398 if (WIFSTOPPED(status)) { 7399 return (host_to_target_signal(WSTOPSIG(status)) << 8) 7400 | (status & 0xff); 7401 } 7402 return status; 7403 } 7404 7405 static int open_self_cmdline(void *cpu_env, int fd) 7406 { 7407 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7408 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 7409 int i; 7410 7411 for (i = 0; i < bprm->argc; i++) { 7412 size_t len = strlen(bprm->argv[i]) + 1; 7413 7414 if (write(fd, bprm->argv[i], len) != len) { 7415 return -1; 7416 } 7417 } 7418 7419 return 0; 7420 } 7421 7422 static int open_self_maps(void *cpu_env, int fd) 7423 { 7424 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7425 TaskState *ts = cpu->opaque; 7426 FILE *fp; 7427 char *line = NULL; 7428 size_t len = 0; 7429 ssize_t read; 7430 7431 fp = fopen("/proc/self/maps", "r"); 7432 if (fp == NULL) { 7433 return -1; 7434 } 7435 7436 while ((read = getline(&line, &len, fp)) != -1) { 7437 int fields, dev_maj, dev_min, inode; 7438 uint64_t min, max, offset; 7439 char flag_r, flag_w, flag_x, flag_p; 7440 char path[512] = ""; 7441 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 7442 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 7443 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 7444 7445 if ((fields < 10) || (fields > 11)) { 7446 continue; 7447 } 7448 if (h2g_valid(min)) { 7449 int flags = page_get_flags(h2g(min)); 7450 max = h2g_valid(max - 1) ? 
max : (uintptr_t)g2h(GUEST_ADDR_MAX); 7451 if (page_check_range(h2g(min), max - min, flags) == -1) { 7452 continue; 7453 } 7454 if (h2g(min) == ts->info->stack_limit) { 7455 pstrcpy(path, sizeof(path), " [stack]"); 7456 } 7457 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 7458 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 7459 h2g(min), h2g(max - 1) + 1, flag_r, flag_w, 7460 flag_x, flag_p, offset, dev_maj, dev_min, inode, 7461 path[0] ? " " : "", path); 7462 } 7463 } 7464 7465 free(line); 7466 fclose(fp); 7467 7468 return 0; 7469 } 7470 7471 static int open_self_stat(void *cpu_env, int fd) 7472 { 7473 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7474 TaskState *ts = cpu->opaque; 7475 abi_ulong start_stack = ts->info->start_stack; 7476 int i; 7477 7478 for (i = 0; i < 44; i++) { 7479 char buf[128]; 7480 int len; 7481 uint64_t val = 0; 7482 7483 if (i == 0) { 7484 /* pid */ 7485 val = getpid(); 7486 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 7487 } else if (i == 1) { 7488 /* app name */ 7489 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 7490 } else if (i == 27) { 7491 /* stack bottom */ 7492 val = start_stack; 7493 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 7494 } else { 7495 /* for the rest, there is MasterCard */ 7496 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 7497 } 7498 7499 len = strlen(buf); 7500 if (write(fd, buf, len) != len) { 7501 return -1; 7502 } 7503 } 7504 7505 return 0; 7506 } 7507 7508 static int open_self_auxv(void *cpu_env, int fd) 7509 { 7510 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7511 TaskState *ts = cpu->opaque; 7512 abi_ulong auxv = ts->info->saved_auxv; 7513 abi_ulong len = ts->info->auxv_len; 7514 char *ptr; 7515 7516 /* 7517 * Auxiliary vector is stored in target process stack. 
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        /* Loop until the whole vector is written or write() fails. */
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Rewind so the caller reads the synthesized content from 0. */
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}

/* Return 1 iff filename names /proc/<self>/<entry> for this process,
 * accepting both the "/proc/self/" spelling and an explicit
 * "/proc/<our pid>/" prefix; 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used for fixed /proc paths (no pid handling). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Emulate /proc/net/route for cross-endian guests: copy the host file
 * through while byte-swapping the hex address columns (dest, gateway,
 * mask) into the guest's byte order.  Returns 0 on success, -1 if the
 * host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */
    /* NOTE(review): the header read result is not checked; if the file
     * were empty, line could reach dprintf unset — confirm host /proc
     * guarantees a header line. */
    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd,
                "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

/* openat(2) emulation.  Paths naming emulated /proc entries are
 * intercepted and served from an unlinked temp file populated by the
 * matching fill() callback; "/proc/self/exe" is redirected to the
 * original executable; everything else goes to the host openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                      /* entry (or full path) to intercept */
        int (*fill)(void *cpu_env, int fd);        /* writes synthetic content to fd */
        int (*cmp)(const char *s1, const char *s2); /* how to match pathname */
    };
    const struct fake_open *fake_open;
    /* Table of /proc entries that must be synthesized for the guest. */
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the kernel handed us at exec time, if any. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ?
execfd : safe_openat(dirfd, exec_path, flags, mode); 7628 } 7629 7630 for (fake_open = fakes; fake_open->filename; fake_open++) { 7631 if (fake_open->cmp(pathname, fake_open->filename)) { 7632 break; 7633 } 7634 } 7635 7636 if (fake_open->filename) { 7637 const char *tmpdir; 7638 char filename[PATH_MAX]; 7639 int fd, r; 7640 7641 /* create temporary file to map stat to */ 7642 tmpdir = getenv("TMPDIR"); 7643 if (!tmpdir) 7644 tmpdir = "/tmp"; 7645 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 7646 fd = mkstemp(filename); 7647 if (fd < 0) { 7648 return fd; 7649 } 7650 unlink(filename); 7651 7652 if ((r = fake_open->fill(cpu_env, fd))) { 7653 int e = errno; 7654 close(fd); 7655 errno = e; 7656 return r; 7657 } 7658 lseek(fd, 0, SEEK_SET); 7659 7660 return fd; 7661 } 7662 7663 return safe_openat(dirfd, path(pathname), flags, mode); 7664 } 7665 7666 #define TIMER_MAGIC 0x0caf0000 7667 #define TIMER_MAGIC_MASK 0xffff0000 7668 7669 /* Convert QEMU provided timer ID back to internal 16bit index format */ 7670 static target_timer_t get_timer_id(abi_long arg) 7671 { 7672 target_timer_t timerid = arg; 7673 7674 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 7675 return -TARGET_EINVAL; 7676 } 7677 7678 timerid &= 0xffff; 7679 7680 if (timerid >= ARRAY_SIZE(g_posix_timers)) { 7681 return -TARGET_EINVAL; 7682 } 7683 7684 return timerid; 7685 } 7686 7687 static abi_long swap_data_eventfd(void *buf, size_t len) 7688 { 7689 uint64_t *counter = buf; 7690 int i; 7691 7692 if (len < sizeof(uint64_t)) { 7693 return -EINVAL; 7694 } 7695 7696 for (i = 0; i < len; i += sizeof(uint64_t)) { 7697 *counter = tswap64(*counter); 7698 counter++; 7699 } 7700 7701 return len; 7702 } 7703 7704 static TargetFdTrans target_eventfd_trans = { 7705 .host_to_target_data = swap_data_eventfd, 7706 .target_to_host_data = swap_data_eventfd, 7707 }; 7708 7709 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \ 7710 (defined(CONFIG_INOTIFY1) && 
     defined(TARGET_NR_inotify_init1) && \
     defined(__NR_inotify_init1))
/* Byte-swap a buffer of struct inotify_event records in place.  The
 * variable-length name following each record is raw bytes and needs no
 * swapping; its size (ev->len) is read before the field is swapped so
 * the loop can step over it.  Returns the length processed.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    int i;
    uint32_t name_len;

    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + i);
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    return len;
}

/* fd translator for inotify fds: only host-to-target reads need fixing. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
#endif

/* Copy a guest CPU-affinity bitmask into a host mask bit by bit,
 * tolerating different word sizes for abi_ulong and the host's
 * unsigned long.  host_size must be at least target_size (asserted).
 * Returns 0, or -TARGET_EFAULT if guest memory cannot be read.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    /* Clear the full host mask: bits beyond target_size stay zero. */
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_cpu_mask: copy a host CPU-affinity bitmask
 * out to guest memory bit by bit.  host_size must be at least
 * target_size (asserted).  Returns 0, or -TARGET_EFAULT if guest
 * memory cannot be written.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask =
lock_user(VERIFY_WRITE, target_addr, target_size, 0); 7783 if (!target_mask) { 7784 return -TARGET_EFAULT; 7785 } 7786 7787 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 7788 unsigned bit = i * target_bits; 7789 abi_ulong val = 0; 7790 7791 for (j = 0; j < target_bits; j++, bit++) { 7792 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) { 7793 val |= 1UL << j; 7794 } 7795 } 7796 __put_user(val, &target_mask[i]); 7797 } 7798 7799 unlock_user(target_mask, target_addr, target_size); 7800 return 0; 7801 } 7802 7803 /* do_syscall() should always have a single exit point at the end so 7804 that actions, such as logging of syscall results, can be performed. 7805 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 7806 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 7807 abi_long arg2, abi_long arg3, abi_long arg4, 7808 abi_long arg5, abi_long arg6, abi_long arg7, 7809 abi_long arg8) 7810 { 7811 CPUState *cpu = ENV_GET_CPU(cpu_env); 7812 abi_long ret; 7813 struct stat st; 7814 struct statfs stfs; 7815 void *p; 7816 7817 #if defined(DEBUG_ERESTARTSYS) 7818 /* Debug-only code for exercising the syscall-restart code paths 7819 * in the per-architecture cpu main loops: restart every syscall 7820 * the guest makes once before letting it through. 7821 */ 7822 { 7823 static int flag; 7824 7825 flag = !flag; 7826 if (flag) { 7827 return -TARGET_ERESTARTSYS; 7828 } 7829 } 7830 #endif 7831 7832 #ifdef DEBUG 7833 gemu_log("syscall %d", num); 7834 #endif 7835 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); 7836 if(do_strace) 7837 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 7838 7839 switch(num) { 7840 case TARGET_NR_exit: 7841 /* In old applications this may be used to implement _exit(2). 7842 However in threaded applictions it is used for thread termination, 7843 and _exit_group is used for application termination. 7844 Do thread termination if we have more then one thread. 
*/ 7845 7846 if (block_signals()) { 7847 ret = -TARGET_ERESTARTSYS; 7848 break; 7849 } 7850 7851 cpu_list_lock(); 7852 7853 if (CPU_NEXT(first_cpu)) { 7854 TaskState *ts; 7855 7856 /* Remove the CPU from the list. */ 7857 QTAILQ_REMOVE(&cpus, cpu, node); 7858 7859 cpu_list_unlock(); 7860 7861 ts = cpu->opaque; 7862 if (ts->child_tidptr) { 7863 put_user_u32(0, ts->child_tidptr); 7864 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 7865 NULL, NULL, 0); 7866 } 7867 thread_cpu = NULL; 7868 object_unref(OBJECT(cpu)); 7869 g_free(ts); 7870 rcu_unregister_thread(); 7871 pthread_exit(NULL); 7872 } 7873 7874 cpu_list_unlock(); 7875 #ifdef TARGET_GPROF 7876 _mcleanup(); 7877 #endif 7878 gdb_exit(cpu_env, arg1); 7879 _exit(arg1); 7880 ret = 0; /* avoid warning */ 7881 break; 7882 case TARGET_NR_read: 7883 if (arg3 == 0) 7884 ret = 0; 7885 else { 7886 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7887 goto efault; 7888 ret = get_errno(safe_read(arg1, p, arg3)); 7889 if (ret >= 0 && 7890 fd_trans_host_to_target_data(arg1)) { 7891 ret = fd_trans_host_to_target_data(arg1)(p, ret); 7892 } 7893 unlock_user(p, arg2, ret); 7894 } 7895 break; 7896 case TARGET_NR_write: 7897 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7898 goto efault; 7899 if (fd_trans_target_to_host_data(arg1)) { 7900 void *copy = g_malloc(arg3); 7901 memcpy(copy, p, arg3); 7902 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 7903 if (ret >= 0) { 7904 ret = get_errno(safe_write(arg1, copy, ret)); 7905 } 7906 g_free(copy); 7907 } else { 7908 ret = get_errno(safe_write(arg1, p, arg3)); 7909 } 7910 unlock_user(p, arg2, 0); 7911 break; 7912 #ifdef TARGET_NR_open 7913 case TARGET_NR_open: 7914 if (!(p = lock_user_string(arg1))) 7915 goto efault; 7916 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 7917 target_to_host_bitmask(arg2, fcntl_flags_tbl), 7918 arg3)); 7919 fd_trans_unregister(ret); 7920 unlock_user(p, arg1, 0); 7921 break; 7922 #endif 7923 case TARGET_NR_openat: 7924 if (!(p = 
lock_user_string(arg2))) 7925 goto efault; 7926 ret = get_errno(do_openat(cpu_env, arg1, p, 7927 target_to_host_bitmask(arg3, fcntl_flags_tbl), 7928 arg4)); 7929 fd_trans_unregister(ret); 7930 unlock_user(p, arg2, 0); 7931 break; 7932 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7933 case TARGET_NR_name_to_handle_at: 7934 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 7935 break; 7936 #endif 7937 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7938 case TARGET_NR_open_by_handle_at: 7939 ret = do_open_by_handle_at(arg1, arg2, arg3); 7940 fd_trans_unregister(ret); 7941 break; 7942 #endif 7943 case TARGET_NR_close: 7944 fd_trans_unregister(arg1); 7945 ret = get_errno(close(arg1)); 7946 break; 7947 case TARGET_NR_brk: 7948 ret = do_brk(arg1); 7949 break; 7950 #ifdef TARGET_NR_fork 7951 case TARGET_NR_fork: 7952 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 7953 break; 7954 #endif 7955 #ifdef TARGET_NR_waitpid 7956 case TARGET_NR_waitpid: 7957 { 7958 int status; 7959 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 7960 if (!is_error(ret) && arg2 && ret 7961 && put_user_s32(host_to_target_waitstatus(status), arg2)) 7962 goto efault; 7963 } 7964 break; 7965 #endif 7966 #ifdef TARGET_NR_waitid 7967 case TARGET_NR_waitid: 7968 { 7969 siginfo_t info; 7970 info.si_pid = 0; 7971 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 7972 if (!is_error(ret) && arg3 && info.si_pid != 0) { 7973 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 7974 goto efault; 7975 host_to_target_siginfo(p, &info); 7976 unlock_user(p, arg3, sizeof(target_siginfo_t)); 7977 } 7978 } 7979 break; 7980 #endif 7981 #ifdef TARGET_NR_creat /* not on alpha */ 7982 case TARGET_NR_creat: 7983 if (!(p = lock_user_string(arg1))) 7984 goto efault; 7985 ret = get_errno(creat(p, arg2)); 7986 fd_trans_unregister(ret); 7987 unlock_user(p, arg1, 0); 7988 break; 7989 #endif 7990 #ifdef TARGET_NR_link 
7991 case TARGET_NR_link: 7992 { 7993 void * p2; 7994 p = lock_user_string(arg1); 7995 p2 = lock_user_string(arg2); 7996 if (!p || !p2) 7997 ret = -TARGET_EFAULT; 7998 else 7999 ret = get_errno(link(p, p2)); 8000 unlock_user(p2, arg2, 0); 8001 unlock_user(p, arg1, 0); 8002 } 8003 break; 8004 #endif 8005 #if defined(TARGET_NR_linkat) 8006 case TARGET_NR_linkat: 8007 { 8008 void * p2 = NULL; 8009 if (!arg2 || !arg4) 8010 goto efault; 8011 p = lock_user_string(arg2); 8012 p2 = lock_user_string(arg4); 8013 if (!p || !p2) 8014 ret = -TARGET_EFAULT; 8015 else 8016 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8017 unlock_user(p, arg2, 0); 8018 unlock_user(p2, arg4, 0); 8019 } 8020 break; 8021 #endif 8022 #ifdef TARGET_NR_unlink 8023 case TARGET_NR_unlink: 8024 if (!(p = lock_user_string(arg1))) 8025 goto efault; 8026 ret = get_errno(unlink(p)); 8027 unlock_user(p, arg1, 0); 8028 break; 8029 #endif 8030 #if defined(TARGET_NR_unlinkat) 8031 case TARGET_NR_unlinkat: 8032 if (!(p = lock_user_string(arg2))) 8033 goto efault; 8034 ret = get_errno(unlinkat(arg1, p, arg3)); 8035 unlock_user(p, arg2, 0); 8036 break; 8037 #endif 8038 case TARGET_NR_execve: 8039 { 8040 char **argp, **envp; 8041 int argc, envc; 8042 abi_ulong gp; 8043 abi_ulong guest_argp; 8044 abi_ulong guest_envp; 8045 abi_ulong addr; 8046 char **q; 8047 int total_size = 0; 8048 8049 argc = 0; 8050 guest_argp = arg2; 8051 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 8052 if (get_user_ual(addr, gp)) 8053 goto efault; 8054 if (!addr) 8055 break; 8056 argc++; 8057 } 8058 envc = 0; 8059 guest_envp = arg3; 8060 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 8061 if (get_user_ual(addr, gp)) 8062 goto efault; 8063 if (!addr) 8064 break; 8065 envc++; 8066 } 8067 8068 argp = g_new0(char *, argc + 1); 8069 envp = g_new0(char *, envc + 1); 8070 8071 for (gp = guest_argp, q = argp; gp; 8072 gp += sizeof(abi_ulong), q++) { 8073 if (get_user_ual(addr, gp)) 8074 goto execve_efault; 8075 if (!addr) 8076 break; 
8077 if (!(*q = lock_user_string(addr))) 8078 goto execve_efault; 8079 total_size += strlen(*q) + 1; 8080 } 8081 *q = NULL; 8082 8083 for (gp = guest_envp, q = envp; gp; 8084 gp += sizeof(abi_ulong), q++) { 8085 if (get_user_ual(addr, gp)) 8086 goto execve_efault; 8087 if (!addr) 8088 break; 8089 if (!(*q = lock_user_string(addr))) 8090 goto execve_efault; 8091 total_size += strlen(*q) + 1; 8092 } 8093 *q = NULL; 8094 8095 if (!(p = lock_user_string(arg1))) 8096 goto execve_efault; 8097 /* Although execve() is not an interruptible syscall it is 8098 * a special case where we must use the safe_syscall wrapper: 8099 * if we allow a signal to happen before we make the host 8100 * syscall then we will 'lose' it, because at the point of 8101 * execve the process leaves QEMU's control. So we use the 8102 * safe syscall wrapper to ensure that we either take the 8103 * signal as a guest signal, or else it does not happen 8104 * before the execve completes and makes it the other 8105 * program's problem. 
8106 */ 8107 ret = get_errno(safe_execve(p, argp, envp)); 8108 unlock_user(p, arg1, 0); 8109 8110 goto execve_end; 8111 8112 execve_efault: 8113 ret = -TARGET_EFAULT; 8114 8115 execve_end: 8116 for (gp = guest_argp, q = argp; *q; 8117 gp += sizeof(abi_ulong), q++) { 8118 if (get_user_ual(addr, gp) 8119 || !addr) 8120 break; 8121 unlock_user(*q, addr, 0); 8122 } 8123 for (gp = guest_envp, q = envp; *q; 8124 gp += sizeof(abi_ulong), q++) { 8125 if (get_user_ual(addr, gp) 8126 || !addr) 8127 break; 8128 unlock_user(*q, addr, 0); 8129 } 8130 8131 g_free(argp); 8132 g_free(envp); 8133 } 8134 break; 8135 case TARGET_NR_chdir: 8136 if (!(p = lock_user_string(arg1))) 8137 goto efault; 8138 ret = get_errno(chdir(p)); 8139 unlock_user(p, arg1, 0); 8140 break; 8141 #ifdef TARGET_NR_time 8142 case TARGET_NR_time: 8143 { 8144 time_t host_time; 8145 ret = get_errno(time(&host_time)); 8146 if (!is_error(ret) 8147 && arg1 8148 && put_user_sal(host_time, arg1)) 8149 goto efault; 8150 } 8151 break; 8152 #endif 8153 #ifdef TARGET_NR_mknod 8154 case TARGET_NR_mknod: 8155 if (!(p = lock_user_string(arg1))) 8156 goto efault; 8157 ret = get_errno(mknod(p, arg2, arg3)); 8158 unlock_user(p, arg1, 0); 8159 break; 8160 #endif 8161 #if defined(TARGET_NR_mknodat) 8162 case TARGET_NR_mknodat: 8163 if (!(p = lock_user_string(arg2))) 8164 goto efault; 8165 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8166 unlock_user(p, arg2, 0); 8167 break; 8168 #endif 8169 #ifdef TARGET_NR_chmod 8170 case TARGET_NR_chmod: 8171 if (!(p = lock_user_string(arg1))) 8172 goto efault; 8173 ret = get_errno(chmod(p, arg2)); 8174 unlock_user(p, arg1, 0); 8175 break; 8176 #endif 8177 #ifdef TARGET_NR_break 8178 case TARGET_NR_break: 8179 goto unimplemented; 8180 #endif 8181 #ifdef TARGET_NR_oldstat 8182 case TARGET_NR_oldstat: 8183 goto unimplemented; 8184 #endif 8185 case TARGET_NR_lseek: 8186 ret = get_errno(lseek(arg1, arg2, arg3)); 8187 break; 8188 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8189 /* 
Alpha specific */ 8190 case TARGET_NR_getxpid: 8191 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 8192 ret = get_errno(getpid()); 8193 break; 8194 #endif 8195 #ifdef TARGET_NR_getpid 8196 case TARGET_NR_getpid: 8197 ret = get_errno(getpid()); 8198 break; 8199 #endif 8200 case TARGET_NR_mount: 8201 { 8202 /* need to look at the data field */ 8203 void *p2, *p3; 8204 8205 if (arg1) { 8206 p = lock_user_string(arg1); 8207 if (!p) { 8208 goto efault; 8209 } 8210 } else { 8211 p = NULL; 8212 } 8213 8214 p2 = lock_user_string(arg2); 8215 if (!p2) { 8216 if (arg1) { 8217 unlock_user(p, arg1, 0); 8218 } 8219 goto efault; 8220 } 8221 8222 if (arg3) { 8223 p3 = lock_user_string(arg3); 8224 if (!p3) { 8225 if (arg1) { 8226 unlock_user(p, arg1, 0); 8227 } 8228 unlock_user(p2, arg2, 0); 8229 goto efault; 8230 } 8231 } else { 8232 p3 = NULL; 8233 } 8234 8235 /* FIXME - arg5 should be locked, but it isn't clear how to 8236 * do that since it's not guaranteed to be a NULL-terminated 8237 * string. 
8238 */ 8239 if (!arg5) { 8240 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 8241 } else { 8242 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 8243 } 8244 ret = get_errno(ret); 8245 8246 if (arg1) { 8247 unlock_user(p, arg1, 0); 8248 } 8249 unlock_user(p2, arg2, 0); 8250 if (arg3) { 8251 unlock_user(p3, arg3, 0); 8252 } 8253 } 8254 break; 8255 #ifdef TARGET_NR_umount 8256 case TARGET_NR_umount: 8257 if (!(p = lock_user_string(arg1))) 8258 goto efault; 8259 ret = get_errno(umount(p)); 8260 unlock_user(p, arg1, 0); 8261 break; 8262 #endif 8263 #ifdef TARGET_NR_stime /* not on alpha */ 8264 case TARGET_NR_stime: 8265 { 8266 time_t host_time; 8267 if (get_user_sal(host_time, arg1)) 8268 goto efault; 8269 ret = get_errno(stime(&host_time)); 8270 } 8271 break; 8272 #endif 8273 case TARGET_NR_ptrace: 8274 goto unimplemented; 8275 #ifdef TARGET_NR_alarm /* not on alpha */ 8276 case TARGET_NR_alarm: 8277 ret = alarm(arg1); 8278 break; 8279 #endif 8280 #ifdef TARGET_NR_oldfstat 8281 case TARGET_NR_oldfstat: 8282 goto unimplemented; 8283 #endif 8284 #ifdef TARGET_NR_pause /* not on alpha */ 8285 case TARGET_NR_pause: 8286 if (!block_signals()) { 8287 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 8288 } 8289 ret = -TARGET_EINTR; 8290 break; 8291 #endif 8292 #ifdef TARGET_NR_utime 8293 case TARGET_NR_utime: 8294 { 8295 struct utimbuf tbuf, *host_tbuf; 8296 struct target_utimbuf *target_tbuf; 8297 if (arg2) { 8298 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 8299 goto efault; 8300 tbuf.actime = tswapal(target_tbuf->actime); 8301 tbuf.modtime = tswapal(target_tbuf->modtime); 8302 unlock_user_struct(target_tbuf, arg2, 0); 8303 host_tbuf = &tbuf; 8304 } else { 8305 host_tbuf = NULL; 8306 } 8307 if (!(p = lock_user_string(arg1))) 8308 goto efault; 8309 ret = get_errno(utime(p, host_tbuf)); 8310 unlock_user(p, arg1, 0); 8311 } 8312 break; 8313 #endif 8314 #ifdef TARGET_NR_utimes 8315 case TARGET_NR_utimes: 8316 { 8317 struct timeval *tvp, tv[2]; 
8318 if (arg2) { 8319 if (copy_from_user_timeval(&tv[0], arg2) 8320 || copy_from_user_timeval(&tv[1], 8321 arg2 + sizeof(struct target_timeval))) 8322 goto efault; 8323 tvp = tv; 8324 } else { 8325 tvp = NULL; 8326 } 8327 if (!(p = lock_user_string(arg1))) 8328 goto efault; 8329 ret = get_errno(utimes(p, tvp)); 8330 unlock_user(p, arg1, 0); 8331 } 8332 break; 8333 #endif 8334 #if defined(TARGET_NR_futimesat) 8335 case TARGET_NR_futimesat: 8336 { 8337 struct timeval *tvp, tv[2]; 8338 if (arg3) { 8339 if (copy_from_user_timeval(&tv[0], arg3) 8340 || copy_from_user_timeval(&tv[1], 8341 arg3 + sizeof(struct target_timeval))) 8342 goto efault; 8343 tvp = tv; 8344 } else { 8345 tvp = NULL; 8346 } 8347 if (!(p = lock_user_string(arg2))) 8348 goto efault; 8349 ret = get_errno(futimesat(arg1, path(p), tvp)); 8350 unlock_user(p, arg2, 0); 8351 } 8352 break; 8353 #endif 8354 #ifdef TARGET_NR_stty 8355 case TARGET_NR_stty: 8356 goto unimplemented; 8357 #endif 8358 #ifdef TARGET_NR_gtty 8359 case TARGET_NR_gtty: 8360 goto unimplemented; 8361 #endif 8362 #ifdef TARGET_NR_access 8363 case TARGET_NR_access: 8364 if (!(p = lock_user_string(arg1))) 8365 goto efault; 8366 ret = get_errno(access(path(p), arg2)); 8367 unlock_user(p, arg1, 0); 8368 break; 8369 #endif 8370 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 8371 case TARGET_NR_faccessat: 8372 if (!(p = lock_user_string(arg2))) 8373 goto efault; 8374 ret = get_errno(faccessat(arg1, p, arg3, 0)); 8375 unlock_user(p, arg2, 0); 8376 break; 8377 #endif 8378 #ifdef TARGET_NR_nice /* not on alpha */ 8379 case TARGET_NR_nice: 8380 ret = get_errno(nice(arg1)); 8381 break; 8382 #endif 8383 #ifdef TARGET_NR_ftime 8384 case TARGET_NR_ftime: 8385 goto unimplemented; 8386 #endif 8387 case TARGET_NR_sync: 8388 sync(); 8389 ret = 0; 8390 break; 8391 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 8392 case TARGET_NR_syncfs: 8393 ret = get_errno(syncfs(arg1)); 8394 break; 8395 #endif 8396 case TARGET_NR_kill: 8397 ret = 
get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 8398 break; 8399 #ifdef TARGET_NR_rename 8400 case TARGET_NR_rename: 8401 { 8402 void *p2; 8403 p = lock_user_string(arg1); 8404 p2 = lock_user_string(arg2); 8405 if (!p || !p2) 8406 ret = -TARGET_EFAULT; 8407 else 8408 ret = get_errno(rename(p, p2)); 8409 unlock_user(p2, arg2, 0); 8410 unlock_user(p, arg1, 0); 8411 } 8412 break; 8413 #endif 8414 #if defined(TARGET_NR_renameat) 8415 case TARGET_NR_renameat: 8416 { 8417 void *p2; 8418 p = lock_user_string(arg2); 8419 p2 = lock_user_string(arg4); 8420 if (!p || !p2) 8421 ret = -TARGET_EFAULT; 8422 else 8423 ret = get_errno(renameat(arg1, p, arg3, p2)); 8424 unlock_user(p2, arg4, 0); 8425 unlock_user(p, arg2, 0); 8426 } 8427 break; 8428 #endif 8429 #ifdef TARGET_NR_mkdir 8430 case TARGET_NR_mkdir: 8431 if (!(p = lock_user_string(arg1))) 8432 goto efault; 8433 ret = get_errno(mkdir(p, arg2)); 8434 unlock_user(p, arg1, 0); 8435 break; 8436 #endif 8437 #if defined(TARGET_NR_mkdirat) 8438 case TARGET_NR_mkdirat: 8439 if (!(p = lock_user_string(arg2))) 8440 goto efault; 8441 ret = get_errno(mkdirat(arg1, p, arg3)); 8442 unlock_user(p, arg2, 0); 8443 break; 8444 #endif 8445 #ifdef TARGET_NR_rmdir 8446 case TARGET_NR_rmdir: 8447 if (!(p = lock_user_string(arg1))) 8448 goto efault; 8449 ret = get_errno(rmdir(p)); 8450 unlock_user(p, arg1, 0); 8451 break; 8452 #endif 8453 case TARGET_NR_dup: 8454 ret = get_errno(dup(arg1)); 8455 if (ret >= 0) { 8456 fd_trans_dup(arg1, ret); 8457 } 8458 break; 8459 #ifdef TARGET_NR_pipe 8460 case TARGET_NR_pipe: 8461 ret = do_pipe(cpu_env, arg1, 0, 0); 8462 break; 8463 #endif 8464 #ifdef TARGET_NR_pipe2 8465 case TARGET_NR_pipe2: 8466 ret = do_pipe(cpu_env, arg1, 8467 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 8468 break; 8469 #endif 8470 case TARGET_NR_times: 8471 { 8472 struct target_tms *tmsp; 8473 struct tms tms; 8474 ret = get_errno(times(&tms)); 8475 if (arg1) { 8476 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct 
target_tms), 0); 8477 if (!tmsp) 8478 goto efault; 8479 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 8480 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 8481 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 8482 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 8483 } 8484 if (!is_error(ret)) 8485 ret = host_to_target_clock_t(ret); 8486 } 8487 break; 8488 #ifdef TARGET_NR_prof 8489 case TARGET_NR_prof: 8490 goto unimplemented; 8491 #endif 8492 #ifdef TARGET_NR_signal 8493 case TARGET_NR_signal: 8494 goto unimplemented; 8495 #endif 8496 case TARGET_NR_acct: 8497 if (arg1 == 0) { 8498 ret = get_errno(acct(NULL)); 8499 } else { 8500 if (!(p = lock_user_string(arg1))) 8501 goto efault; 8502 ret = get_errno(acct(path(p))); 8503 unlock_user(p, arg1, 0); 8504 } 8505 break; 8506 #ifdef TARGET_NR_umount2 8507 case TARGET_NR_umount2: 8508 if (!(p = lock_user_string(arg1))) 8509 goto efault; 8510 ret = get_errno(umount2(p, arg2)); 8511 unlock_user(p, arg1, 0); 8512 break; 8513 #endif 8514 #ifdef TARGET_NR_lock 8515 case TARGET_NR_lock: 8516 goto unimplemented; 8517 #endif 8518 case TARGET_NR_ioctl: 8519 ret = do_ioctl(arg1, arg2, arg3); 8520 break; 8521 case TARGET_NR_fcntl: 8522 ret = do_fcntl(arg1, arg2, arg3); 8523 break; 8524 #ifdef TARGET_NR_mpx 8525 case TARGET_NR_mpx: 8526 goto unimplemented; 8527 #endif 8528 case TARGET_NR_setpgid: 8529 ret = get_errno(setpgid(arg1, arg2)); 8530 break; 8531 #ifdef TARGET_NR_ulimit 8532 case TARGET_NR_ulimit: 8533 goto unimplemented; 8534 #endif 8535 #ifdef TARGET_NR_oldolduname 8536 case TARGET_NR_oldolduname: 8537 goto unimplemented; 8538 #endif 8539 case TARGET_NR_umask: 8540 ret = get_errno(umask(arg1)); 8541 break; 8542 case TARGET_NR_chroot: 8543 if (!(p = lock_user_string(arg1))) 8544 goto efault; 8545 ret = get_errno(chroot(p)); 8546 unlock_user(p, arg1, 0); 8547 break; 8548 #ifdef TARGET_NR_ustat 8549 case TARGET_NR_ustat: 8550 goto unimplemented; 
8551 #endif 8552 #ifdef TARGET_NR_dup2 8553 case TARGET_NR_dup2: 8554 ret = get_errno(dup2(arg1, arg2)); 8555 if (ret >= 0) { 8556 fd_trans_dup(arg1, arg2); 8557 } 8558 break; 8559 #endif 8560 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 8561 case TARGET_NR_dup3: 8562 { 8563 int host_flags; 8564 8565 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 8566 return -EINVAL; 8567 } 8568 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 8569 ret = get_errno(dup3(arg1, arg2, host_flags)); 8570 if (ret >= 0) { 8571 fd_trans_dup(arg1, arg2); 8572 } 8573 break; 8574 } 8575 #endif 8576 #ifdef TARGET_NR_getppid /* not on alpha */ 8577 case TARGET_NR_getppid: 8578 ret = get_errno(getppid()); 8579 break; 8580 #endif 8581 #ifdef TARGET_NR_getpgrp 8582 case TARGET_NR_getpgrp: 8583 ret = get_errno(getpgrp()); 8584 break; 8585 #endif 8586 case TARGET_NR_setsid: 8587 ret = get_errno(setsid()); 8588 break; 8589 #ifdef TARGET_NR_sigaction 8590 case TARGET_NR_sigaction: 8591 { 8592 #if defined(TARGET_ALPHA) 8593 struct target_sigaction act, oact, *pact = 0; 8594 struct target_old_sigaction *old_act; 8595 if (arg2) { 8596 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8597 goto efault; 8598 act._sa_handler = old_act->_sa_handler; 8599 target_siginitset(&act.sa_mask, old_act->sa_mask); 8600 act.sa_flags = old_act->sa_flags; 8601 act.sa_restorer = 0; 8602 unlock_user_struct(old_act, arg2, 0); 8603 pact = &act; 8604 } 8605 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8606 if (!is_error(ret) && arg3) { 8607 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8608 goto efault; 8609 old_act->_sa_handler = oact._sa_handler; 8610 old_act->sa_mask = oact.sa_mask.sig[0]; 8611 old_act->sa_flags = oact.sa_flags; 8612 unlock_user_struct(old_act, arg3, 1); 8613 } 8614 #elif defined(TARGET_MIPS) 8615 struct target_sigaction act, oact, *pact, *old_act; 8616 8617 if (arg2) { 8618 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8619 goto efault; 8620 act._sa_handler = 
old_act->_sa_handler; 8621 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 8622 act.sa_flags = old_act->sa_flags; 8623 unlock_user_struct(old_act, arg2, 0); 8624 pact = &act; 8625 } else { 8626 pact = NULL; 8627 } 8628 8629 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8630 8631 if (!is_error(ret) && arg3) { 8632 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8633 goto efault; 8634 old_act->_sa_handler = oact._sa_handler; 8635 old_act->sa_flags = oact.sa_flags; 8636 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 8637 old_act->sa_mask.sig[1] = 0; 8638 old_act->sa_mask.sig[2] = 0; 8639 old_act->sa_mask.sig[3] = 0; 8640 unlock_user_struct(old_act, arg3, 1); 8641 } 8642 #else 8643 struct target_old_sigaction *old_act; 8644 struct target_sigaction act, oact, *pact; 8645 if (arg2) { 8646 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8647 goto efault; 8648 act._sa_handler = old_act->_sa_handler; 8649 target_siginitset(&act.sa_mask, old_act->sa_mask); 8650 act.sa_flags = old_act->sa_flags; 8651 act.sa_restorer = old_act->sa_restorer; 8652 unlock_user_struct(old_act, arg2, 0); 8653 pact = &act; 8654 } else { 8655 pact = NULL; 8656 } 8657 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8658 if (!is_error(ret) && arg3) { 8659 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8660 goto efault; 8661 old_act->_sa_handler = oact._sa_handler; 8662 old_act->sa_mask = oact.sa_mask.sig[0]; 8663 old_act->sa_flags = oact.sa_flags; 8664 old_act->sa_restorer = oact.sa_restorer; 8665 unlock_user_struct(old_act, arg3, 1); 8666 } 8667 #endif 8668 } 8669 break; 8670 #endif 8671 case TARGET_NR_rt_sigaction: 8672 { 8673 #if defined(TARGET_ALPHA) 8674 /* For Alpha and SPARC this is a 5 argument syscall, with 8675 * a 'restorer' parameter which must be copied into the 8676 * sa_restorer field of the sigaction struct. 8677 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 8678 * and arg5 is the sigsetsize. 
8679 * Alpha also has a separate rt_sigaction struct that it uses 8680 * here; SPARC uses the usual sigaction struct. 8681 */ 8682 struct target_rt_sigaction *rt_act; 8683 struct target_sigaction act, oact, *pact = 0; 8684 8685 if (arg4 != sizeof(target_sigset_t)) { 8686 ret = -TARGET_EINVAL; 8687 break; 8688 } 8689 if (arg2) { 8690 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 8691 goto efault; 8692 act._sa_handler = rt_act->_sa_handler; 8693 act.sa_mask = rt_act->sa_mask; 8694 act.sa_flags = rt_act->sa_flags; 8695 act.sa_restorer = arg5; 8696 unlock_user_struct(rt_act, arg2, 0); 8697 pact = &act; 8698 } 8699 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8700 if (!is_error(ret) && arg3) { 8701 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 8702 goto efault; 8703 rt_act->_sa_handler = oact._sa_handler; 8704 rt_act->sa_mask = oact.sa_mask; 8705 rt_act->sa_flags = oact.sa_flags; 8706 unlock_user_struct(rt_act, arg3, 1); 8707 } 8708 #else 8709 #ifdef TARGET_SPARC 8710 target_ulong restorer = arg4; 8711 target_ulong sigsetsize = arg5; 8712 #else 8713 target_ulong sigsetsize = arg4; 8714 #endif 8715 struct target_sigaction *act; 8716 struct target_sigaction *oact; 8717 8718 if (sigsetsize != sizeof(target_sigset_t)) { 8719 ret = -TARGET_EINVAL; 8720 break; 8721 } 8722 if (arg2) { 8723 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) { 8724 goto efault; 8725 } 8726 #ifdef TARGET_SPARC 8727 act->sa_restorer = restorer; 8728 #endif 8729 } else { 8730 act = NULL; 8731 } 8732 if (arg3) { 8733 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 8734 ret = -TARGET_EFAULT; 8735 goto rt_sigaction_fail; 8736 } 8737 } else 8738 oact = NULL; 8739 ret = get_errno(do_sigaction(arg1, act, oact)); 8740 rt_sigaction_fail: 8741 if (act) 8742 unlock_user_struct(act, arg2, 0); 8743 if (oact) 8744 unlock_user_struct(oact, arg3, 1); 8745 #endif 8746 } 8747 break; 8748 #ifdef TARGET_NR_sgetmask /* not on alpha */ 8749 case TARGET_NR_sgetmask: 8750 { 8751 sigset_t cur_set; 
8752 abi_ulong target_set; 8753 ret = do_sigprocmask(0, NULL, &cur_set); 8754 if (!ret) { 8755 host_to_target_old_sigset(&target_set, &cur_set); 8756 ret = target_set; 8757 } 8758 } 8759 break; 8760 #endif 8761 #ifdef TARGET_NR_ssetmask /* not on alpha */ 8762 case TARGET_NR_ssetmask: 8763 { 8764 sigset_t set, oset; 8765 abi_ulong target_set = arg1; 8766 target_to_host_old_sigset(&set, &target_set); 8767 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 8768 if (!ret) { 8769 host_to_target_old_sigset(&target_set, &oset); 8770 ret = target_set; 8771 } 8772 } 8773 break; 8774 #endif 8775 #ifdef TARGET_NR_sigprocmask 8776 case TARGET_NR_sigprocmask: 8777 { 8778 #if defined(TARGET_ALPHA) 8779 sigset_t set, oldset; 8780 abi_ulong mask; 8781 int how; 8782 8783 switch (arg1) { 8784 case TARGET_SIG_BLOCK: 8785 how = SIG_BLOCK; 8786 break; 8787 case TARGET_SIG_UNBLOCK: 8788 how = SIG_UNBLOCK; 8789 break; 8790 case TARGET_SIG_SETMASK: 8791 how = SIG_SETMASK; 8792 break; 8793 default: 8794 ret = -TARGET_EINVAL; 8795 goto fail; 8796 } 8797 mask = arg2; 8798 target_to_host_old_sigset(&set, &mask); 8799 8800 ret = do_sigprocmask(how, &set, &oldset); 8801 if (!is_error(ret)) { 8802 host_to_target_old_sigset(&mask, &oldset); 8803 ret = mask; 8804 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 8805 } 8806 #else 8807 sigset_t set, oldset, *set_ptr; 8808 int how; 8809 8810 if (arg2) { 8811 switch (arg1) { 8812 case TARGET_SIG_BLOCK: 8813 how = SIG_BLOCK; 8814 break; 8815 case TARGET_SIG_UNBLOCK: 8816 how = SIG_UNBLOCK; 8817 break; 8818 case TARGET_SIG_SETMASK: 8819 how = SIG_SETMASK; 8820 break; 8821 default: 8822 ret = -TARGET_EINVAL; 8823 goto fail; 8824 } 8825 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 8826 goto efault; 8827 target_to_host_old_sigset(&set, p); 8828 unlock_user(p, arg2, 0); 8829 set_ptr = &set; 8830 } else { 8831 how = 0; 8832 set_ptr = NULL; 8833 } 8834 ret = do_sigprocmask(how, set_ptr, &oldset); 8835 if (!is_error(ret) && 
arg3) { 8836 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 8837 goto efault; 8838 host_to_target_old_sigset(p, &oldset); 8839 unlock_user(p, arg3, sizeof(target_sigset_t)); 8840 } 8841 #endif 8842 } 8843 break; 8844 #endif 8845 case TARGET_NR_rt_sigprocmask: 8846 { 8847 int how = arg1; 8848 sigset_t set, oldset, *set_ptr; 8849 8850 if (arg4 != sizeof(target_sigset_t)) { 8851 ret = -TARGET_EINVAL; 8852 break; 8853 } 8854 8855 if (arg2) { 8856 switch(how) { 8857 case TARGET_SIG_BLOCK: 8858 how = SIG_BLOCK; 8859 break; 8860 case TARGET_SIG_UNBLOCK: 8861 how = SIG_UNBLOCK; 8862 break; 8863 case TARGET_SIG_SETMASK: 8864 how = SIG_SETMASK; 8865 break; 8866 default: 8867 ret = -TARGET_EINVAL; 8868 goto fail; 8869 } 8870 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 8871 goto efault; 8872 target_to_host_sigset(&set, p); 8873 unlock_user(p, arg2, 0); 8874 set_ptr = &set; 8875 } else { 8876 how = 0; 8877 set_ptr = NULL; 8878 } 8879 ret = do_sigprocmask(how, set_ptr, &oldset); 8880 if (!is_error(ret) && arg3) { 8881 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 8882 goto efault; 8883 host_to_target_sigset(p, &oldset); 8884 unlock_user(p, arg3, sizeof(target_sigset_t)); 8885 } 8886 } 8887 break; 8888 #ifdef TARGET_NR_sigpending 8889 case TARGET_NR_sigpending: 8890 { 8891 sigset_t set; 8892 ret = get_errno(sigpending(&set)); 8893 if (!is_error(ret)) { 8894 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 8895 goto efault; 8896 host_to_target_old_sigset(p, &set); 8897 unlock_user(p, arg1, sizeof(target_sigset_t)); 8898 } 8899 } 8900 break; 8901 #endif 8902 case TARGET_NR_rt_sigpending: 8903 { 8904 sigset_t set; 8905 8906 /* Yes, this check is >, not != like most. We follow the kernel's 8907 * logic and it does it like this because it implements 8908 * NR_sigpending through the same code path, and in that case 8909 * the old_sigset_t is smaller in size. 
8910 */ 8911 if (arg2 > sizeof(target_sigset_t)) { 8912 ret = -TARGET_EINVAL; 8913 break; 8914 } 8915 8916 ret = get_errno(sigpending(&set)); 8917 if (!is_error(ret)) { 8918 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 8919 goto efault; 8920 host_to_target_sigset(p, &set); 8921 unlock_user(p, arg1, sizeof(target_sigset_t)); 8922 } 8923 } 8924 break; 8925 #ifdef TARGET_NR_sigsuspend 8926 case TARGET_NR_sigsuspend: 8927 { 8928 TaskState *ts = cpu->opaque; 8929 #if defined(TARGET_ALPHA) 8930 abi_ulong mask = arg1; 8931 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); 8932 #else 8933 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8934 goto efault; 8935 target_to_host_old_sigset(&ts->sigsuspend_mask, p); 8936 unlock_user(p, arg1, 0); 8937 #endif 8938 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 8939 SIGSET_T_SIZE)); 8940 if (ret != -TARGET_ERESTARTSYS) { 8941 ts->in_sigsuspend = 1; 8942 } 8943 } 8944 break; 8945 #endif 8946 case TARGET_NR_rt_sigsuspend: 8947 { 8948 TaskState *ts = cpu->opaque; 8949 8950 if (arg2 != sizeof(target_sigset_t)) { 8951 ret = -TARGET_EINVAL; 8952 break; 8953 } 8954 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8955 goto efault; 8956 target_to_host_sigset(&ts->sigsuspend_mask, p); 8957 unlock_user(p, arg1, 0); 8958 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 8959 SIGSET_T_SIZE)); 8960 if (ret != -TARGET_ERESTARTSYS) { 8961 ts->in_sigsuspend = 1; 8962 } 8963 } 8964 break; 8965 case TARGET_NR_rt_sigtimedwait: 8966 { 8967 sigset_t set; 8968 struct timespec uts, *puts; 8969 siginfo_t uinfo; 8970 8971 if (arg4 != sizeof(target_sigset_t)) { 8972 ret = -TARGET_EINVAL; 8973 break; 8974 } 8975 8976 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8977 goto efault; 8978 target_to_host_sigset(&set, p); 8979 unlock_user(p, arg1, 0); 8980 if (arg3) { 8981 puts = &uts; 8982 target_to_host_timespec(puts, arg3); 8983 } else { 8984 puts = 
NULL; 8985 } 8986 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 8987 SIGSET_T_SIZE)); 8988 if (!is_error(ret)) { 8989 if (arg2) { 8990 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 8991 0); 8992 if (!p) { 8993 goto efault; 8994 } 8995 host_to_target_siginfo(p, &uinfo); 8996 unlock_user(p, arg2, sizeof(target_siginfo_t)); 8997 } 8998 ret = host_to_target_signal(ret); 8999 } 9000 } 9001 break; 9002 case TARGET_NR_rt_sigqueueinfo: 9003 { 9004 siginfo_t uinfo; 9005 9006 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9007 if (!p) { 9008 goto efault; 9009 } 9010 target_to_host_siginfo(&uinfo, p); 9011 unlock_user(p, arg3, 0); 9012 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 9013 } 9014 break; 9015 case TARGET_NR_rt_tgsigqueueinfo: 9016 { 9017 siginfo_t uinfo; 9018 9019 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 9020 if (!p) { 9021 goto efault; 9022 } 9023 target_to_host_siginfo(&uinfo, p); 9024 unlock_user(p, arg4, 0); 9025 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo)); 9026 } 9027 break; 9028 #ifdef TARGET_NR_sigreturn 9029 case TARGET_NR_sigreturn: 9030 if (block_signals()) { 9031 ret = -TARGET_ERESTARTSYS; 9032 } else { 9033 ret = do_sigreturn(cpu_env); 9034 } 9035 break; 9036 #endif 9037 case TARGET_NR_rt_sigreturn: 9038 if (block_signals()) { 9039 ret = -TARGET_ERESTARTSYS; 9040 } else { 9041 ret = do_rt_sigreturn(cpu_env); 9042 } 9043 break; 9044 case TARGET_NR_sethostname: 9045 if (!(p = lock_user_string(arg1))) 9046 goto efault; 9047 ret = get_errno(sethostname(p, arg2)); 9048 unlock_user(p, arg1, 0); 9049 break; 9050 case TARGET_NR_setrlimit: 9051 { 9052 int resource = target_to_host_resource(arg1); 9053 struct target_rlimit *target_rlim; 9054 struct rlimit rlim; 9055 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9056 goto efault; 9057 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9058 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 
9059 unlock_user_struct(target_rlim, arg2, 0); 9060 ret = get_errno(setrlimit(resource, &rlim)); 9061 } 9062 break; 9063 case TARGET_NR_getrlimit: 9064 { 9065 int resource = target_to_host_resource(arg1); 9066 struct target_rlimit *target_rlim; 9067 struct rlimit rlim; 9068 9069 ret = get_errno(getrlimit(resource, &rlim)); 9070 if (!is_error(ret)) { 9071 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9072 goto efault; 9073 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9074 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9075 unlock_user_struct(target_rlim, arg2, 1); 9076 } 9077 } 9078 break; 9079 case TARGET_NR_getrusage: 9080 { 9081 struct rusage rusage; 9082 ret = get_errno(getrusage(arg1, &rusage)); 9083 if (!is_error(ret)) { 9084 ret = host_to_target_rusage(arg2, &rusage); 9085 } 9086 } 9087 break; 9088 case TARGET_NR_gettimeofday: 9089 { 9090 struct timeval tv; 9091 ret = get_errno(gettimeofday(&tv, NULL)); 9092 if (!is_error(ret)) { 9093 if (copy_to_user_timeval(arg1, &tv)) 9094 goto efault; 9095 } 9096 } 9097 break; 9098 case TARGET_NR_settimeofday: 9099 { 9100 struct timeval tv, *ptv = NULL; 9101 struct timezone tz, *ptz = NULL; 9102 9103 if (arg1) { 9104 if (copy_from_user_timeval(&tv, arg1)) { 9105 goto efault; 9106 } 9107 ptv = &tv; 9108 } 9109 9110 if (arg2) { 9111 if (copy_from_user_timezone(&tz, arg2)) { 9112 goto efault; 9113 } 9114 ptz = &tz; 9115 } 9116 9117 ret = get_errno(settimeofday(ptv, ptz)); 9118 } 9119 break; 9120 #if defined(TARGET_NR_select) 9121 case TARGET_NR_select: 9122 #if defined(TARGET_WANT_NI_OLD_SELECT) 9123 /* some architectures used to have old_select here 9124 * but now ENOSYS it. 
9125 */ 9126 ret = -TARGET_ENOSYS; 9127 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9128 ret = do_old_select(arg1); 9129 #else 9130 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9131 #endif 9132 break; 9133 #endif 9134 #ifdef TARGET_NR_pselect6 9135 case TARGET_NR_pselect6: 9136 { 9137 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 9138 fd_set rfds, wfds, efds; 9139 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 9140 struct timespec ts, *ts_ptr; 9141 9142 /* 9143 * The 6th arg is actually two args smashed together, 9144 * so we cannot use the C library. 9145 */ 9146 sigset_t set; 9147 struct { 9148 sigset_t *set; 9149 size_t size; 9150 } sig, *sig_ptr; 9151 9152 abi_ulong arg_sigset, arg_sigsize, *arg7; 9153 target_sigset_t *target_sigset; 9154 9155 n = arg1; 9156 rfd_addr = arg2; 9157 wfd_addr = arg3; 9158 efd_addr = arg4; 9159 ts_addr = arg5; 9160 9161 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 9162 if (ret) { 9163 goto fail; 9164 } 9165 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 9166 if (ret) { 9167 goto fail; 9168 } 9169 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 9170 if (ret) { 9171 goto fail; 9172 } 9173 9174 /* 9175 * This takes a timespec, and not a timeval, so we cannot 9176 * use the do_select() helper ... 
9177 */ 9178 if (ts_addr) { 9179 if (target_to_host_timespec(&ts, ts_addr)) { 9180 goto efault; 9181 } 9182 ts_ptr = &ts; 9183 } else { 9184 ts_ptr = NULL; 9185 } 9186 9187 /* Extract the two packed args for the sigset */ 9188 if (arg6) { 9189 sig_ptr = &sig; 9190 sig.size = SIGSET_T_SIZE; 9191 9192 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 9193 if (!arg7) { 9194 goto efault; 9195 } 9196 arg_sigset = tswapal(arg7[0]); 9197 arg_sigsize = tswapal(arg7[1]); 9198 unlock_user(arg7, arg6, 0); 9199 9200 if (arg_sigset) { 9201 sig.set = &set; 9202 if (arg_sigsize != sizeof(*target_sigset)) { 9203 /* Like the kernel, we enforce correct size sigsets */ 9204 ret = -TARGET_EINVAL; 9205 goto fail; 9206 } 9207 target_sigset = lock_user(VERIFY_READ, arg_sigset, 9208 sizeof(*target_sigset), 1); 9209 if (!target_sigset) { 9210 goto efault; 9211 } 9212 target_to_host_sigset(&set, target_sigset); 9213 unlock_user(target_sigset, arg_sigset, 0); 9214 } else { 9215 sig.set = NULL; 9216 } 9217 } else { 9218 sig_ptr = NULL; 9219 } 9220 9221 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 9222 ts_ptr, sig_ptr)); 9223 9224 if (!is_error(ret)) { 9225 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 9226 goto efault; 9227 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 9228 goto efault; 9229 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 9230 goto efault; 9231 9232 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 9233 goto efault; 9234 } 9235 } 9236 break; 9237 #endif 9238 #ifdef TARGET_NR_symlink 9239 case TARGET_NR_symlink: 9240 { 9241 void *p2; 9242 p = lock_user_string(arg1); 9243 p2 = lock_user_string(arg2); 9244 if (!p || !p2) 9245 ret = -TARGET_EFAULT; 9246 else 9247 ret = get_errno(symlink(p, p2)); 9248 unlock_user(p2, arg2, 0); 9249 unlock_user(p, arg1, 0); 9250 } 9251 break; 9252 #endif 9253 #if defined(TARGET_NR_symlinkat) 9254 case TARGET_NR_symlinkat: 9255 { 9256 void *p2; 9257 p = lock_user_string(arg1); 9258 p2 = 
lock_user_string(arg3); 9259 if (!p || !p2) 9260 ret = -TARGET_EFAULT; 9261 else 9262 ret = get_errno(symlinkat(p, arg2, p2)); 9263 unlock_user(p2, arg3, 0); 9264 unlock_user(p, arg1, 0); 9265 } 9266 break; 9267 #endif 9268 #ifdef TARGET_NR_oldlstat 9269 case TARGET_NR_oldlstat: 9270 goto unimplemented; 9271 #endif 9272 #ifdef TARGET_NR_readlink 9273 case TARGET_NR_readlink: 9274 { 9275 void *p2; 9276 p = lock_user_string(arg1); 9277 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9278 if (!p || !p2) { 9279 ret = -TARGET_EFAULT; 9280 } else if (!arg3) { 9281 /* Short circuit this for the magic exe check. */ 9282 ret = -TARGET_EINVAL; 9283 } else if (is_proc_myself((const char *)p, "exe")) { 9284 char real[PATH_MAX], *temp; 9285 temp = realpath(exec_path, real); 9286 /* Return value is # of bytes that we wrote to the buffer. */ 9287 if (temp == NULL) { 9288 ret = get_errno(-1); 9289 } else { 9290 /* Don't worry about sign mismatch as earlier mapping 9291 * logic would have thrown a bad address error. */ 9292 ret = MIN(strlen(real), arg3); 9293 /* We cannot NUL terminate the string. */ 9294 memcpy(p2, real, ret); 9295 } 9296 } else { 9297 ret = get_errno(readlink(path(p), p2, arg3)); 9298 } 9299 unlock_user(p2, arg2, ret); 9300 unlock_user(p, arg1, 0); 9301 } 9302 break; 9303 #endif 9304 #if defined(TARGET_NR_readlinkat) 9305 case TARGET_NR_readlinkat: 9306 { 9307 void *p2; 9308 p = lock_user_string(arg2); 9309 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9310 if (!p || !p2) { 9311 ret = -TARGET_EFAULT; 9312 } else if (is_proc_myself((const char *)p, "exe")) { 9313 char real[PATH_MAX], *temp; 9314 temp = realpath(exec_path, real); 9315 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 9316 snprintf((char *)p2, arg4, "%s", real); 9317 } else { 9318 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 9319 } 9320 unlock_user(p2, arg3, ret); 9321 unlock_user(p, arg2, 0); 9322 } 9323 break; 9324 #endif 9325 #ifdef TARGET_NR_uselib 9326 case TARGET_NR_uselib: 9327 goto unimplemented; 9328 #endif 9329 #ifdef TARGET_NR_swapon 9330 case TARGET_NR_swapon: 9331 if (!(p = lock_user_string(arg1))) 9332 goto efault; 9333 ret = get_errno(swapon(p, arg2)); 9334 unlock_user(p, arg1, 0); 9335 break; 9336 #endif 9337 case TARGET_NR_reboot: 9338 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 9339 /* arg4 must be ignored in all other cases */ 9340 p = lock_user_string(arg4); 9341 if (!p) { 9342 goto efault; 9343 } 9344 ret = get_errno(reboot(arg1, arg2, arg3, p)); 9345 unlock_user(p, arg4, 0); 9346 } else { 9347 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 9348 } 9349 break; 9350 #ifdef TARGET_NR_readdir 9351 case TARGET_NR_readdir: 9352 goto unimplemented; 9353 #endif 9354 #ifdef TARGET_NR_mmap 9355 case TARGET_NR_mmap: 9356 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 9357 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 9358 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 9359 || defined(TARGET_S390X) 9360 { 9361 abi_ulong *v; 9362 abi_ulong v1, v2, v3, v4, v5, v6; 9363 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 9364 goto efault; 9365 v1 = tswapal(v[0]); 9366 v2 = tswapal(v[1]); 9367 v3 = tswapal(v[2]); 9368 v4 = tswapal(v[3]); 9369 v5 = tswapal(v[4]); 9370 v6 = tswapal(v[5]); 9371 unlock_user(v, arg1, 0); 9372 ret = get_errno(target_mmap(v1, v2, v3, 9373 target_to_host_bitmask(v4, mmap_flags_tbl), 9374 v5, v6)); 9375 } 9376 #else 9377 ret = get_errno(target_mmap(arg1, arg2, arg3, 9378 target_to_host_bitmask(arg4, mmap_flags_tbl), 9379 arg5, 9380 arg6)); 9381 #endif 9382 break; 9383 #endif 9384 #ifdef TARGET_NR_mmap2 9385 case TARGET_NR_mmap2: 9386 #ifndef 
MMAP_SHIFT 9387 #define MMAP_SHIFT 12 9388 #endif 9389 ret = get_errno(target_mmap(arg1, arg2, arg3, 9390 target_to_host_bitmask(arg4, mmap_flags_tbl), 9391 arg5, 9392 arg6 << MMAP_SHIFT)); 9393 break; 9394 #endif 9395 case TARGET_NR_munmap: 9396 ret = get_errno(target_munmap(arg1, arg2)); 9397 break; 9398 case TARGET_NR_mprotect: 9399 { 9400 TaskState *ts = cpu->opaque; 9401 /* Special hack to detect libc making the stack executable. */ 9402 if ((arg3 & PROT_GROWSDOWN) 9403 && arg1 >= ts->info->stack_limit 9404 && arg1 <= ts->info->start_stack) { 9405 arg3 &= ~PROT_GROWSDOWN; 9406 arg2 = arg2 + arg1 - ts->info->stack_limit; 9407 arg1 = ts->info->stack_limit; 9408 } 9409 } 9410 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 9411 break; 9412 #ifdef TARGET_NR_mremap 9413 case TARGET_NR_mremap: 9414 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 9415 break; 9416 #endif 9417 /* ??? msync/mlock/munlock are broken for softmmu. */ 9418 #ifdef TARGET_NR_msync 9419 case TARGET_NR_msync: 9420 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 9421 break; 9422 #endif 9423 #ifdef TARGET_NR_mlock 9424 case TARGET_NR_mlock: 9425 ret = get_errno(mlock(g2h(arg1), arg2)); 9426 break; 9427 #endif 9428 #ifdef TARGET_NR_munlock 9429 case TARGET_NR_munlock: 9430 ret = get_errno(munlock(g2h(arg1), arg2)); 9431 break; 9432 #endif 9433 #ifdef TARGET_NR_mlockall 9434 case TARGET_NR_mlockall: 9435 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 9436 break; 9437 #endif 9438 #ifdef TARGET_NR_munlockall 9439 case TARGET_NR_munlockall: 9440 ret = get_errno(munlockall()); 9441 break; 9442 #endif 9443 case TARGET_NR_truncate: 9444 if (!(p = lock_user_string(arg1))) 9445 goto efault; 9446 ret = get_errno(truncate(p, arg2)); 9447 unlock_user(p, arg1, 0); 9448 break; 9449 case TARGET_NR_ftruncate: 9450 ret = get_errno(ftruncate(arg1, arg2)); 9451 break; 9452 case TARGET_NR_fchmod: 9453 ret = get_errno(fchmod(arg1, arg2)); 9454 break; 9455 #if defined(TARGET_NR_fchmodat) 
9456 case TARGET_NR_fchmodat: 9457 if (!(p = lock_user_string(arg2))) 9458 goto efault; 9459 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 9460 unlock_user(p, arg2, 0); 9461 break; 9462 #endif 9463 case TARGET_NR_getpriority: 9464 /* Note that negative values are valid for getpriority, so we must 9465 differentiate based on errno settings. */ 9466 errno = 0; 9467 ret = getpriority(arg1, arg2); 9468 if (ret == -1 && errno != 0) { 9469 ret = -host_to_target_errno(errno); 9470 break; 9471 } 9472 #ifdef TARGET_ALPHA 9473 /* Return value is the unbiased priority. Signal no error. */ 9474 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 9475 #else 9476 /* Return value is a biased priority to avoid negative numbers. */ 9477 ret = 20 - ret; 9478 #endif 9479 break; 9480 case TARGET_NR_setpriority: 9481 ret = get_errno(setpriority(arg1, arg2, arg3)); 9482 break; 9483 #ifdef TARGET_NR_profil 9484 case TARGET_NR_profil: 9485 goto unimplemented; 9486 #endif 9487 case TARGET_NR_statfs: 9488 if (!(p = lock_user_string(arg1))) 9489 goto efault; 9490 ret = get_errno(statfs(path(p), &stfs)); 9491 unlock_user(p, arg1, 0); 9492 convert_statfs: 9493 if (!is_error(ret)) { 9494 struct target_statfs *target_stfs; 9495 9496 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 9497 goto efault; 9498 __put_user(stfs.f_type, &target_stfs->f_type); 9499 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9500 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9501 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9502 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9503 __put_user(stfs.f_files, &target_stfs->f_files); 9504 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9505 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9506 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9507 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9508 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9509 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9510 
unlock_user_struct(target_stfs, arg2, 1); 9511 } 9512 break; 9513 case TARGET_NR_fstatfs: 9514 ret = get_errno(fstatfs(arg1, &stfs)); 9515 goto convert_statfs; 9516 #ifdef TARGET_NR_statfs64 9517 case TARGET_NR_statfs64: 9518 if (!(p = lock_user_string(arg1))) 9519 goto efault; 9520 ret = get_errno(statfs(path(p), &stfs)); 9521 unlock_user(p, arg1, 0); 9522 convert_statfs64: 9523 if (!is_error(ret)) { 9524 struct target_statfs64 *target_stfs; 9525 9526 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 9527 goto efault; 9528 __put_user(stfs.f_type, &target_stfs->f_type); 9529 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9530 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9531 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9532 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9533 __put_user(stfs.f_files, &target_stfs->f_files); 9534 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9535 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9536 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9537 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9538 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9539 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9540 unlock_user_struct(target_stfs, arg3, 1); 9541 } 9542 break; 9543 case TARGET_NR_fstatfs64: 9544 ret = get_errno(fstatfs(arg1, &stfs)); 9545 goto convert_statfs64; 9546 #endif 9547 #ifdef TARGET_NR_ioperm 9548 case TARGET_NR_ioperm: 9549 goto unimplemented; 9550 #endif 9551 #ifdef TARGET_NR_socketcall 9552 case TARGET_NR_socketcall: 9553 ret = do_socketcall(arg1, arg2); 9554 break; 9555 #endif 9556 #ifdef TARGET_NR_accept 9557 case TARGET_NR_accept: 9558 ret = do_accept4(arg1, arg2, arg3, 0); 9559 break; 9560 #endif 9561 #ifdef TARGET_NR_accept4 9562 case TARGET_NR_accept4: 9563 ret = do_accept4(arg1, arg2, arg3, arg4); 9564 break; 9565 #endif 9566 #ifdef TARGET_NR_bind 9567 case TARGET_NR_bind: 9568 ret = do_bind(arg1, arg2, arg3); 9569 break; 9570 
#endif 9571 #ifdef TARGET_NR_connect 9572 case TARGET_NR_connect: 9573 ret = do_connect(arg1, arg2, arg3); 9574 break; 9575 #endif 9576 #ifdef TARGET_NR_getpeername 9577 case TARGET_NR_getpeername: 9578 ret = do_getpeername(arg1, arg2, arg3); 9579 break; 9580 #endif 9581 #ifdef TARGET_NR_getsockname 9582 case TARGET_NR_getsockname: 9583 ret = do_getsockname(arg1, arg2, arg3); 9584 break; 9585 #endif 9586 #ifdef TARGET_NR_getsockopt 9587 case TARGET_NR_getsockopt: 9588 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 9589 break; 9590 #endif 9591 #ifdef TARGET_NR_listen 9592 case TARGET_NR_listen: 9593 ret = get_errno(listen(arg1, arg2)); 9594 break; 9595 #endif 9596 #ifdef TARGET_NR_recv 9597 case TARGET_NR_recv: 9598 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 9599 break; 9600 #endif 9601 #ifdef TARGET_NR_recvfrom 9602 case TARGET_NR_recvfrom: 9603 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 9604 break; 9605 #endif 9606 #ifdef TARGET_NR_recvmsg 9607 case TARGET_NR_recvmsg: 9608 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 9609 break; 9610 #endif 9611 #ifdef TARGET_NR_send 9612 case TARGET_NR_send: 9613 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 9614 break; 9615 #endif 9616 #ifdef TARGET_NR_sendmsg 9617 case TARGET_NR_sendmsg: 9618 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 9619 break; 9620 #endif 9621 #ifdef TARGET_NR_sendmmsg 9622 case TARGET_NR_sendmmsg: 9623 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 9624 break; 9625 case TARGET_NR_recvmmsg: 9626 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 9627 break; 9628 #endif 9629 #ifdef TARGET_NR_sendto 9630 case TARGET_NR_sendto: 9631 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 9632 break; 9633 #endif 9634 #ifdef TARGET_NR_shutdown 9635 case TARGET_NR_shutdown: 9636 ret = get_errno(shutdown(arg1, arg2)); 9637 break; 9638 #endif 9639 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 9640 case TARGET_NR_getrandom: 9641 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9642 if 
(!p) { 9643 goto efault; 9644 } 9645 ret = get_errno(getrandom(p, arg2, arg3)); 9646 unlock_user(p, arg1, ret); 9647 break; 9648 #endif 9649 #ifdef TARGET_NR_socket 9650 case TARGET_NR_socket: 9651 ret = do_socket(arg1, arg2, arg3); 9652 break; 9653 #endif 9654 #ifdef TARGET_NR_socketpair 9655 case TARGET_NR_socketpair: 9656 ret = do_socketpair(arg1, arg2, arg3, arg4); 9657 break; 9658 #endif 9659 #ifdef TARGET_NR_setsockopt 9660 case TARGET_NR_setsockopt: 9661 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 9662 break; 9663 #endif 9664 #if defined(TARGET_NR_syslog) 9665 case TARGET_NR_syslog: 9666 { 9667 int len = arg2; 9668 9669 switch (arg1) { 9670 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 9671 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 9672 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 9673 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 9674 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 9675 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 9676 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 9677 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 9678 { 9679 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 9680 } 9681 break; 9682 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 9683 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 9684 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 9685 { 9686 ret = -TARGET_EINVAL; 9687 if (len < 0) { 9688 goto fail; 9689 } 9690 ret = 0; 9691 if (len == 0) { 9692 break; 9693 } 9694 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9695 if (!p) { 9696 ret = -TARGET_EFAULT; 9697 goto fail; 9698 } 9699 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 9700 unlock_user(p, arg2, arg3); 9701 } 9702 break; 9703 default: 9704 ret = -EINVAL; 9705 break; 9706 } 9707 } 9708 break; 9709 #endif 9710 case TARGET_NR_setitimer: 9711 { 9712 struct itimerval value, ovalue, *pvalue; 9713 9714 if (arg2) { 
9715 pvalue = &value; 9716 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 9717 || copy_from_user_timeval(&pvalue->it_value, 9718 arg2 + sizeof(struct target_timeval))) 9719 goto efault; 9720 } else { 9721 pvalue = NULL; 9722 } 9723 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 9724 if (!is_error(ret) && arg3) { 9725 if (copy_to_user_timeval(arg3, 9726 &ovalue.it_interval) 9727 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 9728 &ovalue.it_value)) 9729 goto efault; 9730 } 9731 } 9732 break; 9733 case TARGET_NR_getitimer: 9734 { 9735 struct itimerval value; 9736 9737 ret = get_errno(getitimer(arg1, &value)); 9738 if (!is_error(ret) && arg2) { 9739 if (copy_to_user_timeval(arg2, 9740 &value.it_interval) 9741 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 9742 &value.it_value)) 9743 goto efault; 9744 } 9745 } 9746 break; 9747 #ifdef TARGET_NR_stat 9748 case TARGET_NR_stat: 9749 if (!(p = lock_user_string(arg1))) 9750 goto efault; 9751 ret = get_errno(stat(path(p), &st)); 9752 unlock_user(p, arg1, 0); 9753 goto do_stat; 9754 #endif 9755 #ifdef TARGET_NR_lstat 9756 case TARGET_NR_lstat: 9757 if (!(p = lock_user_string(arg1))) 9758 goto efault; 9759 ret = get_errno(lstat(path(p), &st)); 9760 unlock_user(p, arg1, 0); 9761 goto do_stat; 9762 #endif 9763 case TARGET_NR_fstat: 9764 { 9765 ret = get_errno(fstat(arg1, &st)); 9766 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 9767 do_stat: 9768 #endif 9769 if (!is_error(ret)) { 9770 struct target_stat *target_st; 9771 9772 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 9773 goto efault; 9774 memset(target_st, 0, sizeof(*target_st)); 9775 __put_user(st.st_dev, &target_st->st_dev); 9776 __put_user(st.st_ino, &target_st->st_ino); 9777 __put_user(st.st_mode, &target_st->st_mode); 9778 __put_user(st.st_uid, &target_st->st_uid); 9779 __put_user(st.st_gid, &target_st->st_gid); 9780 __put_user(st.st_nlink, &target_st->st_nlink); 9781 __put_user(st.st_rdev, 
&target_st->st_rdev); 9782 __put_user(st.st_size, &target_st->st_size); 9783 __put_user(st.st_blksize, &target_st->st_blksize); 9784 __put_user(st.st_blocks, &target_st->st_blocks); 9785 __put_user(st.st_atime, &target_st->target_st_atime); 9786 __put_user(st.st_mtime, &target_st->target_st_mtime); 9787 __put_user(st.st_ctime, &target_st->target_st_ctime); 9788 unlock_user_struct(target_st, arg2, 1); 9789 } 9790 } 9791 break; 9792 #ifdef TARGET_NR_olduname 9793 case TARGET_NR_olduname: 9794 goto unimplemented; 9795 #endif 9796 #ifdef TARGET_NR_iopl 9797 case TARGET_NR_iopl: 9798 goto unimplemented; 9799 #endif 9800 case TARGET_NR_vhangup: 9801 ret = get_errno(vhangup()); 9802 break; 9803 #ifdef TARGET_NR_idle 9804 case TARGET_NR_idle: 9805 goto unimplemented; 9806 #endif 9807 #ifdef TARGET_NR_syscall 9808 case TARGET_NR_syscall: 9809 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 9810 arg6, arg7, arg8, 0); 9811 break; 9812 #endif 9813 case TARGET_NR_wait4: 9814 { 9815 int status; 9816 abi_long status_ptr = arg2; 9817 struct rusage rusage, *rusage_ptr; 9818 abi_ulong target_rusage = arg4; 9819 abi_long rusage_err; 9820 if (target_rusage) 9821 rusage_ptr = &rusage; 9822 else 9823 rusage_ptr = NULL; 9824 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 9825 if (!is_error(ret)) { 9826 if (status_ptr && ret) { 9827 status = host_to_target_waitstatus(status); 9828 if (put_user_s32(status, status_ptr)) 9829 goto efault; 9830 } 9831 if (target_rusage) { 9832 rusage_err = host_to_target_rusage(target_rusage, &rusage); 9833 if (rusage_err) { 9834 ret = rusage_err; 9835 } 9836 } 9837 } 9838 } 9839 break; 9840 #ifdef TARGET_NR_swapoff 9841 case TARGET_NR_swapoff: 9842 if (!(p = lock_user_string(arg1))) 9843 goto efault; 9844 ret = get_errno(swapoff(p)); 9845 unlock_user(p, arg1, 0); 9846 break; 9847 #endif 9848 case TARGET_NR_sysinfo: 9849 { 9850 struct target_sysinfo *target_value; 9851 struct sysinfo value; 9852 ret = 
get_errno(sysinfo(&value)); 9853 if (!is_error(ret) && arg1) 9854 { 9855 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 9856 goto efault; 9857 __put_user(value.uptime, &target_value->uptime); 9858 __put_user(value.loads[0], &target_value->loads[0]); 9859 __put_user(value.loads[1], &target_value->loads[1]); 9860 __put_user(value.loads[2], &target_value->loads[2]); 9861 __put_user(value.totalram, &target_value->totalram); 9862 __put_user(value.freeram, &target_value->freeram); 9863 __put_user(value.sharedram, &target_value->sharedram); 9864 __put_user(value.bufferram, &target_value->bufferram); 9865 __put_user(value.totalswap, &target_value->totalswap); 9866 __put_user(value.freeswap, &target_value->freeswap); 9867 __put_user(value.procs, &target_value->procs); 9868 __put_user(value.totalhigh, &target_value->totalhigh); 9869 __put_user(value.freehigh, &target_value->freehigh); 9870 __put_user(value.mem_unit, &target_value->mem_unit); 9871 unlock_user_struct(target_value, arg1, 1); 9872 } 9873 } 9874 break; 9875 #ifdef TARGET_NR_ipc 9876 case TARGET_NR_ipc: 9877 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 9878 break; 9879 #endif 9880 #ifdef TARGET_NR_semget 9881 case TARGET_NR_semget: 9882 ret = get_errno(semget(arg1, arg2, arg3)); 9883 break; 9884 #endif 9885 #ifdef TARGET_NR_semop 9886 case TARGET_NR_semop: 9887 ret = do_semop(arg1, arg2, arg3); 9888 break; 9889 #endif 9890 #ifdef TARGET_NR_semctl 9891 case TARGET_NR_semctl: 9892 ret = do_semctl(arg1, arg2, arg3, arg4); 9893 break; 9894 #endif 9895 #ifdef TARGET_NR_msgctl 9896 case TARGET_NR_msgctl: 9897 ret = do_msgctl(arg1, arg2, arg3); 9898 break; 9899 #endif 9900 #ifdef TARGET_NR_msgget 9901 case TARGET_NR_msgget: 9902 ret = get_errno(msgget(arg1, arg2)); 9903 break; 9904 #endif 9905 #ifdef TARGET_NR_msgrcv 9906 case TARGET_NR_msgrcv: 9907 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 9908 break; 9909 #endif 9910 #ifdef TARGET_NR_msgsnd 9911 case TARGET_NR_msgsnd: 9912 ret = 
do_msgsnd(arg1, arg2, arg3, arg4); 9913 break; 9914 #endif 9915 #ifdef TARGET_NR_shmget 9916 case TARGET_NR_shmget: 9917 ret = get_errno(shmget(arg1, arg2, arg3)); 9918 break; 9919 #endif 9920 #ifdef TARGET_NR_shmctl 9921 case TARGET_NR_shmctl: 9922 ret = do_shmctl(arg1, arg2, arg3); 9923 break; 9924 #endif 9925 #ifdef TARGET_NR_shmat 9926 case TARGET_NR_shmat: 9927 ret = do_shmat(cpu_env, arg1, arg2, arg3); 9928 break; 9929 #endif 9930 #ifdef TARGET_NR_shmdt 9931 case TARGET_NR_shmdt: 9932 ret = do_shmdt(arg1); 9933 break; 9934 #endif 9935 case TARGET_NR_fsync: 9936 ret = get_errno(fsync(arg1)); 9937 break; 9938 case TARGET_NR_clone: 9939 /* Linux manages to have three different orderings for its 9940 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 9941 * match the kernel's CONFIG_CLONE_* settings. 9942 * Microblaze is further special in that it uses a sixth 9943 * implicit argument to clone for the TLS pointer. 9944 */ 9945 #if defined(TARGET_MICROBLAZE) 9946 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 9947 #elif defined(TARGET_CLONE_BACKWARDS) 9948 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 9949 #elif defined(TARGET_CLONE_BACKWARDS2) 9950 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 9951 #else 9952 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 9953 #endif 9954 break; 9955 #ifdef __NR_exit_group 9956 /* new thread calls */ 9957 case TARGET_NR_exit_group: 9958 #ifdef TARGET_GPROF 9959 _mcleanup(); 9960 #endif 9961 gdb_exit(cpu_env, arg1); 9962 ret = get_errno(exit_group(arg1)); 9963 break; 9964 #endif 9965 case TARGET_NR_setdomainname: 9966 if (!(p = lock_user_string(arg1))) 9967 goto efault; 9968 ret = get_errno(setdomainname(p, arg2)); 9969 unlock_user(p, arg1, 0); 9970 break; 9971 case TARGET_NR_uname: 9972 /* no need to transcode because we use the linux syscall */ 9973 { 9974 struct new_utsname * buf; 9975 9976 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 9977 
goto efault; 9978 ret = get_errno(sys_uname(buf)); 9979 if (!is_error(ret)) { 9980 /* Overwrite the native machine name with whatever is being 9981 emulated. */ 9982 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 9983 /* Allow the user to override the reported release. */ 9984 if (qemu_uname_release && *qemu_uname_release) { 9985 g_strlcpy(buf->release, qemu_uname_release, 9986 sizeof(buf->release)); 9987 } 9988 } 9989 unlock_user_struct(buf, arg1, 1); 9990 } 9991 break; 9992 #ifdef TARGET_I386 9993 case TARGET_NR_modify_ldt: 9994 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 9995 break; 9996 #if !defined(TARGET_X86_64) 9997 case TARGET_NR_vm86old: 9998 goto unimplemented; 9999 case TARGET_NR_vm86: 10000 ret = do_vm86(cpu_env, arg1, arg2); 10001 break; 10002 #endif 10003 #endif 10004 case TARGET_NR_adjtimex: 10005 { 10006 struct timex host_buf; 10007 10008 if (target_to_host_timex(&host_buf, arg1) != 0) { 10009 goto efault; 10010 } 10011 ret = get_errno(adjtimex(&host_buf)); 10012 if (!is_error(ret)) { 10013 if (host_to_target_timex(arg1, &host_buf) != 0) { 10014 goto efault; 10015 } 10016 } 10017 } 10018 break; 10019 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10020 case TARGET_NR_clock_adjtime: 10021 { 10022 struct timex htx, *phtx = &htx; 10023 10024 if (target_to_host_timex(phtx, arg2) != 0) { 10025 goto efault; 10026 } 10027 ret = get_errno(clock_adjtime(arg1, phtx)); 10028 if (!is_error(ret) && phtx) { 10029 if (host_to_target_timex(arg2, phtx) != 0) { 10030 goto efault; 10031 } 10032 } 10033 } 10034 break; 10035 #endif 10036 #ifdef TARGET_NR_create_module 10037 case TARGET_NR_create_module: 10038 #endif 10039 case TARGET_NR_init_module: 10040 case TARGET_NR_delete_module: 10041 #ifdef TARGET_NR_get_kernel_syms 10042 case TARGET_NR_get_kernel_syms: 10043 #endif 10044 goto unimplemented; 10045 case TARGET_NR_quotactl: 10046 goto unimplemented; 10047 case TARGET_NR_getpgid: 10048 ret = get_errno(getpgid(arg1)); 10049 break; 
10050 case TARGET_NR_fchdir: 10051 ret = get_errno(fchdir(arg1)); 10052 break; 10053 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 10054 case TARGET_NR_bdflush: 10055 goto unimplemented; 10056 #endif 10057 #ifdef TARGET_NR_sysfs 10058 case TARGET_NR_sysfs: 10059 goto unimplemented; 10060 #endif 10061 case TARGET_NR_personality: 10062 ret = get_errno(personality(arg1)); 10063 break; 10064 #ifdef TARGET_NR_afs_syscall 10065 case TARGET_NR_afs_syscall: 10066 goto unimplemented; 10067 #endif 10068 #ifdef TARGET_NR__llseek /* Not on alpha */ 10069 case TARGET_NR__llseek: 10070 { 10071 int64_t res; 10072 #if !defined(__NR_llseek) 10073 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10074 if (res == -1) { 10075 ret = get_errno(res); 10076 } else { 10077 ret = 0; 10078 } 10079 #else 10080 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10081 #endif 10082 if ((ret == 0) && put_user_s64(res, arg4)) { 10083 goto efault; 10084 } 10085 } 10086 break; 10087 #endif 10088 #ifdef TARGET_NR_getdents 10089 case TARGET_NR_getdents: 10090 #ifdef __NR_getdents 10091 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 10092 { 10093 struct target_dirent *target_dirp; 10094 struct linux_dirent *dirp; 10095 abi_long count = arg3; 10096 10097 dirp = g_try_malloc(count); 10098 if (!dirp) { 10099 ret = -TARGET_ENOMEM; 10100 goto fail; 10101 } 10102 10103 ret = get_errno(sys_getdents(arg1, dirp, count)); 10104 if (!is_error(ret)) { 10105 struct linux_dirent *de; 10106 struct target_dirent *tde; 10107 int len = ret; 10108 int reclen, treclen; 10109 int count1, tnamelen; 10110 10111 count1 = 0; 10112 de = dirp; 10113 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10114 goto efault; 10115 tde = target_dirp; 10116 while (len > 0) { 10117 reclen = de->d_reclen; 10118 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 10119 assert(tnamelen >= 0); 10120 treclen = tnamelen + offsetof(struct target_dirent, d_name); 10121 assert(count1 + treclen <= count); 
10122 tde->d_reclen = tswap16(treclen); 10123 tde->d_ino = tswapal(de->d_ino); 10124 tde->d_off = tswapal(de->d_off); 10125 memcpy(tde->d_name, de->d_name, tnamelen); 10126 de = (struct linux_dirent *)((char *)de + reclen); 10127 len -= reclen; 10128 tde = (struct target_dirent *)((char *)tde + treclen); 10129 count1 += treclen; 10130 } 10131 ret = count1; 10132 unlock_user(target_dirp, arg2, ret); 10133 } 10134 g_free(dirp); 10135 } 10136 #else 10137 { 10138 struct linux_dirent *dirp; 10139 abi_long count = arg3; 10140 10141 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10142 goto efault; 10143 ret = get_errno(sys_getdents(arg1, dirp, count)); 10144 if (!is_error(ret)) { 10145 struct linux_dirent *de; 10146 int len = ret; 10147 int reclen; 10148 de = dirp; 10149 while (len > 0) { 10150 reclen = de->d_reclen; 10151 if (reclen > len) 10152 break; 10153 de->d_reclen = tswap16(reclen); 10154 tswapls(&de->d_ino); 10155 tswapls(&de->d_off); 10156 de = (struct linux_dirent *)((char *)de + reclen); 10157 len -= reclen; 10158 } 10159 } 10160 unlock_user(dirp, arg2, ret); 10161 } 10162 #endif 10163 #else 10164 /* Implement getdents in terms of getdents64 */ 10165 { 10166 struct linux_dirent64 *dirp; 10167 abi_long count = arg3; 10168 10169 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 10170 if (!dirp) { 10171 goto efault; 10172 } 10173 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10174 if (!is_error(ret)) { 10175 /* Convert the dirent64 structs to target dirent. We do this 10176 * in-place, since we can guarantee that a target_dirent is no 10177 * larger than a dirent64; however this means we have to be 10178 * careful to read everything before writing in the new format. 
10179 */ 10180 struct linux_dirent64 *de; 10181 struct target_dirent *tde; 10182 int len = ret; 10183 int tlen = 0; 10184 10185 de = dirp; 10186 tde = (struct target_dirent *)dirp; 10187 while (len > 0) { 10188 int namelen, treclen; 10189 int reclen = de->d_reclen; 10190 uint64_t ino = de->d_ino; 10191 int64_t off = de->d_off; 10192 uint8_t type = de->d_type; 10193 10194 namelen = strlen(de->d_name); 10195 treclen = offsetof(struct target_dirent, d_name) 10196 + namelen + 2; 10197 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 10198 10199 memmove(tde->d_name, de->d_name, namelen + 1); 10200 tde->d_ino = tswapal(ino); 10201 tde->d_off = tswapal(off); 10202 tde->d_reclen = tswap16(treclen); 10203 /* The target_dirent type is in what was formerly a padding 10204 * byte at the end of the structure: 10205 */ 10206 *(((char *)tde) + treclen - 1) = type; 10207 10208 de = (struct linux_dirent64 *)((char *)de + reclen); 10209 tde = (struct target_dirent *)((char *)tde + treclen); 10210 len -= reclen; 10211 tlen += treclen; 10212 } 10213 ret = tlen; 10214 } 10215 unlock_user(dirp, arg2, ret); 10216 } 10217 #endif 10218 break; 10219 #endif /* TARGET_NR_getdents */ 10220 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10221 case TARGET_NR_getdents64: 10222 { 10223 struct linux_dirent64 *dirp; 10224 abi_long count = arg3; 10225 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10226 goto efault; 10227 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10228 if (!is_error(ret)) { 10229 struct linux_dirent64 *de; 10230 int len = ret; 10231 int reclen; 10232 de = dirp; 10233 while (len > 0) { 10234 reclen = de->d_reclen; 10235 if (reclen > len) 10236 break; 10237 de->d_reclen = tswap16(reclen); 10238 tswap64s((uint64_t *)&de->d_ino); 10239 tswap64s((uint64_t *)&de->d_off); 10240 de = (struct linux_dirent64 *)((char *)de + reclen); 10241 len -= reclen; 10242 } 10243 } 10244 unlock_user(dirp, arg2, ret); 10245 } 10246 break; 10247 #endif /* 
TARGET_NR_getdents64 */ 10248 #if defined(TARGET_NR__newselect) 10249 case TARGET_NR__newselect: 10250 ret = do_select(arg1, arg2, arg3, arg4, arg5); 10251 break; 10252 #endif 10253 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 10254 # ifdef TARGET_NR_poll 10255 case TARGET_NR_poll: 10256 # endif 10257 # ifdef TARGET_NR_ppoll 10258 case TARGET_NR_ppoll: 10259 # endif 10260 { 10261 struct target_pollfd *target_pfd; 10262 unsigned int nfds = arg2; 10263 struct pollfd *pfd; 10264 unsigned int i; 10265 10266 pfd = NULL; 10267 target_pfd = NULL; 10268 if (nfds) { 10269 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) { 10270 ret = -TARGET_EINVAL; 10271 break; 10272 } 10273 10274 target_pfd = lock_user(VERIFY_WRITE, arg1, 10275 sizeof(struct target_pollfd) * nfds, 1); 10276 if (!target_pfd) { 10277 goto efault; 10278 } 10279 10280 pfd = alloca(sizeof(struct pollfd) * nfds); 10281 for (i = 0; i < nfds; i++) { 10282 pfd[i].fd = tswap32(target_pfd[i].fd); 10283 pfd[i].events = tswap16(target_pfd[i].events); 10284 } 10285 } 10286 10287 switch (num) { 10288 # ifdef TARGET_NR_ppoll 10289 case TARGET_NR_ppoll: 10290 { 10291 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 10292 target_sigset_t *target_set; 10293 sigset_t _set, *set = &_set; 10294 10295 if (arg3) { 10296 if (target_to_host_timespec(timeout_ts, arg3)) { 10297 unlock_user(target_pfd, arg1, 0); 10298 goto efault; 10299 } 10300 } else { 10301 timeout_ts = NULL; 10302 } 10303 10304 if (arg4) { 10305 if (arg5 != sizeof(target_sigset_t)) { 10306 unlock_user(target_pfd, arg1, 0); 10307 ret = -TARGET_EINVAL; 10308 break; 10309 } 10310 10311 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 10312 if (!target_set) { 10313 unlock_user(target_pfd, arg1, 0); 10314 goto efault; 10315 } 10316 target_to_host_sigset(set, target_set); 10317 } else { 10318 set = NULL; 10319 } 10320 10321 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts, 10322 set, SIGSET_T_SIZE)); 10323 10324 if 
(!is_error(ret) && arg3) { 10325 host_to_target_timespec(arg3, timeout_ts); 10326 } 10327 if (arg4) { 10328 unlock_user(target_set, arg4, 0); 10329 } 10330 break; 10331 } 10332 # endif 10333 # ifdef TARGET_NR_poll 10334 case TARGET_NR_poll: 10335 { 10336 struct timespec ts, *pts; 10337 10338 if (arg3 >= 0) { 10339 /* Convert ms to secs, ns */ 10340 ts.tv_sec = arg3 / 1000; 10341 ts.tv_nsec = (arg3 % 1000) * 1000000LL; 10342 pts = &ts; 10343 } else { 10344 /* -ve poll() timeout means "infinite" */ 10345 pts = NULL; 10346 } 10347 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0)); 10348 break; 10349 } 10350 # endif 10351 default: 10352 g_assert_not_reached(); 10353 } 10354 10355 if (!is_error(ret)) { 10356 for(i = 0; i < nfds; i++) { 10357 target_pfd[i].revents = tswap16(pfd[i].revents); 10358 } 10359 } 10360 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 10361 } 10362 break; 10363 #endif 10364 case TARGET_NR_flock: 10365 /* NOTE: the flock constant seems to be the same for every 10366 Linux platform */ 10367 ret = get_errno(safe_flock(arg1, arg2)); 10368 break; 10369 case TARGET_NR_readv: 10370 { 10371 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10372 if (vec != NULL) { 10373 ret = get_errno(safe_readv(arg1, vec, arg3)); 10374 unlock_iovec(vec, arg2, arg3, 1); 10375 } else { 10376 ret = -host_to_target_errno(errno); 10377 } 10378 } 10379 break; 10380 case TARGET_NR_writev: 10381 { 10382 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10383 if (vec != NULL) { 10384 ret = get_errno(safe_writev(arg1, vec, arg3)); 10385 unlock_iovec(vec, arg2, arg3, 0); 10386 } else { 10387 ret = -host_to_target_errno(errno); 10388 } 10389 } 10390 break; 10391 #if defined(TARGET_NR_preadv) 10392 case TARGET_NR_preadv: 10393 { 10394 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10395 if (vec != NULL) { 10396 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5)); 10397 unlock_iovec(vec, arg2, arg3, 1); 10398 } else { 
10399 ret = -host_to_target_errno(errno); 10400 } 10401 } 10402 break; 10403 #endif 10404 #if defined(TARGET_NR_pwritev) 10405 case TARGET_NR_pwritev: 10406 { 10407 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10408 if (vec != NULL) { 10409 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5)); 10410 unlock_iovec(vec, arg2, arg3, 0); 10411 } else { 10412 ret = -host_to_target_errno(errno); 10413 } 10414 } 10415 break; 10416 #endif 10417 case TARGET_NR_getsid: 10418 ret = get_errno(getsid(arg1)); 10419 break; 10420 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10421 case TARGET_NR_fdatasync: 10422 ret = get_errno(fdatasync(arg1)); 10423 break; 10424 #endif 10425 #ifdef TARGET_NR__sysctl 10426 case TARGET_NR__sysctl: 10427 /* We don't implement this, but ENOTDIR is always a safe 10428 return value. */ 10429 ret = -TARGET_ENOTDIR; 10430 break; 10431 #endif 10432 case TARGET_NR_sched_getaffinity: 10433 { 10434 unsigned int mask_size; 10435 unsigned long *mask; 10436 10437 /* 10438 * sched_getaffinity needs multiples of ulong, so need to take 10439 * care of mismatches between target ulong and host ulong sizes. 10440 */ 10441 if (arg2 & (sizeof(abi_ulong) - 1)) { 10442 ret = -TARGET_EINVAL; 10443 break; 10444 } 10445 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10446 10447 mask = alloca(mask_size); 10448 memset(mask, 0, mask_size); 10449 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10450 10451 if (!is_error(ret)) { 10452 if (ret > arg2) { 10453 /* More data returned than the caller's buffer will fit. 10454 * This only happens if sizeof(abi_long) < sizeof(long) 10455 * and the caller passed us a buffer holding an odd number 10456 * of abi_longs. If the host kernel is actually using the 10457 * extra 4 bytes then fail EINVAL; otherwise we can just 10458 * ignore them and only copy the interesting part. 
10459 */ 10460 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10461 if (numcpus > arg2 * 8) { 10462 ret = -TARGET_EINVAL; 10463 break; 10464 } 10465 ret = arg2; 10466 } 10467 10468 ret = host_to_target_cpu_mask(mask, mask_size, arg3, arg2); 10469 } 10470 } 10471 break; 10472 case TARGET_NR_sched_setaffinity: 10473 { 10474 unsigned int mask_size; 10475 unsigned long *mask; 10476 10477 /* 10478 * sched_setaffinity needs multiples of ulong, so need to take 10479 * care of mismatches between target ulong and host ulong sizes. 10480 */ 10481 if (arg2 & (sizeof(abi_ulong) - 1)) { 10482 ret = -TARGET_EINVAL; 10483 break; 10484 } 10485 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10486 mask = alloca(mask_size); 10487 10488 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 10489 if (ret) { 10490 break; 10491 } 10492 10493 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 10494 } 10495 break; 10496 case TARGET_NR_getcpu: 10497 { 10498 unsigned cpu, node; 10499 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 10500 arg2 ? 
&node : NULL, 10501 NULL)); 10502 if (is_error(ret)) { 10503 goto fail; 10504 } 10505 if (arg1 && put_user_u32(cpu, arg1)) { 10506 goto efault; 10507 } 10508 if (arg2 && put_user_u32(node, arg2)) { 10509 goto efault; 10510 } 10511 } 10512 break; 10513 case TARGET_NR_sched_setparam: 10514 { 10515 struct sched_param *target_schp; 10516 struct sched_param schp; 10517 10518 if (arg2 == 0) { 10519 return -TARGET_EINVAL; 10520 } 10521 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 10522 goto efault; 10523 schp.sched_priority = tswap32(target_schp->sched_priority); 10524 unlock_user_struct(target_schp, arg2, 0); 10525 ret = get_errno(sched_setparam(arg1, &schp)); 10526 } 10527 break; 10528 case TARGET_NR_sched_getparam: 10529 { 10530 struct sched_param *target_schp; 10531 struct sched_param schp; 10532 10533 if (arg2 == 0) { 10534 return -TARGET_EINVAL; 10535 } 10536 ret = get_errno(sched_getparam(arg1, &schp)); 10537 if (!is_error(ret)) { 10538 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 10539 goto efault; 10540 target_schp->sched_priority = tswap32(schp.sched_priority); 10541 unlock_user_struct(target_schp, arg2, 1); 10542 } 10543 } 10544 break; 10545 case TARGET_NR_sched_setscheduler: 10546 { 10547 struct sched_param *target_schp; 10548 struct sched_param schp; 10549 if (arg3 == 0) { 10550 return -TARGET_EINVAL; 10551 } 10552 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 10553 goto efault; 10554 schp.sched_priority = tswap32(target_schp->sched_priority); 10555 unlock_user_struct(target_schp, arg3, 0); 10556 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 10557 } 10558 break; 10559 case TARGET_NR_sched_getscheduler: 10560 ret = get_errno(sched_getscheduler(arg1)); 10561 break; 10562 case TARGET_NR_sched_yield: 10563 ret = get_errno(sched_yield()); 10564 break; 10565 case TARGET_NR_sched_get_priority_max: 10566 ret = get_errno(sched_get_priority_max(arg1)); 10567 break; 10568 case TARGET_NR_sched_get_priority_min: 10569 ret = 
get_errno(sched_get_priority_min(arg1)); 10570 break; 10571 case TARGET_NR_sched_rr_get_interval: 10572 { 10573 struct timespec ts; 10574 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10575 if (!is_error(ret)) { 10576 ret = host_to_target_timespec(arg2, &ts); 10577 } 10578 } 10579 break; 10580 case TARGET_NR_nanosleep: 10581 { 10582 struct timespec req, rem; 10583 target_to_host_timespec(&req, arg1); 10584 ret = get_errno(safe_nanosleep(&req, &rem)); 10585 if (is_error(ret) && arg2) { 10586 host_to_target_timespec(arg2, &rem); 10587 } 10588 } 10589 break; 10590 #ifdef TARGET_NR_query_module 10591 case TARGET_NR_query_module: 10592 goto unimplemented; 10593 #endif 10594 #ifdef TARGET_NR_nfsservctl 10595 case TARGET_NR_nfsservctl: 10596 goto unimplemented; 10597 #endif 10598 case TARGET_NR_prctl: 10599 switch (arg1) { 10600 case PR_GET_PDEATHSIG: 10601 { 10602 int deathsig; 10603 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 10604 if (!is_error(ret) && arg2 10605 && put_user_ual(deathsig, arg2)) { 10606 goto efault; 10607 } 10608 break; 10609 } 10610 #ifdef PR_GET_NAME 10611 case PR_GET_NAME: 10612 { 10613 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 10614 if (!name) { 10615 goto efault; 10616 } 10617 ret = get_errno(prctl(arg1, (unsigned long)name, 10618 arg3, arg4, arg5)); 10619 unlock_user(name, arg2, 16); 10620 break; 10621 } 10622 case PR_SET_NAME: 10623 { 10624 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 10625 if (!name) { 10626 goto efault; 10627 } 10628 ret = get_errno(prctl(arg1, (unsigned long)name, 10629 arg3, arg4, arg5)); 10630 unlock_user(name, arg2, 0); 10631 break; 10632 } 10633 #endif 10634 case PR_GET_SECCOMP: 10635 case PR_SET_SECCOMP: 10636 /* Disable seccomp to prevent the target disabling syscalls we 10637 * need. 
*/ 10638 ret = -TARGET_EINVAL; 10639 break; 10640 default: 10641 /* Most prctl options have no pointer arguments */ 10642 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 10643 break; 10644 } 10645 break; 10646 #ifdef TARGET_NR_arch_prctl 10647 case TARGET_NR_arch_prctl: 10648 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 10649 ret = do_arch_prctl(cpu_env, arg1, arg2); 10650 break; 10651 #else 10652 goto unimplemented; 10653 #endif 10654 #endif 10655 #ifdef TARGET_NR_pread64 10656 case TARGET_NR_pread64: 10657 if (regpairs_aligned(cpu_env, num)) { 10658 arg4 = arg5; 10659 arg5 = arg6; 10660 } 10661 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 10662 goto efault; 10663 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 10664 unlock_user(p, arg2, ret); 10665 break; 10666 case TARGET_NR_pwrite64: 10667 if (regpairs_aligned(cpu_env, num)) { 10668 arg4 = arg5; 10669 arg5 = arg6; 10670 } 10671 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 10672 goto efault; 10673 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 10674 unlock_user(p, arg2, 0); 10675 break; 10676 #endif 10677 case TARGET_NR_getcwd: 10678 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 10679 goto efault; 10680 ret = get_errno(sys_getcwd1(p, arg2)); 10681 unlock_user(p, arg1, ret); 10682 break; 10683 case TARGET_NR_capget: 10684 case TARGET_NR_capset: 10685 { 10686 struct target_user_cap_header *target_header; 10687 struct target_user_cap_data *target_data = NULL; 10688 struct __user_cap_header_struct header; 10689 struct __user_cap_data_struct data[2]; 10690 struct __user_cap_data_struct *dataptr = NULL; 10691 int i, target_datalen; 10692 int data_items = 1; 10693 10694 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 10695 goto efault; 10696 } 10697 header.version = tswap32(target_header->version); 10698 header.pid = tswap32(target_header->pid); 10699 10700 if (header.version != _LINUX_CAPABILITY_VERSION) { 10701 /* Version 2 and 
up takes pointer to two user_data structs */ 10702 data_items = 2; 10703 } 10704 10705 target_datalen = sizeof(*target_data) * data_items; 10706 10707 if (arg2) { 10708 if (num == TARGET_NR_capget) { 10709 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 10710 } else { 10711 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 10712 } 10713 if (!target_data) { 10714 unlock_user_struct(target_header, arg1, 0); 10715 goto efault; 10716 } 10717 10718 if (num == TARGET_NR_capset) { 10719 for (i = 0; i < data_items; i++) { 10720 data[i].effective = tswap32(target_data[i].effective); 10721 data[i].permitted = tswap32(target_data[i].permitted); 10722 data[i].inheritable = tswap32(target_data[i].inheritable); 10723 } 10724 } 10725 10726 dataptr = data; 10727 } 10728 10729 if (num == TARGET_NR_capget) { 10730 ret = get_errno(capget(&header, dataptr)); 10731 } else { 10732 ret = get_errno(capset(&header, dataptr)); 10733 } 10734 10735 /* The kernel always updates version for both capget and capset */ 10736 target_header->version = tswap32(header.version); 10737 unlock_user_struct(target_header, arg1, 1); 10738 10739 if (arg2) { 10740 if (num == TARGET_NR_capget) { 10741 for (i = 0; i < data_items; i++) { 10742 target_data[i].effective = tswap32(data[i].effective); 10743 target_data[i].permitted = tswap32(data[i].permitted); 10744 target_data[i].inheritable = tswap32(data[i].inheritable); 10745 } 10746 unlock_user(target_data, arg2, target_datalen); 10747 } else { 10748 unlock_user(target_data, arg2, 0); 10749 } 10750 } 10751 break; 10752 } 10753 case TARGET_NR_sigaltstack: 10754 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 10755 break; 10756 10757 #ifdef CONFIG_SENDFILE 10758 case TARGET_NR_sendfile: 10759 { 10760 off_t *offp = NULL; 10761 off_t off; 10762 if (arg3) { 10763 ret = get_user_sal(off, arg3); 10764 if (is_error(ret)) { 10765 break; 10766 } 10767 offp = &off; 10768 } 10769 ret = get_errno(sendfile(arg1, 
arg2, offp, arg4)); 10770 if (!is_error(ret) && arg3) { 10771 abi_long ret2 = put_user_sal(off, arg3); 10772 if (is_error(ret2)) { 10773 ret = ret2; 10774 } 10775 } 10776 break; 10777 } 10778 #ifdef TARGET_NR_sendfile64 10779 case TARGET_NR_sendfile64: 10780 { 10781 off_t *offp = NULL; 10782 off_t off; 10783 if (arg3) { 10784 ret = get_user_s64(off, arg3); 10785 if (is_error(ret)) { 10786 break; 10787 } 10788 offp = &off; 10789 } 10790 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 10791 if (!is_error(ret) && arg3) { 10792 abi_long ret2 = put_user_s64(off, arg3); 10793 if (is_error(ret2)) { 10794 ret = ret2; 10795 } 10796 } 10797 break; 10798 } 10799 #endif 10800 #else 10801 case TARGET_NR_sendfile: 10802 #ifdef TARGET_NR_sendfile64 10803 case TARGET_NR_sendfile64: 10804 #endif 10805 goto unimplemented; 10806 #endif 10807 10808 #ifdef TARGET_NR_getpmsg 10809 case TARGET_NR_getpmsg: 10810 goto unimplemented; 10811 #endif 10812 #ifdef TARGET_NR_putpmsg 10813 case TARGET_NR_putpmsg: 10814 goto unimplemented; 10815 #endif 10816 #ifdef TARGET_NR_vfork 10817 case TARGET_NR_vfork: 10818 ret = get_errno(do_fork(cpu_env, 10819 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 10820 0, 0, 0, 0)); 10821 break; 10822 #endif 10823 #ifdef TARGET_NR_ugetrlimit 10824 case TARGET_NR_ugetrlimit: 10825 { 10826 struct rlimit rlim; 10827 int resource = target_to_host_resource(arg1); 10828 ret = get_errno(getrlimit(resource, &rlim)); 10829 if (!is_error(ret)) { 10830 struct target_rlimit *target_rlim; 10831 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 10832 goto efault; 10833 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 10834 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 10835 unlock_user_struct(target_rlim, arg2, 1); 10836 } 10837 break; 10838 } 10839 #endif 10840 #ifdef TARGET_NR_truncate64 10841 case TARGET_NR_truncate64: 10842 if (!(p = lock_user_string(arg1))) 10843 goto efault; 10844 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 10845 
unlock_user(p, arg1, 0); 10846 break; 10847 #endif 10848 #ifdef TARGET_NR_ftruncate64 10849 case TARGET_NR_ftruncate64: 10850 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 10851 break; 10852 #endif 10853 #ifdef TARGET_NR_stat64 10854 case TARGET_NR_stat64: 10855 if (!(p = lock_user_string(arg1))) 10856 goto efault; 10857 ret = get_errno(stat(path(p), &st)); 10858 unlock_user(p, arg1, 0); 10859 if (!is_error(ret)) 10860 ret = host_to_target_stat64(cpu_env, arg2, &st); 10861 break; 10862 #endif 10863 #ifdef TARGET_NR_lstat64 10864 case TARGET_NR_lstat64: 10865 if (!(p = lock_user_string(arg1))) 10866 goto efault; 10867 ret = get_errno(lstat(path(p), &st)); 10868 unlock_user(p, arg1, 0); 10869 if (!is_error(ret)) 10870 ret = host_to_target_stat64(cpu_env, arg2, &st); 10871 break; 10872 #endif 10873 #ifdef TARGET_NR_fstat64 10874 case TARGET_NR_fstat64: 10875 ret = get_errno(fstat(arg1, &st)); 10876 if (!is_error(ret)) 10877 ret = host_to_target_stat64(cpu_env, arg2, &st); 10878 break; 10879 #endif 10880 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 10881 #ifdef TARGET_NR_fstatat64 10882 case TARGET_NR_fstatat64: 10883 #endif 10884 #ifdef TARGET_NR_newfstatat 10885 case TARGET_NR_newfstatat: 10886 #endif 10887 if (!(p = lock_user_string(arg2))) 10888 goto efault; 10889 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 10890 if (!is_error(ret)) 10891 ret = host_to_target_stat64(cpu_env, arg3, &st); 10892 break; 10893 #endif 10894 #ifdef TARGET_NR_lchown 10895 case TARGET_NR_lchown: 10896 if (!(p = lock_user_string(arg1))) 10897 goto efault; 10898 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 10899 unlock_user(p, arg1, 0); 10900 break; 10901 #endif 10902 #ifdef TARGET_NR_getuid 10903 case TARGET_NR_getuid: 10904 ret = get_errno(high2lowuid(getuid())); 10905 break; 10906 #endif 10907 #ifdef TARGET_NR_getgid 10908 case TARGET_NR_getgid: 10909 ret = get_errno(high2lowgid(getgid())); 10910 break; 10911 #endif 10912 
#ifdef TARGET_NR_geteuid 10913 case TARGET_NR_geteuid: 10914 ret = get_errno(high2lowuid(geteuid())); 10915 break; 10916 #endif 10917 #ifdef TARGET_NR_getegid 10918 case TARGET_NR_getegid: 10919 ret = get_errno(high2lowgid(getegid())); 10920 break; 10921 #endif 10922 case TARGET_NR_setreuid: 10923 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 10924 break; 10925 case TARGET_NR_setregid: 10926 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 10927 break; 10928 case TARGET_NR_getgroups: 10929 { 10930 int gidsetsize = arg1; 10931 target_id *target_grouplist; 10932 gid_t *grouplist; 10933 int i; 10934 10935 grouplist = alloca(gidsetsize * sizeof(gid_t)); 10936 ret = get_errno(getgroups(gidsetsize, grouplist)); 10937 if (gidsetsize == 0) 10938 break; 10939 if (!is_error(ret)) { 10940 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 10941 if (!target_grouplist) 10942 goto efault; 10943 for(i = 0;i < ret; i++) 10944 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 10945 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 10946 } 10947 } 10948 break; 10949 case TARGET_NR_setgroups: 10950 { 10951 int gidsetsize = arg1; 10952 target_id *target_grouplist; 10953 gid_t *grouplist = NULL; 10954 int i; 10955 if (gidsetsize) { 10956 grouplist = alloca(gidsetsize * sizeof(gid_t)); 10957 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 10958 if (!target_grouplist) { 10959 ret = -TARGET_EFAULT; 10960 goto fail; 10961 } 10962 for (i = 0; i < gidsetsize; i++) { 10963 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 10964 } 10965 unlock_user(target_grouplist, arg2, 0); 10966 } 10967 ret = get_errno(setgroups(gidsetsize, grouplist)); 10968 } 10969 break; 10970 case TARGET_NR_fchown: 10971 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 10972 break; 10973 #if defined(TARGET_NR_fchownat) 10974 case TARGET_NR_fchownat: 10975 if (!(p 
= lock_user_string(arg2))) 10976 goto efault; 10977 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 10978 low2highgid(arg4), arg5)); 10979 unlock_user(p, arg2, 0); 10980 break; 10981 #endif 10982 #ifdef TARGET_NR_setresuid 10983 case TARGET_NR_setresuid: 10984 ret = get_errno(sys_setresuid(low2highuid(arg1), 10985 low2highuid(arg2), 10986 low2highuid(arg3))); 10987 break; 10988 #endif 10989 #ifdef TARGET_NR_getresuid 10990 case TARGET_NR_getresuid: 10991 { 10992 uid_t ruid, euid, suid; 10993 ret = get_errno(getresuid(&ruid, &euid, &suid)); 10994 if (!is_error(ret)) { 10995 if (put_user_id(high2lowuid(ruid), arg1) 10996 || put_user_id(high2lowuid(euid), arg2) 10997 || put_user_id(high2lowuid(suid), arg3)) 10998 goto efault; 10999 } 11000 } 11001 break; 11002 #endif 11003 #ifdef TARGET_NR_getresgid 11004 case TARGET_NR_setresgid: 11005 ret = get_errno(sys_setresgid(low2highgid(arg1), 11006 low2highgid(arg2), 11007 low2highgid(arg3))); 11008 break; 11009 #endif 11010 #ifdef TARGET_NR_getresgid 11011 case TARGET_NR_getresgid: 11012 { 11013 gid_t rgid, egid, sgid; 11014 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11015 if (!is_error(ret)) { 11016 if (put_user_id(high2lowgid(rgid), arg1) 11017 || put_user_id(high2lowgid(egid), arg2) 11018 || put_user_id(high2lowgid(sgid), arg3)) 11019 goto efault; 11020 } 11021 } 11022 break; 11023 #endif 11024 #ifdef TARGET_NR_chown 11025 case TARGET_NR_chown: 11026 if (!(p = lock_user_string(arg1))) 11027 goto efault; 11028 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11029 unlock_user(p, arg1, 0); 11030 break; 11031 #endif 11032 case TARGET_NR_setuid: 11033 ret = get_errno(sys_setuid(low2highuid(arg1))); 11034 break; 11035 case TARGET_NR_setgid: 11036 ret = get_errno(sys_setgid(low2highgid(arg1))); 11037 break; 11038 case TARGET_NR_setfsuid: 11039 ret = get_errno(setfsuid(arg1)); 11040 break; 11041 case TARGET_NR_setfsgid: 11042 ret = get_errno(setfsgid(arg1)); 11043 break; 11044 11045 #ifdef 
TARGET_NR_lchown32 11046 case TARGET_NR_lchown32: 11047 if (!(p = lock_user_string(arg1))) 11048 goto efault; 11049 ret = get_errno(lchown(p, arg2, arg3)); 11050 unlock_user(p, arg1, 0); 11051 break; 11052 #endif 11053 #ifdef TARGET_NR_getuid32 11054 case TARGET_NR_getuid32: 11055 ret = get_errno(getuid()); 11056 break; 11057 #endif 11058 11059 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 11060 /* Alpha specific */ 11061 case TARGET_NR_getxuid: 11062 { 11063 uid_t euid; 11064 euid=geteuid(); 11065 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 11066 } 11067 ret = get_errno(getuid()); 11068 break; 11069 #endif 11070 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 11071 /* Alpha specific */ 11072 case TARGET_NR_getxgid: 11073 { 11074 uid_t egid; 11075 egid=getegid(); 11076 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 11077 } 11078 ret = get_errno(getgid()); 11079 break; 11080 #endif 11081 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 11082 /* Alpha specific */ 11083 case TARGET_NR_osf_getsysinfo: 11084 ret = -TARGET_EOPNOTSUPP; 11085 switch (arg1) { 11086 case TARGET_GSI_IEEE_FP_CONTROL: 11087 { 11088 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 11089 11090 /* Copied from linux ieee_fpcr_to_swcr. */ 11091 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 11092 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 11093 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 11094 | SWCR_TRAP_ENABLE_DZE 11095 | SWCR_TRAP_ENABLE_OVF); 11096 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 11097 | SWCR_TRAP_ENABLE_INE); 11098 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 11099 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 11100 11101 if (put_user_u64 (swcr, arg2)) 11102 goto efault; 11103 ret = 0; 11104 } 11105 break; 11106 11107 /* case GSI_IEEE_STATE_AT_SIGNAL: 11108 -- Not implemented in linux kernel. 11109 case GSI_UACPROC: 11110 -- Retrieves current unaligned access state; not much used. 11111 case GSI_PROC_TYPE: 11112 -- Retrieves implver information; surely not used. 
11113 case GSI_GET_HWRPB: 11114 -- Grabs a copy of the HWRPB; surely not used. 11115 */ 11116 } 11117 break; 11118 #endif 11119 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11120 /* Alpha specific */ 11121 case TARGET_NR_osf_setsysinfo: 11122 ret = -TARGET_EOPNOTSUPP; 11123 switch (arg1) { 11124 case TARGET_SSI_IEEE_FP_CONTROL: 11125 { 11126 uint64_t swcr, fpcr, orig_fpcr; 11127 11128 if (get_user_u64 (swcr, arg2)) { 11129 goto efault; 11130 } 11131 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11132 fpcr = orig_fpcr & FPCR_DYN_MASK; 11133 11134 /* Copied from linux ieee_swcr_to_fpcr. */ 11135 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 11136 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 11137 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 11138 | SWCR_TRAP_ENABLE_DZE 11139 | SWCR_TRAP_ENABLE_OVF)) << 48; 11140 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 11141 | SWCR_TRAP_ENABLE_INE)) << 57; 11142 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 11143 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 11144 11145 cpu_alpha_store_fpcr(cpu_env, fpcr); 11146 ret = 0; 11147 } 11148 break; 11149 11150 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11151 { 11152 uint64_t exc, fpcr, orig_fpcr; 11153 int si_code; 11154 11155 if (get_user_u64(exc, arg2)) { 11156 goto efault; 11157 } 11158 11159 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11160 11161 /* We only add to the exception status here. */ 11162 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 11163 11164 cpu_alpha_store_fpcr(cpu_env, fpcr); 11165 ret = 0; 11166 11167 /* Old exceptions are not signaled. */ 11168 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 11169 11170 /* If any exceptions set by this call, 11171 and are unmasked, send a signal. 
*/ 11172 si_code = 0; 11173 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 11174 si_code = TARGET_FPE_FLTRES; 11175 } 11176 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 11177 si_code = TARGET_FPE_FLTUND; 11178 } 11179 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 11180 si_code = TARGET_FPE_FLTOVF; 11181 } 11182 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 11183 si_code = TARGET_FPE_FLTDIV; 11184 } 11185 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 11186 si_code = TARGET_FPE_FLTINV; 11187 } 11188 if (si_code != 0) { 11189 target_siginfo_t info; 11190 info.si_signo = SIGFPE; 11191 info.si_errno = 0; 11192 info.si_code = si_code; 11193 info._sifields._sigfault._addr 11194 = ((CPUArchState *)cpu_env)->pc; 11195 queue_signal((CPUArchState *)cpu_env, info.si_signo, 11196 QEMU_SI_FAULT, &info); 11197 } 11198 } 11199 break; 11200 11201 /* case SSI_NVPAIRS: 11202 -- Used with SSIN_UACPROC to enable unaligned accesses. 11203 case SSI_IEEE_STATE_AT_SIGNAL: 11204 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11205 -- Not implemented in linux kernel 11206 */ 11207 } 11208 break; 11209 #endif 11210 #ifdef TARGET_NR_osf_sigprocmask 11211 /* Alpha specific. 
*/ 11212 case TARGET_NR_osf_sigprocmask: 11213 { 11214 abi_ulong mask; 11215 int how; 11216 sigset_t set, oldset; 11217 11218 switch(arg1) { 11219 case TARGET_SIG_BLOCK: 11220 how = SIG_BLOCK; 11221 break; 11222 case TARGET_SIG_UNBLOCK: 11223 how = SIG_UNBLOCK; 11224 break; 11225 case TARGET_SIG_SETMASK: 11226 how = SIG_SETMASK; 11227 break; 11228 default: 11229 ret = -TARGET_EINVAL; 11230 goto fail; 11231 } 11232 mask = arg2; 11233 target_to_host_old_sigset(&set, &mask); 11234 ret = do_sigprocmask(how, &set, &oldset); 11235 if (!ret) { 11236 host_to_target_old_sigset(&mask, &oldset); 11237 ret = mask; 11238 } 11239 } 11240 break; 11241 #endif 11242 11243 #ifdef TARGET_NR_getgid32 11244 case TARGET_NR_getgid32: 11245 ret = get_errno(getgid()); 11246 break; 11247 #endif 11248 #ifdef TARGET_NR_geteuid32 11249 case TARGET_NR_geteuid32: 11250 ret = get_errno(geteuid()); 11251 break; 11252 #endif 11253 #ifdef TARGET_NR_getegid32 11254 case TARGET_NR_getegid32: 11255 ret = get_errno(getegid()); 11256 break; 11257 #endif 11258 #ifdef TARGET_NR_setreuid32 11259 case TARGET_NR_setreuid32: 11260 ret = get_errno(setreuid(arg1, arg2)); 11261 break; 11262 #endif 11263 #ifdef TARGET_NR_setregid32 11264 case TARGET_NR_setregid32: 11265 ret = get_errno(setregid(arg1, arg2)); 11266 break; 11267 #endif 11268 #ifdef TARGET_NR_getgroups32 11269 case TARGET_NR_getgroups32: 11270 { 11271 int gidsetsize = arg1; 11272 uint32_t *target_grouplist; 11273 gid_t *grouplist; 11274 int i; 11275 11276 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11277 ret = get_errno(getgroups(gidsetsize, grouplist)); 11278 if (gidsetsize == 0) 11279 break; 11280 if (!is_error(ret)) { 11281 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 11282 if (!target_grouplist) { 11283 ret = -TARGET_EFAULT; 11284 goto fail; 11285 } 11286 for(i = 0;i < ret; i++) 11287 target_grouplist[i] = tswap32(grouplist[i]); 11288 unlock_user(target_grouplist, arg2, gidsetsize * 4); 11289 } 11290 } 11291 break; 
11292 #endif 11293 #ifdef TARGET_NR_setgroups32 11294 case TARGET_NR_setgroups32: 11295 { 11296 int gidsetsize = arg1; 11297 uint32_t *target_grouplist; 11298 gid_t *grouplist; 11299 int i; 11300 11301 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11302 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 11303 if (!target_grouplist) { 11304 ret = -TARGET_EFAULT; 11305 goto fail; 11306 } 11307 for(i = 0;i < gidsetsize; i++) 11308 grouplist[i] = tswap32(target_grouplist[i]); 11309 unlock_user(target_grouplist, arg2, 0); 11310 ret = get_errno(setgroups(gidsetsize, grouplist)); 11311 } 11312 break; 11313 #endif 11314 #ifdef TARGET_NR_fchown32 11315 case TARGET_NR_fchown32: 11316 ret = get_errno(fchown(arg1, arg2, arg3)); 11317 break; 11318 #endif 11319 #ifdef TARGET_NR_setresuid32 11320 case TARGET_NR_setresuid32: 11321 ret = get_errno(sys_setresuid(arg1, arg2, arg3)); 11322 break; 11323 #endif 11324 #ifdef TARGET_NR_getresuid32 11325 case TARGET_NR_getresuid32: 11326 { 11327 uid_t ruid, euid, suid; 11328 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11329 if (!is_error(ret)) { 11330 if (put_user_u32(ruid, arg1) 11331 || put_user_u32(euid, arg2) 11332 || put_user_u32(suid, arg3)) 11333 goto efault; 11334 } 11335 } 11336 break; 11337 #endif 11338 #ifdef TARGET_NR_setresgid32 11339 case TARGET_NR_setresgid32: 11340 ret = get_errno(sys_setresgid(arg1, arg2, arg3)); 11341 break; 11342 #endif 11343 #ifdef TARGET_NR_getresgid32 11344 case TARGET_NR_getresgid32: 11345 { 11346 gid_t rgid, egid, sgid; 11347 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11348 if (!is_error(ret)) { 11349 if (put_user_u32(rgid, arg1) 11350 || put_user_u32(egid, arg2) 11351 || put_user_u32(sgid, arg3)) 11352 goto efault; 11353 } 11354 } 11355 break; 11356 #endif 11357 #ifdef TARGET_NR_chown32 11358 case TARGET_NR_chown32: 11359 if (!(p = lock_user_string(arg1))) 11360 goto efault; 11361 ret = get_errno(chown(p, arg2, arg3)); 11362 unlock_user(p, arg1, 0); 11363 break; 11364 
#endif 11365 #ifdef TARGET_NR_setuid32 11366 case TARGET_NR_setuid32: 11367 ret = get_errno(sys_setuid(arg1)); 11368 break; 11369 #endif 11370 #ifdef TARGET_NR_setgid32 11371 case TARGET_NR_setgid32: 11372 ret = get_errno(sys_setgid(arg1)); 11373 break; 11374 #endif 11375 #ifdef TARGET_NR_setfsuid32 11376 case TARGET_NR_setfsuid32: 11377 ret = get_errno(setfsuid(arg1)); 11378 break; 11379 #endif 11380 #ifdef TARGET_NR_setfsgid32 11381 case TARGET_NR_setfsgid32: 11382 ret = get_errno(setfsgid(arg1)); 11383 break; 11384 #endif 11385 11386 case TARGET_NR_pivot_root: 11387 goto unimplemented; 11388 #ifdef TARGET_NR_mincore 11389 case TARGET_NR_mincore: 11390 { 11391 void *a; 11392 ret = -TARGET_ENOMEM; 11393 a = lock_user(VERIFY_READ, arg1, arg2, 0); 11394 if (!a) { 11395 goto fail; 11396 } 11397 ret = -TARGET_EFAULT; 11398 p = lock_user_string(arg3); 11399 if (!p) { 11400 goto mincore_fail; 11401 } 11402 ret = get_errno(mincore(a, arg2, p)); 11403 unlock_user(p, arg3, ret); 11404 mincore_fail: 11405 unlock_user(a, arg1, 0); 11406 } 11407 break; 11408 #endif 11409 #ifdef TARGET_NR_arm_fadvise64_64 11410 case TARGET_NR_arm_fadvise64_64: 11411 /* arm_fadvise64_64 looks like fadvise64_64 but 11412 * with different argument order: fd, advice, offset, len 11413 * rather than the usual fd, offset, len, advice. 11414 * Note that offset and len are both 64-bit so appear as 11415 * pairs of 32-bit registers. 
11416 */ 11417 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 11418 target_offset64(arg5, arg6), arg2); 11419 ret = -host_to_target_errno(ret); 11420 break; 11421 #endif 11422 11423 #if TARGET_ABI_BITS == 32 11424 11425 #ifdef TARGET_NR_fadvise64_64 11426 case TARGET_NR_fadvise64_64: 11427 #if defined(TARGET_PPC) 11428 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 11429 ret = arg2; 11430 arg2 = arg3; 11431 arg3 = arg4; 11432 arg4 = arg5; 11433 arg5 = arg6; 11434 arg6 = ret; 11435 #else 11436 /* 6 args: fd, offset (high, low), len (high, low), advice */ 11437 if (regpairs_aligned(cpu_env, num)) { 11438 /* offset is in (3,4), len in (5,6) and advice in 7 */ 11439 arg2 = arg3; 11440 arg3 = arg4; 11441 arg4 = arg5; 11442 arg5 = arg6; 11443 arg6 = arg7; 11444 } 11445 #endif 11446 ret = -host_to_target_errno(posix_fadvise(arg1, 11447 target_offset64(arg2, arg3), 11448 target_offset64(arg4, arg5), 11449 arg6)); 11450 break; 11451 #endif 11452 11453 #ifdef TARGET_NR_fadvise64 11454 case TARGET_NR_fadvise64: 11455 /* 5 args: fd, offset (high, low), len, advice */ 11456 if (regpairs_aligned(cpu_env, num)) { 11457 /* offset is in (3,4), len in 5 and advice in 6 */ 11458 arg2 = arg3; 11459 arg3 = arg4; 11460 arg4 = arg5; 11461 arg5 = arg6; 11462 } 11463 ret = -host_to_target_errno(posix_fadvise(arg1, 11464 target_offset64(arg2, arg3), 11465 arg4, arg5)); 11466 break; 11467 #endif 11468 11469 #else /* not a 32-bit ABI */ 11470 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 11471 #ifdef TARGET_NR_fadvise64_64 11472 case TARGET_NR_fadvise64_64: 11473 #endif 11474 #ifdef TARGET_NR_fadvise64 11475 case TARGET_NR_fadvise64: 11476 #endif 11477 #ifdef TARGET_S390X 11478 switch (arg4) { 11479 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 11480 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 11481 case 6: arg4 = POSIX_FADV_DONTNEED; break; 11482 case 7: arg4 = POSIX_FADV_NOREUSE; break; 11483 
default: break; 11484 } 11485 #endif 11486 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 11487 break; 11488 #endif 11489 #endif /* end of 64-bit ABI fadvise handling */ 11490 11491 #ifdef TARGET_NR_madvise 11492 case TARGET_NR_madvise: 11493 /* A straight passthrough may not be safe because qemu sometimes 11494 turns private file-backed mappings into anonymous mappings. 11495 This will break MADV_DONTNEED. 11496 This is a hint, so ignoring and returning success is ok. */ 11497 ret = get_errno(0); 11498 break; 11499 #endif 11500 #if TARGET_ABI_BITS == 32 11501 case TARGET_NR_fcntl64: 11502 { 11503 int cmd; 11504 struct flock64 fl; 11505 from_flock64_fn *copyfrom = copy_from_user_flock64; 11506 to_flock64_fn *copyto = copy_to_user_flock64; 11507 11508 #ifdef TARGET_ARM 11509 if (((CPUARMState *)cpu_env)->eabi) { 11510 copyfrom = copy_from_user_eabi_flock64; 11511 copyto = copy_to_user_eabi_flock64; 11512 } 11513 #endif 11514 11515 cmd = target_to_host_fcntl_cmd(arg2); 11516 if (cmd == -TARGET_EINVAL) { 11517 ret = cmd; 11518 break; 11519 } 11520 11521 switch(arg2) { 11522 case TARGET_F_GETLK64: 11523 ret = copyfrom(&fl, arg3); 11524 if (ret) { 11525 break; 11526 } 11527 ret = get_errno(fcntl(arg1, cmd, &fl)); 11528 if (ret == 0) { 11529 ret = copyto(arg3, &fl); 11530 } 11531 break; 11532 11533 case TARGET_F_SETLK64: 11534 case TARGET_F_SETLKW64: 11535 ret = copyfrom(&fl, arg3); 11536 if (ret) { 11537 break; 11538 } 11539 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11540 break; 11541 default: 11542 ret = do_fcntl(arg1, arg2, arg3); 11543 break; 11544 } 11545 break; 11546 } 11547 #endif 11548 #ifdef TARGET_NR_cacheflush 11549 case TARGET_NR_cacheflush: 11550 /* self-modifying code is handled automatically, so nothing needed */ 11551 ret = 0; 11552 break; 11553 #endif 11554 #ifdef TARGET_NR_security 11555 case TARGET_NR_security: 11556 goto unimplemented; 11557 #endif 11558 #ifdef TARGET_NR_getpagesize 11559 case TARGET_NR_getpagesize: 11560 ret = 
TARGET_PAGE_SIZE; 11561 break; 11562 #endif 11563 case TARGET_NR_gettid: 11564 ret = get_errno(gettid()); 11565 break; 11566 #ifdef TARGET_NR_readahead 11567 case TARGET_NR_readahead: 11568 #if TARGET_ABI_BITS == 32 11569 if (regpairs_aligned(cpu_env, num)) { 11570 arg2 = arg3; 11571 arg3 = arg4; 11572 arg4 = arg5; 11573 } 11574 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4)); 11575 #else 11576 ret = get_errno(readahead(arg1, arg2, arg3)); 11577 #endif 11578 break; 11579 #endif 11580 #ifdef CONFIG_ATTR 11581 #ifdef TARGET_NR_setxattr 11582 case TARGET_NR_listxattr: 11583 case TARGET_NR_llistxattr: 11584 { 11585 void *p, *b = 0; 11586 if (arg2) { 11587 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11588 if (!b) { 11589 ret = -TARGET_EFAULT; 11590 break; 11591 } 11592 } 11593 p = lock_user_string(arg1); 11594 if (p) { 11595 if (num == TARGET_NR_listxattr) { 11596 ret = get_errno(listxattr(p, b, arg3)); 11597 } else { 11598 ret = get_errno(llistxattr(p, b, arg3)); 11599 } 11600 } else { 11601 ret = -TARGET_EFAULT; 11602 } 11603 unlock_user(p, arg1, 0); 11604 unlock_user(b, arg2, arg3); 11605 break; 11606 } 11607 case TARGET_NR_flistxattr: 11608 { 11609 void *b = 0; 11610 if (arg2) { 11611 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11612 if (!b) { 11613 ret = -TARGET_EFAULT; 11614 break; 11615 } 11616 } 11617 ret = get_errno(flistxattr(arg1, b, arg3)); 11618 unlock_user(b, arg2, arg3); 11619 break; 11620 } 11621 case TARGET_NR_setxattr: 11622 case TARGET_NR_lsetxattr: 11623 { 11624 void *p, *n, *v = 0; 11625 if (arg3) { 11626 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11627 if (!v) { 11628 ret = -TARGET_EFAULT; 11629 break; 11630 } 11631 } 11632 p = lock_user_string(arg1); 11633 n = lock_user_string(arg2); 11634 if (p && n) { 11635 if (num == TARGET_NR_setxattr) { 11636 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 11637 } else { 11638 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 11639 } 11640 } else { 11641 ret = -TARGET_EFAULT; 11642 } 11643 
unlock_user(p, arg1, 0); 11644 unlock_user(n, arg2, 0); 11645 unlock_user(v, arg3, 0); 11646 } 11647 break; 11648 case TARGET_NR_fsetxattr: 11649 { 11650 void *n, *v = 0; 11651 if (arg3) { 11652 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11653 if (!v) { 11654 ret = -TARGET_EFAULT; 11655 break; 11656 } 11657 } 11658 n = lock_user_string(arg2); 11659 if (n) { 11660 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 11661 } else { 11662 ret = -TARGET_EFAULT; 11663 } 11664 unlock_user(n, arg2, 0); 11665 unlock_user(v, arg3, 0); 11666 } 11667 break; 11668 case TARGET_NR_getxattr: 11669 case TARGET_NR_lgetxattr: 11670 { 11671 void *p, *n, *v = 0; 11672 if (arg3) { 11673 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11674 if (!v) { 11675 ret = -TARGET_EFAULT; 11676 break; 11677 } 11678 } 11679 p = lock_user_string(arg1); 11680 n = lock_user_string(arg2); 11681 if (p && n) { 11682 if (num == TARGET_NR_getxattr) { 11683 ret = get_errno(getxattr(p, n, v, arg4)); 11684 } else { 11685 ret = get_errno(lgetxattr(p, n, v, arg4)); 11686 } 11687 } else { 11688 ret = -TARGET_EFAULT; 11689 } 11690 unlock_user(p, arg1, 0); 11691 unlock_user(n, arg2, 0); 11692 unlock_user(v, arg3, arg4); 11693 } 11694 break; 11695 case TARGET_NR_fgetxattr: 11696 { 11697 void *n, *v = 0; 11698 if (arg3) { 11699 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11700 if (!v) { 11701 ret = -TARGET_EFAULT; 11702 break; 11703 } 11704 } 11705 n = lock_user_string(arg2); 11706 if (n) { 11707 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 11708 } else { 11709 ret = -TARGET_EFAULT; 11710 } 11711 unlock_user(n, arg2, 0); 11712 unlock_user(v, arg3, arg4); 11713 } 11714 break; 11715 case TARGET_NR_removexattr: 11716 case TARGET_NR_lremovexattr: 11717 { 11718 void *p, *n; 11719 p = lock_user_string(arg1); 11720 n = lock_user_string(arg2); 11721 if (p && n) { 11722 if (num == TARGET_NR_removexattr) { 11723 ret = get_errno(removexattr(p, n)); 11724 } else { 11725 ret = get_errno(lremovexattr(p, n)); 11726 } 11727 } else { 
11728 ret = -TARGET_EFAULT; 11729 } 11730 unlock_user(p, arg1, 0); 11731 unlock_user(n, arg2, 0); 11732 } 11733 break; 11734 case TARGET_NR_fremovexattr: 11735 { 11736 void *n; 11737 n = lock_user_string(arg2); 11738 if (n) { 11739 ret = get_errno(fremovexattr(arg1, n)); 11740 } else { 11741 ret = -TARGET_EFAULT; 11742 } 11743 unlock_user(n, arg2, 0); 11744 } 11745 break; 11746 #endif 11747 #endif /* CONFIG_ATTR */ 11748 #ifdef TARGET_NR_set_thread_area 11749 case TARGET_NR_set_thread_area: 11750 #if defined(TARGET_MIPS) 11751 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; 11752 ret = 0; 11753 break; 11754 #elif defined(TARGET_CRIS) 11755 if (arg1 & 0xff) 11756 ret = -TARGET_EINVAL; 11757 else { 11758 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 11759 ret = 0; 11760 } 11761 break; 11762 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 11763 ret = do_set_thread_area(cpu_env, arg1); 11764 break; 11765 #elif defined(TARGET_M68K) 11766 { 11767 TaskState *ts = cpu->opaque; 11768 ts->tp_value = arg1; 11769 ret = 0; 11770 break; 11771 } 11772 #else 11773 goto unimplemented_nowarn; 11774 #endif 11775 #endif 11776 #ifdef TARGET_NR_get_thread_area 11777 case TARGET_NR_get_thread_area: 11778 #if defined(TARGET_I386) && defined(TARGET_ABI32) 11779 ret = do_get_thread_area(cpu_env, arg1); 11780 break; 11781 #elif defined(TARGET_M68K) 11782 { 11783 TaskState *ts = cpu->opaque; 11784 ret = ts->tp_value; 11785 break; 11786 } 11787 #else 11788 goto unimplemented_nowarn; 11789 #endif 11790 #endif 11791 #ifdef TARGET_NR_getdomainname 11792 case TARGET_NR_getdomainname: 11793 goto unimplemented_nowarn; 11794 #endif 11795 11796 #ifdef TARGET_NR_clock_gettime 11797 case TARGET_NR_clock_gettime: 11798 { 11799 struct timespec ts; 11800 ret = get_errno(clock_gettime(arg1, &ts)); 11801 if (!is_error(ret)) { 11802 host_to_target_timespec(arg2, &ts); 11803 } 11804 break; 11805 } 11806 #endif 11807 #ifdef TARGET_NR_clock_getres 11808 case TARGET_NR_clock_getres: 11809 { 
11810 struct timespec ts; 11811 ret = get_errno(clock_getres(arg1, &ts)); 11812 if (!is_error(ret)) { 11813 host_to_target_timespec(arg2, &ts); 11814 } 11815 break; 11816 } 11817 #endif 11818 #ifdef TARGET_NR_clock_nanosleep 11819 case TARGET_NR_clock_nanosleep: 11820 { 11821 struct timespec ts; 11822 target_to_host_timespec(&ts, arg3); 11823 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 11824 &ts, arg4 ? &ts : NULL)); 11825 if (arg4) 11826 host_to_target_timespec(arg4, &ts); 11827 11828 #if defined(TARGET_PPC) 11829 /* clock_nanosleep is odd in that it returns positive errno values. 11830 * On PPC, CR0 bit 3 should be set in such a situation. */ 11831 if (ret && ret != -TARGET_ERESTARTSYS) { 11832 ((CPUPPCState *)cpu_env)->crf[0] |= 1; 11833 } 11834 #endif 11835 break; 11836 } 11837 #endif 11838 11839 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 11840 case TARGET_NR_set_tid_address: 11841 ret = get_errno(set_tid_address((int *)g2h(arg1))); 11842 break; 11843 #endif 11844 11845 case TARGET_NR_tkill: 11846 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 11847 break; 11848 11849 case TARGET_NR_tgkill: 11850 ret = get_errno(safe_tgkill((int)arg1, (int)arg2, 11851 target_to_host_signal(arg3))); 11852 break; 11853 11854 #ifdef TARGET_NR_set_robust_list 11855 case TARGET_NR_set_robust_list: 11856 case TARGET_NR_get_robust_list: 11857 /* The ABI for supporting robust futexes has userspace pass 11858 * the kernel a pointer to a linked list which is updated by 11859 * userspace after the syscall; the list is walked by the kernel 11860 * when the thread exits. Since the linked list in QEMU guest 11861 * memory isn't a valid linked list for the host and we have 11862 * no way to reliably intercept the thread-death event, we can't 11863 * support these. 
Silently return ENOSYS so that guest userspace 11864 * falls back to a non-robust futex implementation (which should 11865 * be OK except in the corner case of the guest crashing while 11866 * holding a mutex that is shared with another process via 11867 * shared memory). 11868 */ 11869 goto unimplemented_nowarn; 11870 #endif 11871 11872 #if defined(TARGET_NR_utimensat) 11873 case TARGET_NR_utimensat: 11874 { 11875 struct timespec *tsp, ts[2]; 11876 if (!arg3) { 11877 tsp = NULL; 11878 } else { 11879 target_to_host_timespec(ts, arg3); 11880 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 11881 tsp = ts; 11882 } 11883 if (!arg2) 11884 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 11885 else { 11886 if (!(p = lock_user_string(arg2))) { 11887 ret = -TARGET_EFAULT; 11888 goto fail; 11889 } 11890 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 11891 unlock_user(p, arg2, 0); 11892 } 11893 } 11894 break; 11895 #endif 11896 case TARGET_NR_futex: 11897 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 11898 break; 11899 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 11900 case TARGET_NR_inotify_init: 11901 ret = get_errno(sys_inotify_init()); 11902 if (ret >= 0) { 11903 fd_trans_register(ret, &target_inotify_trans); 11904 } 11905 break; 11906 #endif 11907 #ifdef CONFIG_INOTIFY1 11908 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 11909 case TARGET_NR_inotify_init1: 11910 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1, 11911 fcntl_flags_tbl))); 11912 if (ret >= 0) { 11913 fd_trans_register(ret, &target_inotify_trans); 11914 } 11915 break; 11916 #endif 11917 #endif 11918 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 11919 case TARGET_NR_inotify_add_watch: 11920 p = lock_user_string(arg2); 11921 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 11922 unlock_user(p, arg2, 0); 11923 break; 11924 #endif 11925 #if 
defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 11926 case TARGET_NR_inotify_rm_watch: 11927 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 11928 break; 11929 #endif 11930 11931 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 11932 case TARGET_NR_mq_open: 11933 { 11934 struct mq_attr posix_mq_attr; 11935 struct mq_attr *pposix_mq_attr; 11936 int host_flags; 11937 11938 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 11939 pposix_mq_attr = NULL; 11940 if (arg4) { 11941 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 11942 goto efault; 11943 } 11944 pposix_mq_attr = &posix_mq_attr; 11945 } 11946 p = lock_user_string(arg1 - 1); 11947 if (!p) { 11948 goto efault; 11949 } 11950 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 11951 unlock_user (p, arg1, 0); 11952 } 11953 break; 11954 11955 case TARGET_NR_mq_unlink: 11956 p = lock_user_string(arg1 - 1); 11957 if (!p) { 11958 ret = -TARGET_EFAULT; 11959 break; 11960 } 11961 ret = get_errno(mq_unlink(p)); 11962 unlock_user (p, arg1, 0); 11963 break; 11964 11965 case TARGET_NR_mq_timedsend: 11966 { 11967 struct timespec ts; 11968 11969 p = lock_user (VERIFY_READ, arg2, arg3, 1); 11970 if (arg5 != 0) { 11971 target_to_host_timespec(&ts, arg5); 11972 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 11973 host_to_target_timespec(arg5, &ts); 11974 } else { 11975 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 11976 } 11977 unlock_user (p, arg2, arg3); 11978 } 11979 break; 11980 11981 case TARGET_NR_mq_timedreceive: 11982 { 11983 struct timespec ts; 11984 unsigned int prio; 11985 11986 p = lock_user (VERIFY_READ, arg2, arg3, 1); 11987 if (arg5 != 0) { 11988 target_to_host_timespec(&ts, arg5); 11989 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 11990 &prio, &ts)); 11991 host_to_target_timespec(arg5, &ts); 11992 } else { 11993 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 11994 &prio, NULL)); 11995 } 11996 unlock_user (p, 
arg2, arg3); 11997 if (arg4 != 0) 11998 put_user_u32(prio, arg4); 11999 } 12000 break; 12001 12002 /* Not implemented for now... */ 12003 /* case TARGET_NR_mq_notify: */ 12004 /* break; */ 12005 12006 case TARGET_NR_mq_getsetattr: 12007 { 12008 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 12009 ret = 0; 12010 if (arg3 != 0) { 12011 ret = mq_getattr(arg1, &posix_mq_attr_out); 12012 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 12013 } 12014 if (arg2 != 0) { 12015 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 12016 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 12017 } 12018 12019 } 12020 break; 12021 #endif 12022 12023 #ifdef CONFIG_SPLICE 12024 #ifdef TARGET_NR_tee 12025 case TARGET_NR_tee: 12026 { 12027 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 12028 } 12029 break; 12030 #endif 12031 #ifdef TARGET_NR_splice 12032 case TARGET_NR_splice: 12033 { 12034 loff_t loff_in, loff_out; 12035 loff_t *ploff_in = NULL, *ploff_out = NULL; 12036 if (arg2) { 12037 if (get_user_u64(loff_in, arg2)) { 12038 goto efault; 12039 } 12040 ploff_in = &loff_in; 12041 } 12042 if (arg4) { 12043 if (get_user_u64(loff_out, arg4)) { 12044 goto efault; 12045 } 12046 ploff_out = &loff_out; 12047 } 12048 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 12049 if (arg2) { 12050 if (put_user_u64(loff_in, arg2)) { 12051 goto efault; 12052 } 12053 } 12054 if (arg4) { 12055 if (put_user_u64(loff_out, arg4)) { 12056 goto efault; 12057 } 12058 } 12059 } 12060 break; 12061 #endif 12062 #ifdef TARGET_NR_vmsplice 12063 case TARGET_NR_vmsplice: 12064 { 12065 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 12066 if (vec != NULL) { 12067 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 12068 unlock_iovec(vec, arg2, arg3, 0); 12069 } else { 12070 ret = -host_to_target_errno(errno); 12071 } 12072 } 12073 break; 12074 #endif 12075 #endif /* CONFIG_SPLICE */ 12076 #ifdef CONFIG_EVENTFD 12077 #if defined(TARGET_NR_eventfd) 12078 case TARGET_NR_eventfd: 
12079 ret = get_errno(eventfd(arg1, 0)); 12080 if (ret >= 0) { 12081 fd_trans_register(ret, &target_eventfd_trans); 12082 } 12083 break; 12084 #endif 12085 #if defined(TARGET_NR_eventfd2) 12086 case TARGET_NR_eventfd2: 12087 { 12088 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 12089 if (arg2 & TARGET_O_NONBLOCK) { 12090 host_flags |= O_NONBLOCK; 12091 } 12092 if (arg2 & TARGET_O_CLOEXEC) { 12093 host_flags |= O_CLOEXEC; 12094 } 12095 ret = get_errno(eventfd(arg1, host_flags)); 12096 if (ret >= 0) { 12097 fd_trans_register(ret, &target_eventfd_trans); 12098 } 12099 break; 12100 } 12101 #endif 12102 #endif /* CONFIG_EVENTFD */ 12103 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12104 case TARGET_NR_fallocate: 12105 #if TARGET_ABI_BITS == 32 12106 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12107 target_offset64(arg5, arg6))); 12108 #else 12109 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12110 #endif 12111 break; 12112 #endif 12113 #if defined(CONFIG_SYNC_FILE_RANGE) 12114 #if defined(TARGET_NR_sync_file_range) 12115 case TARGET_NR_sync_file_range: 12116 #if TARGET_ABI_BITS == 32 12117 #if defined(TARGET_MIPS) 12118 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12119 target_offset64(arg5, arg6), arg7)); 12120 #else 12121 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12122 target_offset64(arg4, arg5), arg6)); 12123 #endif /* !TARGET_MIPS */ 12124 #else 12125 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 12126 #endif 12127 break; 12128 #endif 12129 #if defined(TARGET_NR_sync_file_range2) 12130 case TARGET_NR_sync_file_range2: 12131 /* This is like sync_file_range but the arguments are reordered */ 12132 #if TARGET_ABI_BITS == 32 12133 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12134 target_offset64(arg5, arg6), arg2)); 12135 #else 12136 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 12137 #endif 12138 break; 12139 
#endif 12140 #endif 12141 #if defined(TARGET_NR_signalfd4) 12142 case TARGET_NR_signalfd4: 12143 ret = do_signalfd4(arg1, arg2, arg4); 12144 break; 12145 #endif 12146 #if defined(TARGET_NR_signalfd) 12147 case TARGET_NR_signalfd: 12148 ret = do_signalfd4(arg1, arg2, 0); 12149 break; 12150 #endif 12151 #if defined(CONFIG_EPOLL) 12152 #if defined(TARGET_NR_epoll_create) 12153 case TARGET_NR_epoll_create: 12154 ret = get_errno(epoll_create(arg1)); 12155 break; 12156 #endif 12157 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 12158 case TARGET_NR_epoll_create1: 12159 ret = get_errno(epoll_create1(arg1)); 12160 break; 12161 #endif 12162 #if defined(TARGET_NR_epoll_ctl) 12163 case TARGET_NR_epoll_ctl: 12164 { 12165 struct epoll_event ep; 12166 struct epoll_event *epp = 0; 12167 if (arg4) { 12168 struct target_epoll_event *target_ep; 12169 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 12170 goto efault; 12171 } 12172 ep.events = tswap32(target_ep->events); 12173 /* The epoll_data_t union is just opaque data to the kernel, 12174 * so we transfer all 64 bits across and need not worry what 12175 * actual data type it is. 
12176 */ 12177 ep.data.u64 = tswap64(target_ep->data.u64); 12178 unlock_user_struct(target_ep, arg4, 0); 12179 epp = &ep; 12180 } 12181 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 12182 break; 12183 } 12184 #endif 12185 12186 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 12187 #if defined(TARGET_NR_epoll_wait) 12188 case TARGET_NR_epoll_wait: 12189 #endif 12190 #if defined(TARGET_NR_epoll_pwait) 12191 case TARGET_NR_epoll_pwait: 12192 #endif 12193 { 12194 struct target_epoll_event *target_ep; 12195 struct epoll_event *ep; 12196 int epfd = arg1; 12197 int maxevents = arg3; 12198 int timeout = arg4; 12199 12200 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 12201 ret = -TARGET_EINVAL; 12202 break; 12203 } 12204 12205 target_ep = lock_user(VERIFY_WRITE, arg2, 12206 maxevents * sizeof(struct target_epoll_event), 1); 12207 if (!target_ep) { 12208 goto efault; 12209 } 12210 12211 ep = g_try_new(struct epoll_event, maxevents); 12212 if (!ep) { 12213 unlock_user(target_ep, arg2, 0); 12214 ret = -TARGET_ENOMEM; 12215 break; 12216 } 12217 12218 switch (num) { 12219 #if defined(TARGET_NR_epoll_pwait) 12220 case TARGET_NR_epoll_pwait: 12221 { 12222 target_sigset_t *target_set; 12223 sigset_t _set, *set = &_set; 12224 12225 if (arg5) { 12226 if (arg6 != sizeof(target_sigset_t)) { 12227 ret = -TARGET_EINVAL; 12228 break; 12229 } 12230 12231 target_set = lock_user(VERIFY_READ, arg5, 12232 sizeof(target_sigset_t), 1); 12233 if (!target_set) { 12234 ret = -TARGET_EFAULT; 12235 break; 12236 } 12237 target_to_host_sigset(set, target_set); 12238 unlock_user(target_set, arg5, 0); 12239 } else { 12240 set = NULL; 12241 } 12242 12243 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12244 set, SIGSET_T_SIZE)); 12245 break; 12246 } 12247 #endif 12248 #if defined(TARGET_NR_epoll_wait) 12249 case TARGET_NR_epoll_wait: 12250 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12251 NULL, 0)); 12252 break; 12253 #endif 12254 
default: 12255 ret = -TARGET_ENOSYS; 12256 } 12257 if (!is_error(ret)) { 12258 int i; 12259 for (i = 0; i < ret; i++) { 12260 target_ep[i].events = tswap32(ep[i].events); 12261 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 12262 } 12263 unlock_user(target_ep, arg2, 12264 ret * sizeof(struct target_epoll_event)); 12265 } else { 12266 unlock_user(target_ep, arg2, 0); 12267 } 12268 g_free(ep); 12269 break; 12270 } 12271 #endif 12272 #endif 12273 #ifdef TARGET_NR_prlimit64 12274 case TARGET_NR_prlimit64: 12275 { 12276 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 12277 struct target_rlimit64 *target_rnew, *target_rold; 12278 struct host_rlimit64 rnew, rold, *rnewp = 0; 12279 int resource = target_to_host_resource(arg2); 12280 if (arg3) { 12281 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 12282 goto efault; 12283 } 12284 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 12285 rnew.rlim_max = tswap64(target_rnew->rlim_max); 12286 unlock_user_struct(target_rnew, arg3, 0); 12287 rnewp = &rnew; 12288 } 12289 12290 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? 
&rold : 0)); 12291 if (!is_error(ret) && arg4) { 12292 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 12293 goto efault; 12294 } 12295 target_rold->rlim_cur = tswap64(rold.rlim_cur); 12296 target_rold->rlim_max = tswap64(rold.rlim_max); 12297 unlock_user_struct(target_rold, arg4, 1); 12298 } 12299 break; 12300 } 12301 #endif 12302 #ifdef TARGET_NR_gethostname 12303 case TARGET_NR_gethostname: 12304 { 12305 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 12306 if (name) { 12307 ret = get_errno(gethostname(name, arg2)); 12308 unlock_user(name, arg1, arg2); 12309 } else { 12310 ret = -TARGET_EFAULT; 12311 } 12312 break; 12313 } 12314 #endif 12315 #ifdef TARGET_NR_atomic_cmpxchg_32 12316 case TARGET_NR_atomic_cmpxchg_32: 12317 { 12318 /* should use start_exclusive from main.c */ 12319 abi_ulong mem_value; 12320 if (get_user_u32(mem_value, arg6)) { 12321 target_siginfo_t info; 12322 info.si_signo = SIGSEGV; 12323 info.si_errno = 0; 12324 info.si_code = TARGET_SEGV_MAPERR; 12325 info._sifields._sigfault._addr = arg6; 12326 queue_signal((CPUArchState *)cpu_env, info.si_signo, 12327 QEMU_SI_FAULT, &info); 12328 ret = 0xdeadbeef; 12329 12330 } 12331 if (mem_value == arg2) 12332 put_user_u32(arg1, arg6); 12333 ret = mem_value; 12334 break; 12335 } 12336 #endif 12337 #ifdef TARGET_NR_atomic_barrier 12338 case TARGET_NR_atomic_barrier: 12339 { 12340 /* Like the kernel implementation and the qemu arm barrier, no-op this? 
*/ 12341 ret = 0; 12342 break; 12343 } 12344 #endif 12345 12346 #ifdef TARGET_NR_timer_create 12347 case TARGET_NR_timer_create: 12348 { 12349 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 12350 12351 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 12352 12353 int clkid = arg1; 12354 int timer_index = next_free_host_timer(); 12355 12356 if (timer_index < 0) { 12357 ret = -TARGET_EAGAIN; 12358 } else { 12359 timer_t *phtimer = g_posix_timers + timer_index; 12360 12361 if (arg2) { 12362 phost_sevp = &host_sevp; 12363 ret = target_to_host_sigevent(phost_sevp, arg2); 12364 if (ret != 0) { 12365 break; 12366 } 12367 } 12368 12369 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 12370 if (ret) { 12371 phtimer = NULL; 12372 } else { 12373 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 12374 goto efault; 12375 } 12376 } 12377 } 12378 break; 12379 } 12380 #endif 12381 12382 #ifdef TARGET_NR_timer_settime 12383 case TARGET_NR_timer_settime: 12384 { 12385 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 12386 * struct itimerspec * old_value */ 12387 target_timer_t timerid = get_timer_id(arg1); 12388 12389 if (timerid < 0) { 12390 ret = timerid; 12391 } else if (arg3 == 0) { 12392 ret = -TARGET_EINVAL; 12393 } else { 12394 timer_t htimer = g_posix_timers[timerid]; 12395 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 12396 12397 if (target_to_host_itimerspec(&hspec_new, arg3)) { 12398 goto efault; 12399 } 12400 ret = get_errno( 12401 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 12402 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) { 12403 goto efault; 12404 } 12405 } 12406 break; 12407 } 12408 #endif 12409 12410 #ifdef TARGET_NR_timer_gettime 12411 case TARGET_NR_timer_gettime: 12412 { 12413 /* args: timer_t timerid, struct itimerspec *curr_value */ 12414 target_timer_t timerid = get_timer_id(arg1); 12415 12416 if (timerid < 0) { 12417 ret = timerid; 12418 } else 
if (!arg2) { 12419 ret = -TARGET_EFAULT; 12420 } else { 12421 timer_t htimer = g_posix_timers[timerid]; 12422 struct itimerspec hspec; 12423 ret = get_errno(timer_gettime(htimer, &hspec)); 12424 12425 if (host_to_target_itimerspec(arg2, &hspec)) { 12426 ret = -TARGET_EFAULT; 12427 } 12428 } 12429 break; 12430 } 12431 #endif 12432 12433 #ifdef TARGET_NR_timer_getoverrun 12434 case TARGET_NR_timer_getoverrun: 12435 { 12436 /* args: timer_t timerid */ 12437 target_timer_t timerid = get_timer_id(arg1); 12438 12439 if (timerid < 0) { 12440 ret = timerid; 12441 } else { 12442 timer_t htimer = g_posix_timers[timerid]; 12443 ret = get_errno(timer_getoverrun(htimer)); 12444 } 12445 fd_trans_unregister(ret); 12446 break; 12447 } 12448 #endif 12449 12450 #ifdef TARGET_NR_timer_delete 12451 case TARGET_NR_timer_delete: 12452 { 12453 /* args: timer_t timerid */ 12454 target_timer_t timerid = get_timer_id(arg1); 12455 12456 if (timerid < 0) { 12457 ret = timerid; 12458 } else { 12459 timer_t htimer = g_posix_timers[timerid]; 12460 ret = get_errno(timer_delete(htimer)); 12461 g_posix_timers[timerid] = 0; 12462 } 12463 break; 12464 } 12465 #endif 12466 12467 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 12468 case TARGET_NR_timerfd_create: 12469 ret = get_errno(timerfd_create(arg1, 12470 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 12471 break; 12472 #endif 12473 12474 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 12475 case TARGET_NR_timerfd_gettime: 12476 { 12477 struct itimerspec its_curr; 12478 12479 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 12480 12481 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 12482 goto efault; 12483 } 12484 } 12485 break; 12486 #endif 12487 12488 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 12489 case TARGET_NR_timerfd_settime: 12490 { 12491 struct itimerspec its_new, its_old, *p_new; 12492 12493 if (arg3) { 12494 if (target_to_host_itimerspec(&its_new, arg3)) { 12495 
goto efault; 12496 } 12497 p_new = &its_new; 12498 } else { 12499 p_new = NULL; 12500 } 12501 12502 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 12503 12504 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 12505 goto efault; 12506 } 12507 } 12508 break; 12509 #endif 12510 12511 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 12512 case TARGET_NR_ioprio_get: 12513 ret = get_errno(ioprio_get(arg1, arg2)); 12514 break; 12515 #endif 12516 12517 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 12518 case TARGET_NR_ioprio_set: 12519 ret = get_errno(ioprio_set(arg1, arg2, arg3)); 12520 break; 12521 #endif 12522 12523 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 12524 case TARGET_NR_setns: 12525 ret = get_errno(setns(arg1, arg2)); 12526 break; 12527 #endif 12528 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 12529 case TARGET_NR_unshare: 12530 ret = get_errno(unshare(arg1)); 12531 break; 12532 #endif 12533 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 12534 case TARGET_NR_kcmp: 12535 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5)); 12536 break; 12537 #endif 12538 12539 default: 12540 unimplemented: 12541 gemu_log("qemu: Unsupported syscall: %d\n", num); 12542 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 12543 unimplemented_nowarn: 12544 #endif 12545 ret = -TARGET_ENOSYS; 12546 break; 12547 } 12548 fail: 12549 #ifdef DEBUG 12550 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 12551 #endif 12552 if(do_strace) 12553 print_syscall_ret(num, ret); 12554 trace_guest_user_syscall_ret(cpu, num, ret); 12555 return ret; 12556 efault: 12557 ret = -TARGET_EFAULT; 12558 goto fail; 12559 } 12560