1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/path.h" 23 #include <elf.h> 24 #include <endian.h> 25 #include <grp.h> 26 #include <sys/ipc.h> 27 #include <sys/msg.h> 28 #include <sys/wait.h> 29 #include <sys/mount.h> 30 #include <sys/file.h> 31 #include <sys/fsuid.h> 32 #include <sys/personality.h> 33 #include <sys/prctl.h> 34 #include <sys/resource.h> 35 #include <sys/swap.h> 36 #include <linux/capability.h> 37 #include <sched.h> 38 #include <sys/timex.h> 39 #include <sys/socket.h> 40 #include <sys/un.h> 41 #include <sys/uio.h> 42 #include <poll.h> 43 #include <sys/times.h> 44 #include <sys/shm.h> 45 #include <sys/sem.h> 46 #include <sys/statfs.h> 47 #include <utime.h> 48 #include <sys/sysinfo.h> 49 #include <sys/signalfd.h> 50 //#include <sys/user.h> 51 #include <netinet/ip.h> 52 #include <netinet/tcp.h> 53 #include <linux/wireless.h> 54 #include <linux/icmp.h> 55 #include <linux/icmpv6.h> 56 #include <linux/errqueue.h> 57 #include <linux/random.h> 58 #include "qemu-common.h" 59 #ifdef CONFIG_TIMERFD 60 #include <sys/timerfd.h> 61 #endif 62 #ifdef TARGET_GPROF 63 #include <sys/gmon.h> 64 #endif 65 #ifdef CONFIG_EVENTFD 66 #include <sys/eventfd.h> 67 #endif 68 #ifdef CONFIG_EPOLL 69 
#include <sys/epoll.h> 70 #endif 71 #ifdef CONFIG_ATTR 72 #include "qemu/xattr.h" 73 #endif 74 #ifdef CONFIG_SENDFILE 75 #include <sys/sendfile.h> 76 #endif 77 78 #define termios host_termios 79 #define winsize host_winsize 80 #define termio host_termio 81 #define sgttyb host_sgttyb /* same as target */ 82 #define tchars host_tchars /* same as target */ 83 #define ltchars host_ltchars /* same as target */ 84 85 #include <linux/termios.h> 86 #include <linux/unistd.h> 87 #include <linux/cdrom.h> 88 #include <linux/hdreg.h> 89 #include <linux/soundcard.h> 90 #include <linux/kd.h> 91 #include <linux/mtio.h> 92 #include <linux/fs.h> 93 #if defined(CONFIG_FIEMAP) 94 #include <linux/fiemap.h> 95 #endif 96 #include <linux/fb.h> 97 #include <linux/vt.h> 98 #include <linux/dm-ioctl.h> 99 #include <linux/reboot.h> 100 #include <linux/route.h> 101 #include <linux/filter.h> 102 #include <linux/blkpg.h> 103 #include <netpacket/packet.h> 104 #include <linux/netlink.h> 105 #ifdef CONFIG_RTNETLINK 106 #include <linux/rtnetlink.h> 107 #include <linux/if_bridge.h> 108 #endif 109 #include <linux/audit.h> 110 #include "linux_loop.h" 111 #include "uname.h" 112 113 #include "qemu.h" 114 115 #ifndef CLONE_IO 116 #define CLONE_IO 0x80000000 /* Clone io context */ 117 #endif 118 119 /* We can't directly call the host clone syscall, because this will 120 * badly confuse libc (breaking mutexes, for example). So we must 121 * divide clone flags into: 122 * * flag combinations that look like pthread_create() 123 * * flag combinations that look like fork() 124 * * flags we can implement within QEMU itself 125 * * flags we can't support and will return an error for 126 */ 127 /* For thread creation, all these flags must be present; for 128 * fork, none must be present. 
129 */ 130 #define CLONE_THREAD_FLAGS \ 131 (CLONE_VM | CLONE_FS | CLONE_FILES | \ 132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM) 133 134 /* These flags are ignored: 135 * CLONE_DETACHED is now ignored by the kernel; 136 * CLONE_IO is just an optimisation hint to the I/O scheduler 137 */ 138 #define CLONE_IGNORED_FLAGS \ 139 (CLONE_DETACHED | CLONE_IO) 140 141 /* Flags for fork which we can implement within QEMU itself */ 142 #define CLONE_OPTIONAL_FORK_FLAGS \ 143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) 145 146 /* Flags for thread creation which we can implement within QEMU itself */ 147 #define CLONE_OPTIONAL_THREAD_FLAGS \ 148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT) 150 151 #define CLONE_INVALID_FORK_FLAGS \ 152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS)) 153 154 #define CLONE_INVALID_THREAD_FLAGS \ 155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \ 156 CLONE_IGNORED_FLAGS)) 157 158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits 159 * have almost all been allocated. We cannot support any of 160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC, 161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED. 162 * The checks against the invalid thread masks above will catch these. 163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.) 164 */ 165 166 //#define DEBUG 167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted 168 * once. This exercises the codepaths for restart. 
169 */ 170 //#define DEBUG_ERESTARTSYS 171 172 //#include <linux/msdos_fs.h> 173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 175 176 #undef _syscall0 177 #undef _syscall1 178 #undef _syscall2 179 #undef _syscall3 180 #undef _syscall4 181 #undef _syscall5 182 #undef _syscall6 183 184 #define _syscall0(type,name) \ 185 static type name (void) \ 186 { \ 187 return syscall(__NR_##name); \ 188 } 189 190 #define _syscall1(type,name,type1,arg1) \ 191 static type name (type1 arg1) \ 192 { \ 193 return syscall(__NR_##name, arg1); \ 194 } 195 196 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 197 static type name (type1 arg1,type2 arg2) \ 198 { \ 199 return syscall(__NR_##name, arg1, arg2); \ 200 } 201 202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 203 static type name (type1 arg1,type2 arg2,type3 arg3) \ 204 { \ 205 return syscall(__NR_##name, arg1, arg2, arg3); \ 206 } 207 208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 210 { \ 211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 212 } 213 214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 215 type5,arg5) \ 216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 217 { \ 218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 219 } 220 221 222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 223 type5,arg5,type6,arg6) \ 224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 225 type6 arg6) \ 226 { \ 227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 228 } 229 230 231 #define __NR_sys_uname __NR_uname 232 #define __NR_sys_getcwd1 __NR_getcwd 233 #define __NR_sys_getdents __NR_getdents 234 #define __NR_sys_getdents64 __NR_getdents64 235 #define __NR_sys_getpriority 
__NR_getpriority 236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo 238 #define __NR_sys_syslog __NR_syslog 239 #define __NR_sys_futex __NR_futex 240 #define __NR_sys_inotify_init __NR_inotify_init 241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 243 244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__) 245 #define __NR__llseek __NR_lseek 246 #endif 247 248 /* Newer kernel ports have llseek() instead of _llseek() */ 249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 250 #define TARGET_NR__llseek TARGET_NR_llseek 251 #endif 252 253 #ifdef __NR_gettid 254 _syscall0(int, gettid) 255 #else 256 /* This is a replacement for the host gettid() and must return a host 257 errno. */ 258 static int gettid(void) { 259 return -ENOSYS; 260 } 261 #endif 262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents) 263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 264 #endif 265 #if !defined(__NR_getdents) || \ 266 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 267 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 268 #endif 269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 270 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 271 loff_t *, res, uint, wh); 272 #endif 273 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo) 274 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig, 275 siginfo_t *, uinfo) 276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 277 #ifdef __NR_exit_group 278 _syscall1(int,exit_group,int,error_code) 279 #endif 280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 281 _syscall1(int,set_tid_address,int *,tidptr) 282 #endif 283 #if defined(TARGET_NR_futex) && defined(__NR_futex) 284 _syscall6(int,sys_futex,int 
*,uaddr,int,op,int,val, 285 const struct timespec *,timeout,int *,uaddr2,int,val3) 286 #endif 287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 289 unsigned long *, user_mask_ptr); 290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 292 unsigned long *, user_mask_ptr); 293 #define __NR_sys_getcpu __NR_getcpu 294 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache); 295 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 296 void *, arg); 297 _syscall2(int, capget, struct __user_cap_header_struct *, header, 298 struct __user_cap_data_struct *, data); 299 _syscall2(int, capset, struct __user_cap_header_struct *, header, 300 struct __user_cap_data_struct *, data); 301 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 302 _syscall2(int, ioprio_get, int, which, int, who) 303 #endif 304 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 305 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio) 306 #endif 307 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 308 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags) 309 #endif 310 311 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 312 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type, 313 unsigned long, idx1, unsigned long, idx2) 314 #endif 315 316 static bitmask_transtbl fcntl_flags_tbl[] = { 317 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 318 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 319 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 320 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 321 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 322 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 323 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 324 { TARGET_O_NONBLOCK, 
TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 325 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 326 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 327 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 328 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 329 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 330 #if defined(O_DIRECT) 331 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 332 #endif 333 #if defined(O_NOATIME) 334 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 335 #endif 336 #if defined(O_CLOEXEC) 337 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 338 #endif 339 #if defined(O_PATH) 340 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 341 #endif 342 #if defined(O_TMPFILE) 343 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE }, 344 #endif 345 /* Don't terminate the list prematurely on 64-bit host+guest. */ 346 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 347 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 348 #endif 349 { 0, 0, 0, 0 } 350 }; 351 352 enum { 353 QEMU_IFLA_BR_UNSPEC, 354 QEMU_IFLA_BR_FORWARD_DELAY, 355 QEMU_IFLA_BR_HELLO_TIME, 356 QEMU_IFLA_BR_MAX_AGE, 357 QEMU_IFLA_BR_AGEING_TIME, 358 QEMU_IFLA_BR_STP_STATE, 359 QEMU_IFLA_BR_PRIORITY, 360 QEMU_IFLA_BR_VLAN_FILTERING, 361 QEMU_IFLA_BR_VLAN_PROTOCOL, 362 QEMU_IFLA_BR_GROUP_FWD_MASK, 363 QEMU_IFLA_BR_ROOT_ID, 364 QEMU_IFLA_BR_BRIDGE_ID, 365 QEMU_IFLA_BR_ROOT_PORT, 366 QEMU_IFLA_BR_ROOT_PATH_COST, 367 QEMU_IFLA_BR_TOPOLOGY_CHANGE, 368 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED, 369 QEMU_IFLA_BR_HELLO_TIMER, 370 QEMU_IFLA_BR_TCN_TIMER, 371 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER, 372 QEMU_IFLA_BR_GC_TIMER, 373 QEMU_IFLA_BR_GROUP_ADDR, 374 QEMU_IFLA_BR_FDB_FLUSH, 375 QEMU_IFLA_BR_MCAST_ROUTER, 376 QEMU_IFLA_BR_MCAST_SNOOPING, 377 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR, 378 QEMU_IFLA_BR_MCAST_QUERIER, 379 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY, 380 QEMU_IFLA_BR_MCAST_HASH_MAX, 381 
QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT, 382 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT, 383 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL, 384 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL, 385 QEMU_IFLA_BR_MCAST_QUERIER_INTVL, 386 QEMU_IFLA_BR_MCAST_QUERY_INTVL, 387 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, 388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL, 389 QEMU_IFLA_BR_NF_CALL_IPTABLES, 390 QEMU_IFLA_BR_NF_CALL_IP6TABLES, 391 QEMU_IFLA_BR_NF_CALL_ARPTABLES, 392 QEMU_IFLA_BR_VLAN_DEFAULT_PVID, 393 QEMU_IFLA_BR_PAD, 394 QEMU_IFLA_BR_VLAN_STATS_ENABLED, 395 QEMU_IFLA_BR_MCAST_STATS_ENABLED, 396 QEMU___IFLA_BR_MAX, 397 }; 398 399 enum { 400 QEMU_IFLA_UNSPEC, 401 QEMU_IFLA_ADDRESS, 402 QEMU_IFLA_BROADCAST, 403 QEMU_IFLA_IFNAME, 404 QEMU_IFLA_MTU, 405 QEMU_IFLA_LINK, 406 QEMU_IFLA_QDISC, 407 QEMU_IFLA_STATS, 408 QEMU_IFLA_COST, 409 QEMU_IFLA_PRIORITY, 410 QEMU_IFLA_MASTER, 411 QEMU_IFLA_WIRELESS, 412 QEMU_IFLA_PROTINFO, 413 QEMU_IFLA_TXQLEN, 414 QEMU_IFLA_MAP, 415 QEMU_IFLA_WEIGHT, 416 QEMU_IFLA_OPERSTATE, 417 QEMU_IFLA_LINKMODE, 418 QEMU_IFLA_LINKINFO, 419 QEMU_IFLA_NET_NS_PID, 420 QEMU_IFLA_IFALIAS, 421 QEMU_IFLA_NUM_VF, 422 QEMU_IFLA_VFINFO_LIST, 423 QEMU_IFLA_STATS64, 424 QEMU_IFLA_VF_PORTS, 425 QEMU_IFLA_PORT_SELF, 426 QEMU_IFLA_AF_SPEC, 427 QEMU_IFLA_GROUP, 428 QEMU_IFLA_NET_NS_FD, 429 QEMU_IFLA_EXT_MASK, 430 QEMU_IFLA_PROMISCUITY, 431 QEMU_IFLA_NUM_TX_QUEUES, 432 QEMU_IFLA_NUM_RX_QUEUES, 433 QEMU_IFLA_CARRIER, 434 QEMU_IFLA_PHYS_PORT_ID, 435 QEMU_IFLA_CARRIER_CHANGES, 436 QEMU_IFLA_PHYS_SWITCH_ID, 437 QEMU_IFLA_LINK_NETNSID, 438 QEMU_IFLA_PHYS_PORT_NAME, 439 QEMU_IFLA_PROTO_DOWN, 440 QEMU_IFLA_GSO_MAX_SEGS, 441 QEMU_IFLA_GSO_MAX_SIZE, 442 QEMU_IFLA_PAD, 443 QEMU_IFLA_XDP, 444 QEMU___IFLA_MAX 445 }; 446 447 enum { 448 QEMU_IFLA_BRPORT_UNSPEC, 449 QEMU_IFLA_BRPORT_STATE, 450 QEMU_IFLA_BRPORT_PRIORITY, 451 QEMU_IFLA_BRPORT_COST, 452 QEMU_IFLA_BRPORT_MODE, 453 QEMU_IFLA_BRPORT_GUARD, 454 QEMU_IFLA_BRPORT_PROTECT, 455 QEMU_IFLA_BRPORT_FAST_LEAVE, 456 QEMU_IFLA_BRPORT_LEARNING, 457 
QEMU_IFLA_BRPORT_UNICAST_FLOOD, 458 QEMU_IFLA_BRPORT_PROXYARP, 459 QEMU_IFLA_BRPORT_LEARNING_SYNC, 460 QEMU_IFLA_BRPORT_PROXYARP_WIFI, 461 QEMU_IFLA_BRPORT_ROOT_ID, 462 QEMU_IFLA_BRPORT_BRIDGE_ID, 463 QEMU_IFLA_BRPORT_DESIGNATED_PORT, 464 QEMU_IFLA_BRPORT_DESIGNATED_COST, 465 QEMU_IFLA_BRPORT_ID, 466 QEMU_IFLA_BRPORT_NO, 467 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, 468 QEMU_IFLA_BRPORT_CONFIG_PENDING, 469 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER, 470 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER, 471 QEMU_IFLA_BRPORT_HOLD_TIMER, 472 QEMU_IFLA_BRPORT_FLUSH, 473 QEMU_IFLA_BRPORT_MULTICAST_ROUTER, 474 QEMU_IFLA_BRPORT_PAD, 475 QEMU___IFLA_BRPORT_MAX 476 }; 477 478 enum { 479 QEMU_IFLA_INFO_UNSPEC, 480 QEMU_IFLA_INFO_KIND, 481 QEMU_IFLA_INFO_DATA, 482 QEMU_IFLA_INFO_XSTATS, 483 QEMU_IFLA_INFO_SLAVE_KIND, 484 QEMU_IFLA_INFO_SLAVE_DATA, 485 QEMU___IFLA_INFO_MAX, 486 }; 487 488 enum { 489 QEMU_IFLA_INET_UNSPEC, 490 QEMU_IFLA_INET_CONF, 491 QEMU___IFLA_INET_MAX, 492 }; 493 494 enum { 495 QEMU_IFLA_INET6_UNSPEC, 496 QEMU_IFLA_INET6_FLAGS, 497 QEMU_IFLA_INET6_CONF, 498 QEMU_IFLA_INET6_STATS, 499 QEMU_IFLA_INET6_MCAST, 500 QEMU_IFLA_INET6_CACHEINFO, 501 QEMU_IFLA_INET6_ICMP6STATS, 502 QEMU_IFLA_INET6_TOKEN, 503 QEMU_IFLA_INET6_ADDR_GEN_MODE, 504 QEMU___IFLA_INET6_MAX 505 }; 506 507 typedef abi_long (*TargetFdDataFunc)(void *, size_t); 508 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t); 509 typedef struct TargetFdTrans { 510 TargetFdDataFunc host_to_target_data; 511 TargetFdDataFunc target_to_host_data; 512 TargetFdAddrFunc target_to_host_addr; 513 } TargetFdTrans; 514 515 static TargetFdTrans **target_fd_trans; 516 517 static unsigned int target_fd_max; 518 519 static TargetFdDataFunc fd_trans_target_to_host_data(int fd) 520 { 521 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 522 return target_fd_trans[fd]->target_to_host_data; 523 } 524 return NULL; 525 } 526 527 static TargetFdDataFunc fd_trans_host_to_target_data(int fd) 528 { 529 if (fd >= 0 && fd < 
target_fd_max && target_fd_trans[fd]) { 530 return target_fd_trans[fd]->host_to_target_data; 531 } 532 return NULL; 533 } 534 535 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd) 536 { 537 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 538 return target_fd_trans[fd]->target_to_host_addr; 539 } 540 return NULL; 541 } 542 543 static void fd_trans_register(int fd, TargetFdTrans *trans) 544 { 545 unsigned int oldmax; 546 547 if (fd >= target_fd_max) { 548 oldmax = target_fd_max; 549 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */ 550 target_fd_trans = g_renew(TargetFdTrans *, 551 target_fd_trans, target_fd_max); 552 memset((void *)(target_fd_trans + oldmax), 0, 553 (target_fd_max - oldmax) * sizeof(TargetFdTrans *)); 554 } 555 target_fd_trans[fd] = trans; 556 } 557 558 static void fd_trans_unregister(int fd) 559 { 560 if (fd >= 0 && fd < target_fd_max) { 561 target_fd_trans[fd] = NULL; 562 } 563 } 564 565 static void fd_trans_dup(int oldfd, int newfd) 566 { 567 fd_trans_unregister(newfd); 568 if (oldfd < target_fd_max && target_fd_trans[oldfd]) { 569 fd_trans_register(newfd, target_fd_trans[oldfd]); 570 } 571 } 572 573 static int sys_getcwd1(char *buf, size_t size) 574 { 575 if (getcwd(buf, size) == NULL) { 576 /* getcwd() sets errno */ 577 return (-1); 578 } 579 return strlen(buf)+1; 580 } 581 582 #ifdef TARGET_NR_utimensat 583 #if defined(__NR_utimensat) 584 #define __NR_sys_utimensat __NR_utimensat 585 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 586 const struct timespec *,tsp,int,flags) 587 #else 588 static int sys_utimensat(int dirfd, const char *pathname, 589 const struct timespec times[2], int flags) 590 { 591 errno = ENOSYS; 592 return -1; 593 } 594 #endif 595 #endif /* TARGET_NR_utimensat */ 596 597 #ifdef TARGET_NR_renameat2 598 #if defined(__NR_renameat2) 599 #define __NR_sys_renameat2 __NR_renameat2 600 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd, 601 const char *, new, 
unsigned int, flags) 602 #else 603 static int sys_renameat2(int oldfd, const char *old, 604 int newfd, const char *new, int flags) 605 { 606 if (flags == 0) { 607 return renameat(oldfd, old, newfd, new); 608 } 609 errno = ENOSYS; 610 return -1; 611 } 612 #endif 613 #endif /* TARGET_NR_renameat2 */ 614 615 #ifdef CONFIG_INOTIFY 616 #include <sys/inotify.h> 617 618 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 619 static int sys_inotify_init(void) 620 { 621 return (inotify_init()); 622 } 623 #endif 624 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 625 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 626 { 627 return (inotify_add_watch(fd, pathname, mask)); 628 } 629 #endif 630 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 631 static int sys_inotify_rm_watch(int fd, int32_t wd) 632 { 633 return (inotify_rm_watch(fd, wd)); 634 } 635 #endif 636 #ifdef CONFIG_INOTIFY1 637 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 638 static int sys_inotify_init1(int flags) 639 { 640 return (inotify_init1(flags)); 641 } 642 #endif 643 #endif 644 #else 645 /* Userspace can usually survive runtime without inotify */ 646 #undef TARGET_NR_inotify_init 647 #undef TARGET_NR_inotify_init1 648 #undef TARGET_NR_inotify_add_watch 649 #undef TARGET_NR_inotify_rm_watch 650 #endif /* CONFIG_INOTIFY */ 651 652 #if defined(TARGET_NR_prlimit64) 653 #ifndef __NR_prlimit64 654 # define __NR_prlimit64 -1 655 #endif 656 #define __NR_sys_prlimit64 __NR_prlimit64 657 /* The glibc rlimit structure may not be that used by the underlying syscall */ 658 struct host_rlimit64 { 659 uint64_t rlim_cur; 660 uint64_t rlim_max; 661 }; 662 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 663 const struct host_rlimit64 *, new_limit, 664 struct host_rlimit64 *, old_limit) 665 #endif 666 667 668 #if defined(TARGET_NR_timer_create) 669 /* Maxiumum of 32 active POSIX timers allowed at any 
one time. */ 670 static timer_t g_posix_timers[32] = { 0, } ; 671 672 static inline int next_free_host_timer(void) 673 { 674 int k ; 675 /* FIXME: Does finding the next free slot require a lock? */ 676 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) { 677 if (g_posix_timers[k] == 0) { 678 g_posix_timers[k] = (timer_t) 1; 679 return k; 680 } 681 } 682 return -1; 683 } 684 #endif 685 686 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ 687 #ifdef TARGET_ARM 688 static inline int regpairs_aligned(void *cpu_env, int num) 689 { 690 return ((((CPUARMState *)cpu_env)->eabi) == 1) ; 691 } 692 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32) 693 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } 694 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64) 695 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs 696 * of registers which translates to the same as ARM/MIPS, because we start with 697 * r3 as arg1 */ 698 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } 699 #elif defined(TARGET_SH4) 700 /* SH4 doesn't align register pairs, except for p{read,write}64 */ 701 static inline int regpairs_aligned(void *cpu_env, int num) 702 { 703 switch (num) { 704 case TARGET_NR_pread64: 705 case TARGET_NR_pwrite64: 706 return 1; 707 708 default: 709 return 0; 710 } 711 } 712 #else 713 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; } 714 #endif 715 716 #define ERRNO_TABLE_SIZE 1200 717 718 /* target_to_host_errno_table[] is initialized from 719 * host_to_target_errno_table[] in syscall_init(). */ 720 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 721 }; 722 723 /* 724 * This list is the union of errno values overridden in asm-<arch>/errno.h 725 * minus the errnos that are not actually generic to all archs. 
726 */ 727 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 728 [EAGAIN] = TARGET_EAGAIN, 729 [EIDRM] = TARGET_EIDRM, 730 [ECHRNG] = TARGET_ECHRNG, 731 [EL2NSYNC] = TARGET_EL2NSYNC, 732 [EL3HLT] = TARGET_EL3HLT, 733 [EL3RST] = TARGET_EL3RST, 734 [ELNRNG] = TARGET_ELNRNG, 735 [EUNATCH] = TARGET_EUNATCH, 736 [ENOCSI] = TARGET_ENOCSI, 737 [EL2HLT] = TARGET_EL2HLT, 738 [EDEADLK] = TARGET_EDEADLK, 739 [ENOLCK] = TARGET_ENOLCK, 740 [EBADE] = TARGET_EBADE, 741 [EBADR] = TARGET_EBADR, 742 [EXFULL] = TARGET_EXFULL, 743 [ENOANO] = TARGET_ENOANO, 744 [EBADRQC] = TARGET_EBADRQC, 745 [EBADSLT] = TARGET_EBADSLT, 746 [EBFONT] = TARGET_EBFONT, 747 [ENOSTR] = TARGET_ENOSTR, 748 [ENODATA] = TARGET_ENODATA, 749 [ETIME] = TARGET_ETIME, 750 [ENOSR] = TARGET_ENOSR, 751 [ENONET] = TARGET_ENONET, 752 [ENOPKG] = TARGET_ENOPKG, 753 [EREMOTE] = TARGET_EREMOTE, 754 [ENOLINK] = TARGET_ENOLINK, 755 [EADV] = TARGET_EADV, 756 [ESRMNT] = TARGET_ESRMNT, 757 [ECOMM] = TARGET_ECOMM, 758 [EPROTO] = TARGET_EPROTO, 759 [EDOTDOT] = TARGET_EDOTDOT, 760 [EMULTIHOP] = TARGET_EMULTIHOP, 761 [EBADMSG] = TARGET_EBADMSG, 762 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 763 [EOVERFLOW] = TARGET_EOVERFLOW, 764 [ENOTUNIQ] = TARGET_ENOTUNIQ, 765 [EBADFD] = TARGET_EBADFD, 766 [EREMCHG] = TARGET_EREMCHG, 767 [ELIBACC] = TARGET_ELIBACC, 768 [ELIBBAD] = TARGET_ELIBBAD, 769 [ELIBSCN] = TARGET_ELIBSCN, 770 [ELIBMAX] = TARGET_ELIBMAX, 771 [ELIBEXEC] = TARGET_ELIBEXEC, 772 [EILSEQ] = TARGET_EILSEQ, 773 [ENOSYS] = TARGET_ENOSYS, 774 [ELOOP] = TARGET_ELOOP, 775 [ERESTART] = TARGET_ERESTART, 776 [ESTRPIPE] = TARGET_ESTRPIPE, 777 [ENOTEMPTY] = TARGET_ENOTEMPTY, 778 [EUSERS] = TARGET_EUSERS, 779 [ENOTSOCK] = TARGET_ENOTSOCK, 780 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 781 [EMSGSIZE] = TARGET_EMSGSIZE, 782 [EPROTOTYPE] = TARGET_EPROTOTYPE, 783 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 784 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 785 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 786 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 787 
[EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 788 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 789 [EADDRINUSE] = TARGET_EADDRINUSE, 790 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 791 [ENETDOWN] = TARGET_ENETDOWN, 792 [ENETUNREACH] = TARGET_ENETUNREACH, 793 [ENETRESET] = TARGET_ENETRESET, 794 [ECONNABORTED] = TARGET_ECONNABORTED, 795 [ECONNRESET] = TARGET_ECONNRESET, 796 [ENOBUFS] = TARGET_ENOBUFS, 797 [EISCONN] = TARGET_EISCONN, 798 [ENOTCONN] = TARGET_ENOTCONN, 799 [EUCLEAN] = TARGET_EUCLEAN, 800 [ENOTNAM] = TARGET_ENOTNAM, 801 [ENAVAIL] = TARGET_ENAVAIL, 802 [EISNAM] = TARGET_EISNAM, 803 [EREMOTEIO] = TARGET_EREMOTEIO, 804 [EDQUOT] = TARGET_EDQUOT, 805 [ESHUTDOWN] = TARGET_ESHUTDOWN, 806 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 807 [ETIMEDOUT] = TARGET_ETIMEDOUT, 808 [ECONNREFUSED] = TARGET_ECONNREFUSED, 809 [EHOSTDOWN] = TARGET_EHOSTDOWN, 810 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 811 [EALREADY] = TARGET_EALREADY, 812 [EINPROGRESS] = TARGET_EINPROGRESS, 813 [ESTALE] = TARGET_ESTALE, 814 [ECANCELED] = TARGET_ECANCELED, 815 [ENOMEDIUM] = TARGET_ENOMEDIUM, 816 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 817 #ifdef ENOKEY 818 [ENOKEY] = TARGET_ENOKEY, 819 #endif 820 #ifdef EKEYEXPIRED 821 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 822 #endif 823 #ifdef EKEYREVOKED 824 [EKEYREVOKED] = TARGET_EKEYREVOKED, 825 #endif 826 #ifdef EKEYREJECTED 827 [EKEYREJECTED] = TARGET_EKEYREJECTED, 828 #endif 829 #ifdef EOWNERDEAD 830 [EOWNERDEAD] = TARGET_EOWNERDEAD, 831 #endif 832 #ifdef ENOTRECOVERABLE 833 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 834 #endif 835 #ifdef ENOMSG 836 [ENOMSG] = TARGET_ENOMSG, 837 #endif 838 #ifdef ERKFILL 839 [ERFKILL] = TARGET_ERFKILL, 840 #endif 841 #ifdef EHWPOISON 842 [EHWPOISON] = TARGET_EHWPOISON, 843 #endif 844 }; 845 846 static inline int host_to_target_errno(int err) 847 { 848 if (err >= 0 && err < ERRNO_TABLE_SIZE && 849 host_to_target_errno_table[err]) { 850 return host_to_target_errno_table[err]; 851 } 852 return err; 853 } 854 855 static inline int target_to_host_errno(int 
err) 856 { 857 if (err >= 0 && err < ERRNO_TABLE_SIZE && 858 target_to_host_errno_table[err]) { 859 return target_to_host_errno_table[err]; 860 } 861 return err; 862 } 863 864 static inline abi_long get_errno(abi_long ret) 865 { 866 if (ret == -1) 867 return -host_to_target_errno(errno); 868 else 869 return ret; 870 } 871 872 static inline int is_error(abi_long ret) 873 { 874 return (abi_ulong)ret >= (abi_ulong)(-4096); 875 } 876 877 const char *target_strerror(int err) 878 { 879 if (err == TARGET_ERESTARTSYS) { 880 return "To be restarted"; 881 } 882 if (err == TARGET_QEMU_ESIGRETURN) { 883 return "Successful exit from sigreturn"; 884 } 885 886 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) { 887 return NULL; 888 } 889 return strerror(target_to_host_errno(err)); 890 } 891 892 #define safe_syscall0(type, name) \ 893 static type safe_##name(void) \ 894 { \ 895 return safe_syscall(__NR_##name); \ 896 } 897 898 #define safe_syscall1(type, name, type1, arg1) \ 899 static type safe_##name(type1 arg1) \ 900 { \ 901 return safe_syscall(__NR_##name, arg1); \ 902 } 903 904 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \ 905 static type safe_##name(type1 arg1, type2 arg2) \ 906 { \ 907 return safe_syscall(__NR_##name, arg1, arg2); \ 908 } 909 910 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 911 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \ 912 { \ 913 return safe_syscall(__NR_##name, arg1, arg2, arg3); \ 914 } 915 916 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ 917 type4, arg4) \ 918 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ 919 { \ 920 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 921 } 922 923 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ 924 type4, arg4, type5, arg5) \ 925 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 926 type5 arg5) \ 927 { \ 928 return safe_syscall(__NR_##name, 
arg1, arg2, arg3, arg4, arg5); \ 929 } 930 931 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ 932 type4, arg4, type5, arg5, type6, arg6) \ 933 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 934 type5 arg5, type6 arg6) \ 935 { \ 936 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 937 } 938 939 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count) 940 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count) 941 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \ 942 int, flags, mode_t, mode) 943 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \ 944 struct rusage *, rusage) 945 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \ 946 int, options, struct rusage *, rusage) 947 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp) 948 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \ 949 fd_set *, exceptfds, struct timespec *, timeout, void *, sig) 950 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds, 951 struct timespec *, tsp, const sigset_t *, sigmask, 952 size_t, sigsetsize) 953 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events, 954 int, maxevents, int, timeout, const sigset_t *, sigmask, 955 size_t, sigsetsize) 956 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \ 957 const struct timespec *,timeout,int *,uaddr2,int,val3) 958 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize) 959 safe_syscall2(int, kill, pid_t, pid, int, sig) 960 safe_syscall2(int, tkill, int, tid, int, sig) 961 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig) 962 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt) 963 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt) 964 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, 
iovcnt, 965 unsigned long, pos_l, unsigned long, pos_h) 966 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt, 967 unsigned long, pos_l, unsigned long, pos_h) 968 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr, 969 socklen_t, addrlen) 970 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len, 971 int, flags, const struct sockaddr *, addr, socklen_t, addrlen) 972 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len, 973 int, flags, struct sockaddr *, addr, socklen_t *, addrlen) 974 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags) 975 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags) 976 safe_syscall2(int, flock, int, fd, int, operation) 977 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo, 978 const struct timespec *, uts, size_t, sigsetsize) 979 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len, 980 int, flags) 981 safe_syscall2(int, nanosleep, const struct timespec *, req, 982 struct timespec *, rem) 983 #ifdef TARGET_NR_clock_nanosleep 984 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags, 985 const struct timespec *, req, struct timespec *, rem) 986 #endif 987 #ifdef __NR_msgsnd 988 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz, 989 int, flags) 990 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz, 991 long, msgtype, int, flags) 992 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops, 993 unsigned, nsops, const struct timespec *, timeout) 994 #else 995 /* This host kernel architecture uses a single ipc syscall; fake up 996 * wrappers for the sub-operations to hide this implementation detail. 997 * Annoyingly we can't include linux/ipc.h to get the constant definitions 998 * for the call parameter because some structs in there conflict with the 999 * sys/ipc.h ones. 
So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)

/* msgsnd() emulated via the host's multiplexed ipc syscall. */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv() emulated via ipc; version 1 passes the msg type in 'fifth'. */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
/* semtimedop() emulated via ipc; the timeout travels in the 'fifth' slot. */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

/* Translate a host socket type (plus SOCK_CLOEXEC/SOCK_NONBLOCK flag bits)
 * into the target's encoding; unknown base types pass through unchanged.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

/* Guest program-break state: current break, the initial break (the break
 * may never move below it), and the first host page beyond the heap.
 */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break without moving it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.
         */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.
     */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

/* Copy a guest fd_set (an array of abi_ulong bitmaps, n fds wide) into a
 * host fd_set.  Returns 0 or -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/* As copy_from_user_fdset, but a NULL (zero) guest address yields a NULL
 * fd_set pointer, matching select()'s optional-argument convention.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/* Copy a host fd_set back into the guest's abi_ulong bitmap form. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Convert a clock_t tick count from the host's HZ to the target's. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Byte-swap and store a host struct rusage into guest memory. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

/* Convert a target rlimit value to host rlim_t, mapping the target's
 * infinity encoding to the host's RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Values that can't round-trip through rlim_t also become infinity. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

/* Convert a host rlim_t to the target encoding; host infinity and values
 * that don't fit the target type become TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

/* Map a target RLIMIT_* constant to the host one; unknown codes pass
 * through unchanged.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/* Read a struct timeval from guest memory, byte-swapping the fields. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/* Write a struct timeval out to guest memory, byte-swapping the fields. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct
timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

/* Read a struct timezone from guest memory, byte-swapping the fields. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a struct mq_attr from guest memory. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a struct mq_attr out to guest memory. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select's timeval is converted to a timespec for pselect6. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the (kernel-updated) remaining time. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select(): a single guest pointer to a block of arguments. */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct
*sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

/* pipe2() if the host supports it; otherwise fail with ENOSYS. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

/* Create a pipe and deliver the two fds to the guest, honouring the
 * per-target register conventions of the original pipe syscall.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.
    */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/* Read a guest ip_mreq/ip_mreqn; the imr_ifindex field is only present
 * when len says the guest passed the larger ip_mreqn form.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

/* Convert a guest sockaddr to host form, with per-family fixups. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types install their own address translator. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops.
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* Convert guest ancillary data (control messages) to host format ahead
 * of a sendmsg(); msgh->msg_controllen is set to the host space used.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small).
It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each int in place. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payloads are copied through unconverted. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* Convert host ancillary data back to guest format after a recvmsg(). */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

msg_controllen = tswapal(target_msgh->msg_controllen); 1778 if (msg_controllen < sizeof (struct target_cmsghdr)) 1779 goto the_end; 1780 target_cmsg_addr = tswapal(target_msgh->msg_control); 1781 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1782 target_cmsg_start = target_cmsg; 1783 if (!target_cmsg) 1784 return -TARGET_EFAULT; 1785 1786 while (cmsg && target_cmsg) { 1787 void *data = CMSG_DATA(cmsg); 1788 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1789 1790 int len = cmsg->cmsg_len - sizeof(struct cmsghdr); 1791 int tgt_len, tgt_space; 1792 1793 /* We never copy a half-header but may copy half-data; 1794 * this is Linux's behaviour in put_cmsg(). Note that 1795 * truncation here is a guest problem (which we report 1796 * to the guest via the CTRUNC bit), unlike truncation 1797 * in target_to_host_cmsg, which is a QEMU bug. 1798 */ 1799 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1800 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1801 break; 1802 } 1803 1804 if (cmsg->cmsg_level == SOL_SOCKET) { 1805 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1806 } else { 1807 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1808 } 1809 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1810 1811 /* Payload types which need a different size of payload on 1812 * the target must adjust tgt_len here. 1813 */ 1814 switch (cmsg->cmsg_level) { 1815 case SOL_SOCKET: 1816 switch (cmsg->cmsg_type) { 1817 case SO_TIMESTAMP: 1818 tgt_len = sizeof(struct target_timeval); 1819 break; 1820 default: 1821 break; 1822 } 1823 default: 1824 tgt_len = len; 1825 break; 1826 } 1827 1828 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 1829 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1830 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 1831 } 1832 1833 /* We must now copy-and-convert len bytes of payload 1834 * into tgt_len bytes of destination space. 
Bear in mind 1835 * that in both source and destination we may be dealing 1836 * with a truncated value! 1837 */ 1838 switch (cmsg->cmsg_level) { 1839 case SOL_SOCKET: 1840 switch (cmsg->cmsg_type) { 1841 case SCM_RIGHTS: 1842 { 1843 int *fd = (int *)data; 1844 int *target_fd = (int *)target_data; 1845 int i, numfds = tgt_len / sizeof(int); 1846 1847 for (i = 0; i < numfds; i++) { 1848 __put_user(fd[i], target_fd + i); 1849 } 1850 break; 1851 } 1852 case SO_TIMESTAMP: 1853 { 1854 struct timeval *tv = (struct timeval *)data; 1855 struct target_timeval *target_tv = 1856 (struct target_timeval *)target_data; 1857 1858 if (len != sizeof(struct timeval) || 1859 tgt_len != sizeof(struct target_timeval)) { 1860 goto unimplemented; 1861 } 1862 1863 /* copy struct timeval to target */ 1864 __put_user(tv->tv_sec, &target_tv->tv_sec); 1865 __put_user(tv->tv_usec, &target_tv->tv_usec); 1866 break; 1867 } 1868 case SCM_CREDENTIALS: 1869 { 1870 struct ucred *cred = (struct ucred *)data; 1871 struct target_ucred *target_cred = 1872 (struct target_ucred *)target_data; 1873 1874 __put_user(cred->pid, &target_cred->pid); 1875 __put_user(cred->uid, &target_cred->uid); 1876 __put_user(cred->gid, &target_cred->gid); 1877 break; 1878 } 1879 default: 1880 goto unimplemented; 1881 } 1882 break; 1883 1884 case SOL_IP: 1885 switch (cmsg->cmsg_type) { 1886 case IP_TTL: 1887 { 1888 uint32_t *v = (uint32_t *)data; 1889 uint32_t *t_int = (uint32_t *)target_data; 1890 1891 if (len != sizeof(uint32_t) || 1892 tgt_len != sizeof(uint32_t)) { 1893 goto unimplemented; 1894 } 1895 __put_user(*v, t_int); 1896 break; 1897 } 1898 case IP_RECVERR: 1899 { 1900 struct errhdr_t { 1901 struct sock_extended_err ee; 1902 struct sockaddr_in offender; 1903 }; 1904 struct errhdr_t *errh = (struct errhdr_t *)data; 1905 struct errhdr_t *target_errh = 1906 (struct errhdr_t *)target_data; 1907 1908 if (len != sizeof(struct errhdr_t) || 1909 tgt_len != sizeof(struct errhdr_t)) { 1910 goto unimplemented; 1911 } 1912 
__put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1913 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1914 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1915 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1916 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1917 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1918 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1919 host_to_target_sockaddr((unsigned long) &target_errh->offender, 1920 (void *) &errh->offender, sizeof(errh->offender)); 1921 break; 1922 } 1923 default: 1924 goto unimplemented; 1925 } 1926 break; 1927 1928 case SOL_IPV6: 1929 switch (cmsg->cmsg_type) { 1930 case IPV6_HOPLIMIT: 1931 { 1932 uint32_t *v = (uint32_t *)data; 1933 uint32_t *t_int = (uint32_t *)target_data; 1934 1935 if (len != sizeof(uint32_t) || 1936 tgt_len != sizeof(uint32_t)) { 1937 goto unimplemented; 1938 } 1939 __put_user(*v, t_int); 1940 break; 1941 } 1942 case IPV6_RECVERR: 1943 { 1944 struct errhdr6_t { 1945 struct sock_extended_err ee; 1946 struct sockaddr_in6 offender; 1947 }; 1948 struct errhdr6_t *errh = (struct errhdr6_t *)data; 1949 struct errhdr6_t *target_errh = 1950 (struct errhdr6_t *)target_data; 1951 1952 if (len != sizeof(struct errhdr6_t) || 1953 tgt_len != sizeof(struct errhdr6_t)) { 1954 goto unimplemented; 1955 } 1956 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1957 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1958 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1959 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1960 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1961 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1962 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1963 host_to_target_sockaddr((unsigned long) &target_errh->offender, 1964 (void *) &errh->offender, sizeof(errh->offender)); 1965 break; 1966 } 1967 default: 1968 goto unimplemented; 1969 } 1970 break; 1971 1972 default: 
        unimplemented:
            /* Fallback for ancillary data we cannot convert: log it and
             * copy the raw bytes, zero-padding if the target buffer is
             * larger than the host payload. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Record the converted control-message length and advance both the
         * host and target cmsg cursors in lockstep.  tgt_space is clamped
         * so we never claim more space than the target buffer holds. */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

/* Byte-swap every field of a netlink message header in place.  The swap
 * is symmetric, so the same helper serves both host-to-target and
 * target-to-host conversion. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}

/* Walk a buffer of host-byte-order netlink messages, converting each
 * payload via the supplied callback (while the header is still in host
 * order) and then swapping the header itself for the target.
 * Iteration stops early at NLMSG_DONE/NLMSG_ERROR, on a malformed
 * length, or when the callback reports an error. */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Save the host-order length before the header gets swapped, so
         * the cursor can still be advanced afterwards. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            /* The error payload embeds a copy of the offending request
             * header; swap that too. */
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                /* Header is still swapped for the target even on error so
                 * the partially converted buffer stays self-consistent. */
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}

/* Mirror of host_to_target_for_each_nlmsg for the opposite direction:
 * the header is swapped to host order first, then the payload callback
 * runs on a host-order message. */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Length is validated in target byte order, before the swap. */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* nlmsg_len is already in host order here (swapped above). */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}

#ifdef CONFIG_RTNETLINK
/* Walk a run of netlink attributes (struct nlattr), converting each
 * payload via the callback while the attribute header is still in host
 * order, then swapping the header fields for the target.  'context' is
 * passed through opaquely to the callback. */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                                        (struct nlattr *,
                                                         void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}

/* Same walking scheme as above but over struct rtattr (rtnetlink
 * attributes); no pass-through context is needed here. */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Keep the host-order length so the cursor can advance after the
         * header has been swapped for the target. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}

/* Pointer to the payload that follows a struct nlattr header. */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)

/* Byte-swap the payload of one IFLA_BR_* bridge attribute in place,
 * dispatching on the attribute type to pick the payload width.
 * Attributes whose payload is raw bytes (or empty) need no swap. */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Same scheme for IFLA_BRPORT_* (bridge port) attributes: swap payloads
 * whose width the attribute type implies; leave byte arrays alone. */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Carries the link "kind" strings seen in IFLA_INFO_KIND /
 * IFLA_INFO_SLAVE_KIND so that the nested IFLA_INFO_DATA /
 * IFLA_INFO_SLAVE_DATA attributes can be dispatched by device type. */
struct linkinfo_context {
    int len;            /* length of 'name' payload */
    char *name;         /* device kind, e.g. "bridge" (not NUL-owned here) */
    int slave_len;      /* length of 'slave_name' payload */
    char *slave_name;   /* slave device kind */
};

/* Convert one IFLA_LINKINFO sub-attribute.  KIND attributes are recorded
 * in the context; the nested DATA attributes are then converted with the
 * kind-specific handler (only "bridge" is supported). */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case QEMU_IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case QEMU_IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case QEMU_IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case QEMU_IFLA_INFO_DATA:
        /* NOTE(review): assumes INFO_KIND always precedes INFO_DATA in the
         * attribute stream (li_context->name would be NULL otherwise) --
         * the kernel emits them in that order, but confirm. */
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case QEMU_IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}

/* Convert one IFLA_INET (per-device IPv4 config) sub-attribute: only the
 * u32 array of IFLA_INET_CONF needs swapping. */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    case QEMU_IFLA_INET_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}

/* Convert one IFLA_INET6 (per-device IPv6 config) sub-attribute,
 * swapping the payload according to the width the attribute implies. */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}

/* Dispatch an IFLA_AF_SPEC nested attribute by address family to the
 * matching per-family converter. */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Convert the payload of one top-level IFLA_* link attribute from host
 * to target byte order, recursing into the nested LINKINFO / AF_SPEC
 * attribute streams where necessary. */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uin8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        /* NOTE(review): rta_len passed as the nested length includes the
         * attribute header; the inner walker's bounds checks appear to
         * tolerate this, but confirm against the kernel layout. */
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Convert the payload of one IFA_* address attribute (RTM_*ADDR
 * messages) from host to target byte order. */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Convert the payload of one RTA_* route attribute (RTM_*ROUTE
 * messages) from host to target byte order. */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Thin wrappers binding the generic rtattr walker to each per-message
 * attribute converter. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}

static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}

static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}

/* Convert one rtnetlink message payload (link / addr / route families)
 * from host to target byte order: fixed header first, then the trailing
 * attribute stream.  Unknown message types are rejected. */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}

/* Entry point: convert a whole buffer of rtnetlink replies for the
 * target (used after recvmsg on a NETLINK_ROUTE socket). */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}

/* Walk a run of target-byte-order rtattrs, swapping each header to host
 * order before handing the attribute to the payload converter. */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* Length validated in target order, prior to the swap below. */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}

/* Target-to-host IFLA_* conversion: currently no link attribute payload
 * is converted; anything seen is only logged. */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Target-to-host IFA_* conversion: address payloads are raw bytes and
 * pass through unchanged. */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Target-to-host RTA_* conversion for route requests. */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Thin wrappers binding the target-to-host rtattr walker to the
 * per-message converters; walker errors are deliberately ignored. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}

static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}

static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}

/* Convert one rtnetlink request payload from target to host byte order.
 * GET requests with no body need no conversion; unknown types are
 * rejected with EOPNOTSUPP. */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}

/* Entry point: convert a whole buffer of rtnetlink requests from the
 * target (used before sendmsg on a NETLINK_ROUTE socket). */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
#endif /* CONFIG_RTNETLINK */

/* No NETLINK_AUDIT reply payload is currently converted; any message
 * reaching here is unexpected. */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }
    return 0;
}

/* Entry point: convert a buffer of NETLINK_AUDIT replies for the target. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}

/* Accept user-space audit record types (their payload is free-form text
 * needing no byte-order conversion); reject everything else. */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ...
         AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}

/* Entry point: convert a buffer of NETLINK_AUDIT requests from the target. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}

/* do_setsockopt() Must return target values and target errnos.
 *
 * Translate a guest setsockopt(2): the option value at guest address
 * optval_addr is copied in and converted (byte order, struct layout,
 * target option-name constants) before the host syscall is made.
 * Options whose conversion is not implemented fall through to the
 * 'unimplemented' label and return -TARGET_ENOPROTOOPT. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These accept either a full int or a single byte from the
             * guest; always pass a full int to the host. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accepts either ip_mreq or the larger ip_mreqn layout. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            /* struct ip_mreq_source is all network-order addresses, so it
             * can be passed through without field conversion. */
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index is host-endian; the address is
             * network byte order already. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is a fixed array of 8 32-bit words. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

set_timeout:
                /* Shared tail for SO_RCVTIMEO/SO_SNDTIMEO: convert the
                 * guest struct timeval layout to the host's. */
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* Rebuild the BPF program with host-endian instruction
                 * fields (jt/jf are single bytes and need no swap). */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* Truncate to IFNAMSIZ-1 and NUL-terminate in a local
                 * copy before handing the name to the host. */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
        default:
            goto unimplemented;
        }
        /* Common tail for the int-valued SOL_SOCKET options above. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* SO_PEERCRED returns a struct ucred; convert it field by
             * field into the guest's target_ucred layout.
             */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Never report more bytes than the host actually filled in. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  Each case translates the target
         * option constant to the host one and falls through to the
         * shared int_case handler below.
         */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): 'lv' (socklen_t) is used as the host buffer
         * length for the int-sized 'val'; this relies on
         * sizeof(socklen_t) == sizeof(int).
         */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* The socket type encoding differs between host and guest;
             * translate the host value back.
             */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back a full 32-bit value or a single byte, depending on
         * the length the guest asked for.
         */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* These IP options may be read as a single byte if the
             * guest passed a short buffer and the value fits.
             */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* Lock a guest iovec array into host memory, returning a host iovec
 * array (or NULL with errno set on failure).
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked so far (zero-length entries
     * were never locked).
     */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}

/* Release a host iovec produced by lock_iovec(), copying data back to
 * the guest when 'copy' is nonzero, and free the host array.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec() stopped locking at the first negative
                 * length, so stop unlocking here too.
                 */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}

/* Translate a guest socket type (base type plus SOCK_CLOEXEC /
 * SOCK_NONBLOCK flags) into the host encoding in place.
 * Returns 0 or -TARGET_EINVAL if a flag cannot be represented.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Neither SOCK_NONBLOCK nor the O_NONBLOCK fallback exists. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    /* Host lacks SOCK_NONBLOCK: emulate it with fcntl(O_NONBLOCK). */
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* Convert a guest sockaddr for an AF_PACKET/SOCK_PACKET socket.  Only
 * the family field is byte-swapped; the rest is copied verbatim.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}

static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};

#ifdef CONFIG_RTNETLINK
/* Translate NETLINK_ROUTE message buffers between guest and host. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */

/* Translate NETLINK_AUDIT message buffers between guest and host. */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};

/* do_socket() Must return target values and target errnos.
*/ 3632 static abi_long do_socket(int domain, int type, int protocol) 3633 { 3634 int target_type = type; 3635 int ret; 3636 3637 ret = target_to_host_sock_type(&type); 3638 if (ret) { 3639 return ret; 3640 } 3641 3642 if (domain == PF_NETLINK && !( 3643 #ifdef CONFIG_RTNETLINK 3644 protocol == NETLINK_ROUTE || 3645 #endif 3646 protocol == NETLINK_KOBJECT_UEVENT || 3647 protocol == NETLINK_AUDIT)) { 3648 return -EPFNOSUPPORT; 3649 } 3650 3651 if (domain == AF_PACKET || 3652 (domain == AF_INET && type == SOCK_PACKET)) { 3653 protocol = tswap16(protocol); 3654 } 3655 3656 ret = get_errno(socket(domain, type, protocol)); 3657 if (ret >= 0) { 3658 ret = sock_flags_fixup(ret, target_type); 3659 if (type == SOCK_PACKET) { 3660 /* Manage an obsolete case : 3661 * if socket type is SOCK_PACKET, bind by name 3662 */ 3663 fd_trans_register(ret, &target_packet_trans); 3664 } else if (domain == PF_NETLINK) { 3665 switch (protocol) { 3666 #ifdef CONFIG_RTNETLINK 3667 case NETLINK_ROUTE: 3668 fd_trans_register(ret, &target_netlink_route_trans); 3669 break; 3670 #endif 3671 case NETLINK_KOBJECT_UEVENT: 3672 /* nothing to do: messages are strings */ 3673 break; 3674 case NETLINK_AUDIT: 3675 fd_trans_register(ret, &target_netlink_audit_trans); 3676 break; 3677 default: 3678 g_assert_not_reached(); 3679 } 3680 } 3681 } 3682 return ret; 3683 } 3684 3685 /* do_bind() Must return target values and target errnos. */ 3686 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3687 socklen_t addrlen) 3688 { 3689 void *addr; 3690 abi_long ret; 3691 3692 if ((int)addrlen < 0) { 3693 return -TARGET_EINVAL; 3694 } 3695 3696 addr = alloca(addrlen+1); 3697 3698 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3699 if (ret) 3700 return ret; 3701 3702 return get_errno(bind(sockfd, addr, addrlen)); 3703 } 3704 3705 /* do_connect() Must return target values and target errnos. 
 */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 leaves room for sockaddr_un NUL termination. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): the control buffer is sized at twice the guest's
     * controllen, presumably to allow for host cmsg headers being larger
     * than the guest's -- confirm against target_to_host_cmsg().
     */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* The fd has a data translator (e.g. netlink): run it on a
             * copy of the first iovec buffer before sending.
             */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success, return the byte count from recvmsg. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

/* Lock the guest msghdr and dispatch to do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}

/* do_accept4() Must return target values and target errnos.
*/ 3894 static abi_long do_accept4(int fd, abi_ulong target_addr, 3895 abi_ulong target_addrlen_addr, int flags) 3896 { 3897 socklen_t addrlen; 3898 void *addr; 3899 abi_long ret; 3900 int host_flags; 3901 3902 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3903 3904 if (target_addr == 0) { 3905 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3906 } 3907 3908 /* linux returns EINVAL if addrlen pointer is invalid */ 3909 if (get_user_u32(addrlen, target_addrlen_addr)) 3910 return -TARGET_EINVAL; 3911 3912 if ((int)addrlen < 0) { 3913 return -TARGET_EINVAL; 3914 } 3915 3916 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3917 return -TARGET_EINVAL; 3918 3919 addr = alloca(addrlen); 3920 3921 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags)); 3922 if (!is_error(ret)) { 3923 host_to_target_sockaddr(target_addr, addr, addrlen); 3924 if (put_user_u32(addrlen, target_addrlen_addr)) 3925 ret = -TARGET_EFAULT; 3926 } 3927 return ret; 3928 } 3929 3930 /* do_getpeername() Must return target values and target errnos. */ 3931 static abi_long do_getpeername(int fd, abi_ulong target_addr, 3932 abi_ulong target_addrlen_addr) 3933 { 3934 socklen_t addrlen; 3935 void *addr; 3936 abi_long ret; 3937 3938 if (get_user_u32(addrlen, target_addrlen_addr)) 3939 return -TARGET_EFAULT; 3940 3941 if ((int)addrlen < 0) { 3942 return -TARGET_EINVAL; 3943 } 3944 3945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3946 return -TARGET_EFAULT; 3947 3948 addr = alloca(addrlen); 3949 3950 ret = get_errno(getpeername(fd, addr, &addrlen)); 3951 if (!is_error(ret)) { 3952 host_to_target_sockaddr(target_addr, addr, addrlen); 3953 if (put_user_u32(addrlen, target_addrlen_addr)) 3954 ret = -TARGET_EFAULT; 3955 } 3956 return ret; 3957 } 3958 3959 /* do_getsockname() Must return target values and target errnos. 
*/ 3960 static abi_long do_getsockname(int fd, abi_ulong target_addr, 3961 abi_ulong target_addrlen_addr) 3962 { 3963 socklen_t addrlen; 3964 void *addr; 3965 abi_long ret; 3966 3967 if (get_user_u32(addrlen, target_addrlen_addr)) 3968 return -TARGET_EFAULT; 3969 3970 if ((int)addrlen < 0) { 3971 return -TARGET_EINVAL; 3972 } 3973 3974 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3975 return -TARGET_EFAULT; 3976 3977 addr = alloca(addrlen); 3978 3979 ret = get_errno(getsockname(fd, addr, &addrlen)); 3980 if (!is_error(ret)) { 3981 host_to_target_sockaddr(target_addr, addr, addrlen); 3982 if (put_user_u32(addrlen, target_addrlen_addr)) 3983 ret = -TARGET_EFAULT; 3984 } 3985 return ret; 3986 } 3987 3988 /* do_socketpair() Must return target values and target errnos. */ 3989 static abi_long do_socketpair(int domain, int type, int protocol, 3990 abi_ulong target_tab_addr) 3991 { 3992 int tab[2]; 3993 abi_long ret; 3994 3995 target_to_host_sock_type(&type); 3996 3997 ret = get_errno(socketpair(domain, type, protocol, tab)); 3998 if (!is_error(ret)) { 3999 if (put_user_s32(tab[0], target_tab_addr) 4000 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 4001 ret = -TARGET_EFAULT; 4002 } 4003 return ret; 4004 } 4005 4006 /* do_sendto() Must return target values and target errnos. 
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate on a private copy so the locked guest buffer is
         * never modified; 'copy_msg' remembers the original pointer.
         */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 leaves room for sockaddr_un NUL termination. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Free the translated copy and unlock the original buffer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Unlock without copying anything back on error. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}

#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif

#define N_SHM_REGIONS 32

/* Bookkeeping for guest shmat() mappings. */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif

/* Convert the guest ipc_perm embedded in a semid64_ds at target_addr
 * into the host representation.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip =
        &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode/__seq field widths vary by target architecture. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Convert a host ipc_perm into the guest semid64_ds at target_addr. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode/__seq field widths vary by target architecture. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Convert a guest semid64_ds to a host semid_ds (permission block plus
 * the nsems/otime/ctime fields).
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Convert a host semid_ds back into the guest semid64_ds. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest layout of struct seminfo (all plain ints). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host seminfo into the guest's target_seminfo, swapping each
 * field to the guest byte order.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
unlock_user_struct(target_seminfo, target_addr, 1); 4336 return 0; 4337 } 4338 4339 union semun { 4340 int val; 4341 struct semid_ds *buf; 4342 unsigned short *array; 4343 struct seminfo *__buf; 4344 }; 4345 4346 union target_semun { 4347 int val; 4348 abi_ulong buf; 4349 abi_ulong array; 4350 abi_ulong __buf; 4351 }; 4352 4353 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 4354 abi_ulong target_addr) 4355 { 4356 int nsems; 4357 unsigned short *array; 4358 union semun semun; 4359 struct semid_ds semid_ds; 4360 int i, ret; 4361 4362 semun.buf = &semid_ds; 4363 4364 ret = semctl(semid, 0, IPC_STAT, semun); 4365 if (ret == -1) 4366 return get_errno(ret); 4367 4368 nsems = semid_ds.sem_nsems; 4369 4370 *host_array = g_try_new(unsigned short, nsems); 4371 if (!*host_array) { 4372 return -TARGET_ENOMEM; 4373 } 4374 array = lock_user(VERIFY_READ, target_addr, 4375 nsems*sizeof(unsigned short), 1); 4376 if (!array) { 4377 g_free(*host_array); 4378 return -TARGET_EFAULT; 4379 } 4380 4381 for(i=0; i<nsems; i++) { 4382 __get_user((*host_array)[i], &array[i]); 4383 } 4384 unlock_user(array, target_addr, 0); 4385 4386 return 0; 4387 } 4388 4389 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 4390 unsigned short **host_array) 4391 { 4392 int nsems; 4393 unsigned short *array; 4394 union semun semun; 4395 struct semid_ds semid_ds; 4396 int i, ret; 4397 4398 semun.buf = &semid_ds; 4399 4400 ret = semctl(semid, 0, IPC_STAT, semun); 4401 if (ret == -1) 4402 return get_errno(ret); 4403 4404 nsems = semid_ds.sem_nsems; 4405 4406 array = lock_user(VERIFY_WRITE, target_addr, 4407 nsems*sizeof(unsigned short), 0); 4408 if (!array) 4409 return -TARGET_EFAULT; 4410 4411 for(i=0; i<nsems; i++) { 4412 __put_user((*host_array)[i], &array[i]); 4413 } 4414 g_free(*host_array); 4415 unlock_user(array, target_addr, 1); 4416 4417 return 0; 4418 } 4419 4420 static inline abi_long do_semctl(int semid, int semnum, int cmd, 4421 
abi_ulong target_arg) 4422 { 4423 union target_semun target_su = { .buf = target_arg }; 4424 union semun arg; 4425 struct semid_ds dsarg; 4426 unsigned short *array = NULL; 4427 struct seminfo seminfo; 4428 abi_long ret = -TARGET_EINVAL; 4429 abi_long err; 4430 cmd &= 0xff; 4431 4432 switch( cmd ) { 4433 case GETVAL: 4434 case SETVAL: 4435 /* In 64 bit cross-endian situations, we will erroneously pick up 4436 * the wrong half of the union for the "val" element. To rectify 4437 * this, the entire 8-byte structure is byteswapped, followed by 4438 * a swap of the 4 byte val field. In other cases, the data is 4439 * already in proper host byte order. */ 4440 if (sizeof(target_su.val) != (sizeof(target_su.buf))) { 4441 target_su.buf = tswapal(target_su.buf); 4442 arg.val = tswap32(target_su.val); 4443 } else { 4444 arg.val = target_su.val; 4445 } 4446 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4447 break; 4448 case GETALL: 4449 case SETALL: 4450 err = target_to_host_semarray(semid, &array, target_su.array); 4451 if (err) 4452 return err; 4453 arg.array = array; 4454 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4455 err = host_to_target_semarray(semid, target_su.array, &array); 4456 if (err) 4457 return err; 4458 break; 4459 case IPC_STAT: 4460 case IPC_SET: 4461 case SEM_STAT: 4462 err = target_to_host_semid_ds(&dsarg, target_su.buf); 4463 if (err) 4464 return err; 4465 arg.buf = &dsarg; 4466 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4467 err = host_to_target_semid_ds(target_su.buf, &dsarg); 4468 if (err) 4469 return err; 4470 break; 4471 case IPC_INFO: 4472 case SEM_INFO: 4473 arg.__buf = &seminfo; 4474 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4475 err = host_to_target_seminfo(target_su.__buf, &seminfo); 4476 if (err) 4477 return err; 4478 break; 4479 case IPC_RMID: 4480 case GETPID: 4481 case GETNCNT: 4482 case GETZCNT: 4483 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 4484 break; 4485 } 4486 4487 return ret; 4488 } 4489 4490 struct 
target_sembuf { 4491 unsigned short sem_num; 4492 short sem_op; 4493 short sem_flg; 4494 }; 4495 4496 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 4497 abi_ulong target_addr, 4498 unsigned nsops) 4499 { 4500 struct target_sembuf *target_sembuf; 4501 int i; 4502 4503 target_sembuf = lock_user(VERIFY_READ, target_addr, 4504 nsops*sizeof(struct target_sembuf), 1); 4505 if (!target_sembuf) 4506 return -TARGET_EFAULT; 4507 4508 for(i=0; i<nsops; i++) { 4509 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 4510 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 4511 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 4512 } 4513 4514 unlock_user(target_sembuf, target_addr, 0); 4515 4516 return 0; 4517 } 4518 4519 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 4520 { 4521 struct sembuf sops[nsops]; 4522 4523 if (target_to_host_sembuf(sops, ptr, nsops)) 4524 return -TARGET_EFAULT; 4525 4526 return get_errno(safe_semtimedop(semid, sops, nsops, NULL)); 4527 } 4528 4529 struct target_msqid_ds 4530 { 4531 struct target_ipc_perm msg_perm; 4532 abi_ulong msg_stime; 4533 #if TARGET_ABI_BITS == 32 4534 abi_ulong __unused1; 4535 #endif 4536 abi_ulong msg_rtime; 4537 #if TARGET_ABI_BITS == 32 4538 abi_ulong __unused2; 4539 #endif 4540 abi_ulong msg_ctime; 4541 #if TARGET_ABI_BITS == 32 4542 abi_ulong __unused3; 4543 #endif 4544 abi_ulong __msg_cbytes; 4545 abi_ulong msg_qnum; 4546 abi_ulong msg_qbytes; 4547 abi_ulong msg_lspid; 4548 abi_ulong msg_lrpid; 4549 abi_ulong __unused4; 4550 abi_ulong __unused5; 4551 }; 4552 4553 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 4554 abi_ulong target_addr) 4555 { 4556 struct target_msqid_ds *target_md; 4557 4558 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 4559 return -TARGET_EFAULT; 4560 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 4561 return -TARGET_EFAULT; 4562 host_md->msg_stime = 
tswapal(target_md->msg_stime); 4563 host_md->msg_rtime = tswapal(target_md->msg_rtime); 4564 host_md->msg_ctime = tswapal(target_md->msg_ctime); 4565 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 4566 host_md->msg_qnum = tswapal(target_md->msg_qnum); 4567 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 4568 host_md->msg_lspid = tswapal(target_md->msg_lspid); 4569 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 4570 unlock_user_struct(target_md, target_addr, 0); 4571 return 0; 4572 } 4573 4574 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 4575 struct msqid_ds *host_md) 4576 { 4577 struct target_msqid_ds *target_md; 4578 4579 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 4580 return -TARGET_EFAULT; 4581 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 4582 return -TARGET_EFAULT; 4583 target_md->msg_stime = tswapal(host_md->msg_stime); 4584 target_md->msg_rtime = tswapal(host_md->msg_rtime); 4585 target_md->msg_ctime = tswapal(host_md->msg_ctime); 4586 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 4587 target_md->msg_qnum = tswapal(host_md->msg_qnum); 4588 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 4589 target_md->msg_lspid = tswapal(host_md->msg_lspid); 4590 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 4591 unlock_user_struct(target_md, target_addr, 1); 4592 return 0; 4593 } 4594 4595 struct target_msginfo { 4596 int msgpool; 4597 int msgmap; 4598 int msgmax; 4599 int msgmnb; 4600 int msgmni; 4601 int msgssz; 4602 int msgtql; 4603 unsigned short int msgseg; 4604 }; 4605 4606 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 4607 struct msginfo *host_msginfo) 4608 { 4609 struct target_msginfo *target_msginfo; 4610 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 4611 return -TARGET_EFAULT; 4612 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 4613 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 4614 
__put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 4615 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 4616 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 4617 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 4618 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 4619 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 4620 unlock_user_struct(target_msginfo, target_addr, 1); 4621 return 0; 4622 } 4623 4624 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 4625 { 4626 struct msqid_ds dsarg; 4627 struct msginfo msginfo; 4628 abi_long ret = -TARGET_EINVAL; 4629 4630 cmd &= 0xff; 4631 4632 switch (cmd) { 4633 case IPC_STAT: 4634 case IPC_SET: 4635 case MSG_STAT: 4636 if (target_to_host_msqid_ds(&dsarg,ptr)) 4637 return -TARGET_EFAULT; 4638 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 4639 if (host_to_target_msqid_ds(ptr,&dsarg)) 4640 return -TARGET_EFAULT; 4641 break; 4642 case IPC_RMID: 4643 ret = get_errno(msgctl(msgid, cmd, NULL)); 4644 break; 4645 case IPC_INFO: 4646 case MSG_INFO: 4647 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 4648 if (host_to_target_msginfo(ptr, &msginfo)) 4649 return -TARGET_EFAULT; 4650 break; 4651 } 4652 4653 return ret; 4654 } 4655 4656 struct target_msgbuf { 4657 abi_long mtype; 4658 char mtext[1]; 4659 }; 4660 4661 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 4662 ssize_t msgsz, int msgflg) 4663 { 4664 struct target_msgbuf *target_mb; 4665 struct msgbuf *host_mb; 4666 abi_long ret = 0; 4667 4668 if (msgsz < 0) { 4669 return -TARGET_EINVAL; 4670 } 4671 4672 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 4673 return -TARGET_EFAULT; 4674 host_mb = g_try_malloc(msgsz + sizeof(long)); 4675 if (!host_mb) { 4676 unlock_user_struct(target_mb, msgp, 0); 4677 return -TARGET_ENOMEM; 4678 } 4679 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 4680 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 4681 ret = 
get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg)); 4682 g_free(host_mb); 4683 unlock_user_struct(target_mb, msgp, 0); 4684 4685 return ret; 4686 } 4687 4688 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 4689 ssize_t msgsz, abi_long msgtyp, 4690 int msgflg) 4691 { 4692 struct target_msgbuf *target_mb; 4693 char *target_mtext; 4694 struct msgbuf *host_mb; 4695 abi_long ret = 0; 4696 4697 if (msgsz < 0) { 4698 return -TARGET_EINVAL; 4699 } 4700 4701 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 4702 return -TARGET_EFAULT; 4703 4704 host_mb = g_try_malloc(msgsz + sizeof(long)); 4705 if (!host_mb) { 4706 ret = -TARGET_ENOMEM; 4707 goto end; 4708 } 4709 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 4710 4711 if (ret > 0) { 4712 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 4713 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 4714 if (!target_mtext) { 4715 ret = -TARGET_EFAULT; 4716 goto end; 4717 } 4718 memcpy(target_mb->mtext, host_mb->mtext, ret); 4719 unlock_user(target_mtext, target_mtext_addr, ret); 4720 } 4721 4722 target_mb->mtype = tswapal(host_mb->mtype); 4723 4724 end: 4725 if (target_mb) 4726 unlock_user_struct(target_mb, msgp, 1); 4727 g_free(host_mb); 4728 return ret; 4729 } 4730 4731 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 4732 abi_ulong target_addr) 4733 { 4734 struct target_shmid_ds *target_sd; 4735 4736 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4737 return -TARGET_EFAULT; 4738 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 4739 return -TARGET_EFAULT; 4740 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4741 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 4742 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4743 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4744 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4745 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4746 
__get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4747 unlock_user_struct(target_sd, target_addr, 0); 4748 return 0; 4749 } 4750 4751 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 4752 struct shmid_ds *host_sd) 4753 { 4754 struct target_shmid_ds *target_sd; 4755 4756 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4757 return -TARGET_EFAULT; 4758 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 4759 return -TARGET_EFAULT; 4760 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4761 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 4762 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4763 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4764 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4765 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4766 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4767 unlock_user_struct(target_sd, target_addr, 1); 4768 return 0; 4769 } 4770 4771 struct target_shminfo { 4772 abi_ulong shmmax; 4773 abi_ulong shmmin; 4774 abi_ulong shmmni; 4775 abi_ulong shmseg; 4776 abi_ulong shmall; 4777 }; 4778 4779 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 4780 struct shminfo *host_shminfo) 4781 { 4782 struct target_shminfo *target_shminfo; 4783 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 4784 return -TARGET_EFAULT; 4785 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 4786 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 4787 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 4788 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 4789 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 4790 unlock_user_struct(target_shminfo, target_addr, 1); 4791 return 0; 4792 } 4793 4794 struct target_shm_info { 4795 int used_ids; 4796 abi_ulong shm_tot; 4797 abi_ulong shm_rss; 4798 abi_ulong shm_swp; 4799 abi_ulong swap_attempts; 4800 abi_ulong swap_successes; 4801 }; 
4802 4803 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 4804 struct shm_info *host_shm_info) 4805 { 4806 struct target_shm_info *target_shm_info; 4807 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 4808 return -TARGET_EFAULT; 4809 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 4810 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 4811 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 4812 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 4813 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 4814 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 4815 unlock_user_struct(target_shm_info, target_addr, 1); 4816 return 0; 4817 } 4818 4819 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 4820 { 4821 struct shmid_ds dsarg; 4822 struct shminfo shminfo; 4823 struct shm_info shm_info; 4824 abi_long ret = -TARGET_EINVAL; 4825 4826 cmd &= 0xff; 4827 4828 switch(cmd) { 4829 case IPC_STAT: 4830 case IPC_SET: 4831 case SHM_STAT: 4832 if (target_to_host_shmid_ds(&dsarg, buf)) 4833 return -TARGET_EFAULT; 4834 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 4835 if (host_to_target_shmid_ds(buf, &dsarg)) 4836 return -TARGET_EFAULT; 4837 break; 4838 case IPC_INFO: 4839 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 4840 if (host_to_target_shminfo(buf, &shminfo)) 4841 return -TARGET_EFAULT; 4842 break; 4843 case SHM_INFO: 4844 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 4845 if (host_to_target_shm_info(buf, &shm_info)) 4846 return -TARGET_EFAULT; 4847 break; 4848 case IPC_RMID: 4849 case SHM_LOCK: 4850 case SHM_UNLOCK: 4851 ret = get_errno(shmctl(shmid, cmd, NULL)); 4852 break; 4853 } 4854 4855 return ret; 4856 } 4857 4858 #ifndef TARGET_FORCE_SHMLBA 4859 /* For most architectures, SHMLBA is the same as the page size; 4860 * some architectures have larger values, in which 
case they should 4861 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function. 4862 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA 4863 * and defining its own value for SHMLBA. 4864 * 4865 * The kernel also permits SHMLBA to be set by the architecture to a 4866 * value larger than the page size without setting __ARCH_FORCE_SHMLBA; 4867 * this means that addresses are rounded to the large size if 4868 * SHM_RND is set but addresses not aligned to that size are not rejected 4869 * as long as they are at least page-aligned. Since the only architecture 4870 * which uses this is ia64 this code doesn't provide for that oddity. 4871 */ 4872 static inline abi_ulong target_shmlba(CPUArchState *cpu_env) 4873 { 4874 return TARGET_PAGE_SIZE; 4875 } 4876 #endif 4877 4878 static inline abi_ulong do_shmat(CPUArchState *cpu_env, 4879 int shmid, abi_ulong shmaddr, int shmflg) 4880 { 4881 abi_long raddr; 4882 void *host_raddr; 4883 struct shmid_ds shm_info; 4884 int i,ret; 4885 abi_ulong shmlba; 4886 4887 /* find out the length of the shared memory segment */ 4888 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 4889 if (is_error(ret)) { 4890 /* can't get length, bail out */ 4891 return ret; 4892 } 4893 4894 shmlba = target_shmlba(cpu_env); 4895 4896 if (shmaddr & (shmlba - 1)) { 4897 if (shmflg & SHM_RND) { 4898 shmaddr &= ~(shmlba - 1); 4899 } else { 4900 return -TARGET_EINVAL; 4901 } 4902 } 4903 4904 mmap_lock(); 4905 4906 if (shmaddr) 4907 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 4908 else { 4909 abi_ulong mmap_start; 4910 4911 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 4912 4913 if (mmap_start == -1) { 4914 errno = ENOMEM; 4915 host_raddr = (void *)-1; 4916 } else 4917 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 4918 } 4919 4920 if (host_raddr == (void *)-1) { 4921 mmap_unlock(); 4922 return get_errno((long)host_raddr); 4923 } 4924 raddr=h2g((unsigned long)host_raddr); 4925 4926 
page_set_flags(raddr, raddr + shm_info.shm_segsz, 4927 PAGE_VALID | PAGE_READ | 4928 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 4929 4930 for (i = 0; i < N_SHM_REGIONS; i++) { 4931 if (!shm_regions[i].in_use) { 4932 shm_regions[i].in_use = true; 4933 shm_regions[i].start = raddr; 4934 shm_regions[i].size = shm_info.shm_segsz; 4935 break; 4936 } 4937 } 4938 4939 mmap_unlock(); 4940 return raddr; 4941 4942 } 4943 4944 static inline abi_long do_shmdt(abi_ulong shmaddr) 4945 { 4946 int i; 4947 4948 for (i = 0; i < N_SHM_REGIONS; ++i) { 4949 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { 4950 shm_regions[i].in_use = false; 4951 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 4952 break; 4953 } 4954 } 4955 4956 return get_errno(shmdt(g2h(shmaddr))); 4957 } 4958 4959 #ifdef TARGET_NR_ipc 4960 /* ??? This only works with linear mappings. */ 4961 /* do_ipc() must return target values and target errnos. */ 4962 static abi_long do_ipc(CPUArchState *cpu_env, 4963 unsigned int call, abi_long first, 4964 abi_long second, abi_long third, 4965 abi_long ptr, abi_long fifth) 4966 { 4967 int version; 4968 abi_long ret = 0; 4969 4970 version = call >> 16; 4971 call &= 0xffff; 4972 4973 switch (call) { 4974 case IPCOP_semop: 4975 ret = do_semop(first, ptr, second); 4976 break; 4977 4978 case IPCOP_semget: 4979 ret = get_errno(semget(first, second, third)); 4980 break; 4981 4982 case IPCOP_semctl: { 4983 /* The semun argument to semctl is passed by value, so dereference the 4984 * ptr argument. 
*/ 4985 abi_ulong atptr; 4986 get_user_ual(atptr, ptr); 4987 ret = do_semctl(first, second, third, atptr); 4988 break; 4989 } 4990 4991 case IPCOP_msgget: 4992 ret = get_errno(msgget(first, second)); 4993 break; 4994 4995 case IPCOP_msgsnd: 4996 ret = do_msgsnd(first, ptr, second, third); 4997 break; 4998 4999 case IPCOP_msgctl: 5000 ret = do_msgctl(first, second, ptr); 5001 break; 5002 5003 case IPCOP_msgrcv: 5004 switch (version) { 5005 case 0: 5006 { 5007 struct target_ipc_kludge { 5008 abi_long msgp; 5009 abi_long msgtyp; 5010 } *tmp; 5011 5012 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 5013 ret = -TARGET_EFAULT; 5014 break; 5015 } 5016 5017 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 5018 5019 unlock_user_struct(tmp, ptr, 0); 5020 break; 5021 } 5022 default: 5023 ret = do_msgrcv(first, ptr, second, fifth, third); 5024 } 5025 break; 5026 5027 case IPCOP_shmat: 5028 switch (version) { 5029 default: 5030 { 5031 abi_ulong raddr; 5032 raddr = do_shmat(cpu_env, first, ptr, second); 5033 if (is_error(raddr)) 5034 return get_errno(raddr); 5035 if (put_user_ual(raddr, third)) 5036 return -TARGET_EFAULT; 5037 break; 5038 } 5039 case 1: 5040 ret = -TARGET_EINVAL; 5041 break; 5042 } 5043 break; 5044 case IPCOP_shmdt: 5045 ret = do_shmdt(ptr); 5046 break; 5047 5048 case IPCOP_shmget: 5049 /* IPC_* flag values are the same on all linux platforms */ 5050 ret = get_errno(shmget(first, second, third)); 5051 break; 5052 5053 /* IPC_* and SHM_* command values are the same on all linux platforms */ 5054 case IPCOP_shmctl: 5055 ret = do_shmctl(first, second, ptr); 5056 break; 5057 default: 5058 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 5059 ret = -TARGET_ENOSYS; 5060 break; 5061 } 5062 return ret; 5063 } 5064 #endif 5065 5066 /* kernel structure types definitions */ 5067 5068 #define STRUCT(name, ...) 
STRUCT_ ## name, 5069 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 5070 enum { 5071 #include "syscall_types.h" 5072 STRUCT_MAX 5073 }; 5074 #undef STRUCT 5075 #undef STRUCT_SPECIAL 5076 5077 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 5078 #define STRUCT_SPECIAL(name) 5079 #include "syscall_types.h" 5080 #undef STRUCT 5081 #undef STRUCT_SPECIAL 5082 5083 typedef struct IOCTLEntry IOCTLEntry; 5084 5085 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 5086 int fd, int cmd, abi_long arg); 5087 5088 struct IOCTLEntry { 5089 int target_cmd; 5090 unsigned int host_cmd; 5091 const char *name; 5092 int access; 5093 do_ioctl_fn *do_ioctl; 5094 const argtype arg_type[5]; 5095 }; 5096 5097 #define IOC_R 0x0001 5098 #define IOC_W 0x0002 5099 #define IOC_RW (IOC_R | IOC_W) 5100 5101 #define MAX_STRUCT_SIZE 4096 5102 5103 #ifdef CONFIG_FIEMAP 5104 /* So fiemap access checks don't overflow on 32 bit systems. 5105 * This is very slightly smaller than the limit imposed by 5106 * the underlying kernel. 5107 */ 5108 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 5109 / sizeof(struct fiemap_extent)) 5110 5111 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 5112 int fd, int cmd, abi_long arg) 5113 { 5114 /* The parameter for this ioctl is a struct fiemap followed 5115 * by an array of struct fiemap_extent whose size is set 5116 * in fiemap->fm_extent_count. The array is filled in by the 5117 * ioctl. 
5118 */ 5119 int target_size_in, target_size_out; 5120 struct fiemap *fm; 5121 const argtype *arg_type = ie->arg_type; 5122 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 5123 void *argptr, *p; 5124 abi_long ret; 5125 int i, extent_size = thunk_type_size(extent_arg_type, 0); 5126 uint32_t outbufsz; 5127 int free_fm = 0; 5128 5129 assert(arg_type[0] == TYPE_PTR); 5130 assert(ie->access == IOC_RW); 5131 arg_type++; 5132 target_size_in = thunk_type_size(arg_type, 0); 5133 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 5134 if (!argptr) { 5135 return -TARGET_EFAULT; 5136 } 5137 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5138 unlock_user(argptr, arg, 0); 5139 fm = (struct fiemap *)buf_temp; 5140 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 5141 return -TARGET_EINVAL; 5142 } 5143 5144 outbufsz = sizeof (*fm) + 5145 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 5146 5147 if (outbufsz > MAX_STRUCT_SIZE) { 5148 /* We can't fit all the extents into the fixed size buffer. 5149 * Allocate one that is large enough and use it instead. 
5150 */ 5151 fm = g_try_malloc(outbufsz); 5152 if (!fm) { 5153 return -TARGET_ENOMEM; 5154 } 5155 memcpy(fm, buf_temp, sizeof(struct fiemap)); 5156 free_fm = 1; 5157 } 5158 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm)); 5159 if (!is_error(ret)) { 5160 target_size_out = target_size_in; 5161 /* An extent_count of 0 means we were only counting the extents 5162 * so there are no structs to copy 5163 */ 5164 if (fm->fm_extent_count != 0) { 5165 target_size_out += fm->fm_mapped_extents * extent_size; 5166 } 5167 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 5168 if (!argptr) { 5169 ret = -TARGET_EFAULT; 5170 } else { 5171 /* Convert the struct fiemap */ 5172 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 5173 if (fm->fm_extent_count != 0) { 5174 p = argptr + target_size_in; 5175 /* ...and then all the struct fiemap_extents */ 5176 for (i = 0; i < fm->fm_mapped_extents; i++) { 5177 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 5178 THUNK_TARGET); 5179 p += extent_size; 5180 } 5181 } 5182 unlock_user(argptr, arg, target_size_out); 5183 } 5184 } 5185 if (free_fm) { 5186 g_free(fm); 5187 } 5188 return ret; 5189 } 5190 #endif 5191 5192 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 5193 int fd, int cmd, abi_long arg) 5194 { 5195 const argtype *arg_type = ie->arg_type; 5196 int target_size; 5197 void *argptr; 5198 int ret; 5199 struct ifconf *host_ifconf; 5200 uint32_t outbufsz; 5201 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 5202 int target_ifreq_size; 5203 int nb_ifreq; 5204 int free_buf = 0; 5205 int i; 5206 int target_ifc_len; 5207 abi_long target_ifc_buf; 5208 int host_ifc_len; 5209 char *host_ifc_buf; 5210 5211 assert(arg_type[0] == TYPE_PTR); 5212 assert(ie->access == IOC_RW); 5213 5214 arg_type++; 5215 target_size = thunk_type_size(arg_type, 0); 5216 5217 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5218 if (!argptr) 5219 return -TARGET_EFAULT; 5220 thunk_convert(buf_temp, 
argptr, arg_type, THUNK_HOST); 5221 unlock_user(argptr, arg, 0); 5222 5223 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 5224 target_ifc_len = host_ifconf->ifc_len; 5225 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 5226 5227 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 5228 nb_ifreq = target_ifc_len / target_ifreq_size; 5229 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 5230 5231 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 5232 if (outbufsz > MAX_STRUCT_SIZE) { 5233 /* We can't fit all the extents into the fixed size buffer. 5234 * Allocate one that is large enough and use it instead. 5235 */ 5236 host_ifconf = malloc(outbufsz); 5237 if (!host_ifconf) { 5238 return -TARGET_ENOMEM; 5239 } 5240 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 5241 free_buf = 1; 5242 } 5243 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 5244 5245 host_ifconf->ifc_len = host_ifc_len; 5246 host_ifconf->ifc_buf = host_ifc_buf; 5247 5248 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf)); 5249 if (!is_error(ret)) { 5250 /* convert host ifc_len to target ifc_len */ 5251 5252 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 5253 target_ifc_len = nb_ifreq * target_ifreq_size; 5254 host_ifconf->ifc_len = target_ifc_len; 5255 5256 /* restore target ifc_buf */ 5257 5258 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 5259 5260 /* copy struct ifconf to target user */ 5261 5262 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5263 if (!argptr) 5264 return -TARGET_EFAULT; 5265 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 5266 unlock_user(argptr, arg, target_size); 5267 5268 /* copy ifreq[] to target user */ 5269 5270 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 5271 for (i = 0; i < nb_ifreq ; i++) { 5272 thunk_convert(argptr + i * target_ifreq_size, 5273 host_ifc_buf + i * sizeof(struct ifreq), 5274 ifreq_arg_type, THUNK_TARGET); 5275 } 5276 unlock_user(argptr, 
target_ifc_buf, target_ifc_len); 5277 } 5278 5279 if (free_buf) { 5280 free(host_ifconf); 5281 } 5282 5283 return ret; 5284 } 5285 5286 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5287 int cmd, abi_long arg) 5288 { 5289 void *argptr; 5290 struct dm_ioctl *host_dm; 5291 abi_long guest_data; 5292 uint32_t guest_data_size; 5293 int target_size; 5294 const argtype *arg_type = ie->arg_type; 5295 abi_long ret; 5296 void *big_buf = NULL; 5297 char *host_data; 5298 5299 arg_type++; 5300 target_size = thunk_type_size(arg_type, 0); 5301 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5302 if (!argptr) { 5303 ret = -TARGET_EFAULT; 5304 goto out; 5305 } 5306 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5307 unlock_user(argptr, arg, 0); 5308 5309 /* buf_temp is too small, so fetch things into a bigger buffer */ 5310 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 5311 memcpy(big_buf, buf_temp, target_size); 5312 buf_temp = big_buf; 5313 host_dm = big_buf; 5314 5315 guest_data = arg + host_dm->data_start; 5316 if ((guest_data - arg) < 0) { 5317 ret = -TARGET_EINVAL; 5318 goto out; 5319 } 5320 guest_data_size = host_dm->data_size - host_dm->data_start; 5321 host_data = (char*)host_dm + host_dm->data_start; 5322 5323 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 5324 if (!argptr) { 5325 ret = -TARGET_EFAULT; 5326 goto out; 5327 } 5328 5329 switch (ie->host_cmd) { 5330 case DM_REMOVE_ALL: 5331 case DM_LIST_DEVICES: 5332 case DM_DEV_CREATE: 5333 case DM_DEV_REMOVE: 5334 case DM_DEV_SUSPEND: 5335 case DM_DEV_STATUS: 5336 case DM_DEV_WAIT: 5337 case DM_TABLE_STATUS: 5338 case DM_TABLE_CLEAR: 5339 case DM_TABLE_DEPS: 5340 case DM_LIST_VERSIONS: 5341 /* no input data */ 5342 break; 5343 case DM_DEV_RENAME: 5344 case DM_DEV_SET_GEOMETRY: 5345 /* data contains only strings */ 5346 memcpy(host_data, argptr, guest_data_size); 5347 break; 5348 case DM_TARGET_MSG: 5349 memcpy(host_data, argptr, 
guest_data_size); 5350 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 5351 break; 5352 case DM_TABLE_LOAD: 5353 { 5354 void *gspec = argptr; 5355 void *cur_data = host_data; 5356 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5357 int spec_size = thunk_type_size(arg_type, 0); 5358 int i; 5359 5360 for (i = 0; i < host_dm->target_count; i++) { 5361 struct dm_target_spec *spec = cur_data; 5362 uint32_t next; 5363 int slen; 5364 5365 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 5366 slen = strlen((char*)gspec + spec_size) + 1; 5367 next = spec->next; 5368 spec->next = sizeof(*spec) + slen; 5369 strcpy((char*)&spec[1], gspec + spec_size); 5370 gspec += next; 5371 cur_data += spec->next; 5372 } 5373 break; 5374 } 5375 default: 5376 ret = -TARGET_EINVAL; 5377 unlock_user(argptr, guest_data, 0); 5378 goto out; 5379 } 5380 unlock_user(argptr, guest_data, 0); 5381 5382 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5383 if (!is_error(ret)) { 5384 guest_data = arg + host_dm->data_start; 5385 guest_data_size = host_dm->data_size - host_dm->data_start; 5386 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 5387 switch (ie->host_cmd) { 5388 case DM_REMOVE_ALL: 5389 case DM_DEV_CREATE: 5390 case DM_DEV_REMOVE: 5391 case DM_DEV_RENAME: 5392 case DM_DEV_SUSPEND: 5393 case DM_DEV_STATUS: 5394 case DM_TABLE_LOAD: 5395 case DM_TABLE_CLEAR: 5396 case DM_TARGET_MSG: 5397 case DM_DEV_SET_GEOMETRY: 5398 /* no return data */ 5399 break; 5400 case DM_LIST_DEVICES: 5401 { 5402 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 5403 uint32_t remaining_data = guest_data_size; 5404 void *cur_data = argptr; 5405 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 5406 int nl_size = 12; /* can't use thunk_size due to alignment */ 5407 5408 while (1) { 5409 uint32_t next = nl->next; 5410 if (next) { 5411 nl->next = nl_size + (strlen(nl->name) + 1); 5412 } 5413 if (remaining_data < nl->next) { 5414 host_dm->flags |= 
DM_BUFFER_FULL_FLAG; 5415 break; 5416 } 5417 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 5418 strcpy(cur_data + nl_size, nl->name); 5419 cur_data += nl->next; 5420 remaining_data -= nl->next; 5421 if (!next) { 5422 break; 5423 } 5424 nl = (void*)nl + next; 5425 } 5426 break; 5427 } 5428 case DM_DEV_WAIT: 5429 case DM_TABLE_STATUS: 5430 { 5431 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 5432 void *cur_data = argptr; 5433 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5434 int spec_size = thunk_type_size(arg_type, 0); 5435 int i; 5436 5437 for (i = 0; i < host_dm->target_count; i++) { 5438 uint32_t next = spec->next; 5439 int slen = strlen((char*)&spec[1]) + 1; 5440 spec->next = (cur_data - argptr) + spec_size + slen; 5441 if (guest_data_size < spec->next) { 5442 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5443 break; 5444 } 5445 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 5446 strcpy(cur_data + spec_size, (char*)&spec[1]); 5447 cur_data = argptr + spec->next; 5448 spec = (void*)host_dm + host_dm->data_start + next; 5449 } 5450 break; 5451 } 5452 case DM_TABLE_DEPS: 5453 { 5454 void *hdata = (void*)host_dm + host_dm->data_start; 5455 int count = *(uint32_t*)hdata; 5456 uint64_t *hdev = hdata + 8; 5457 uint64_t *gdev = argptr + 8; 5458 int i; 5459 5460 *(uint32_t*)argptr = tswap32(count); 5461 for (i = 0; i < count; i++) { 5462 *gdev = tswap64(*hdev); 5463 gdev++; 5464 hdev++; 5465 } 5466 break; 5467 } 5468 case DM_LIST_VERSIONS: 5469 { 5470 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 5471 uint32_t remaining_data = guest_data_size; 5472 void *cur_data = argptr; 5473 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 5474 int vers_size = thunk_type_size(arg_type, 0); 5475 5476 while (1) { 5477 uint32_t next = vers->next; 5478 if (next) { 5479 vers->next = vers_size + (strlen(vers->name) + 1); 5480 } 5481 if (remaining_data < vers->next) { 5482 host_dm->flags |= 
DM_BUFFER_FULL_FLAG; 5483 break; 5484 } 5485 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 5486 strcpy(cur_data + vers_size, vers->name); 5487 cur_data += vers->next; 5488 remaining_data -= vers->next; 5489 if (!next) { 5490 break; 5491 } 5492 vers = (void*)vers + next; 5493 } 5494 break; 5495 } 5496 default: 5497 unlock_user(argptr, guest_data, 0); 5498 ret = -TARGET_EINVAL; 5499 goto out; 5500 } 5501 unlock_user(argptr, guest_data, guest_data_size); 5502 5503 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5504 if (!argptr) { 5505 ret = -TARGET_EFAULT; 5506 goto out; 5507 } 5508 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5509 unlock_user(argptr, arg, target_size); 5510 } 5511 out: 5512 g_free(big_buf); 5513 return ret; 5514 } 5515 5516 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5517 int cmd, abi_long arg) 5518 { 5519 void *argptr; 5520 int target_size; 5521 const argtype *arg_type = ie->arg_type; 5522 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 5523 abi_long ret; 5524 5525 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 5526 struct blkpg_partition host_part; 5527 5528 /* Read and convert blkpg */ 5529 arg_type++; 5530 target_size = thunk_type_size(arg_type, 0); 5531 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5532 if (!argptr) { 5533 ret = -TARGET_EFAULT; 5534 goto out; 5535 } 5536 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5537 unlock_user(argptr, arg, 0); 5538 5539 switch (host_blkpg->op) { 5540 case BLKPG_ADD_PARTITION: 5541 case BLKPG_DEL_PARTITION: 5542 /* payload is struct blkpg_partition */ 5543 break; 5544 default: 5545 /* Unknown opcode */ 5546 ret = -TARGET_EINVAL; 5547 goto out; 5548 } 5549 5550 /* Read and convert blkpg->data */ 5551 arg = (abi_long)(uintptr_t)host_blkpg->data; 5552 target_size = thunk_type_size(part_arg_type, 0); 5553 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5554 if (!argptr) { 5555 ret = 
-TARGET_EFAULT; 5556 goto out; 5557 } 5558 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5559 unlock_user(argptr, arg, 0); 5560 5561 /* Swizzle the data pointer to our local copy and call! */ 5562 host_blkpg->data = &host_part; 5563 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5564 5565 out: 5566 return ret; 5567 } 5568 5569 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5570 int fd, int cmd, abi_long arg) 5571 { 5572 const argtype *arg_type = ie->arg_type; 5573 const StructEntry *se; 5574 const argtype *field_types; 5575 const int *dst_offsets, *src_offsets; 5576 int target_size; 5577 void *argptr; 5578 abi_ulong *target_rt_dev_ptr; 5579 unsigned long *host_rt_dev_ptr; 5580 abi_long ret; 5581 int i; 5582 5583 assert(ie->access == IOC_W); 5584 assert(*arg_type == TYPE_PTR); 5585 arg_type++; 5586 assert(*arg_type == TYPE_STRUCT); 5587 target_size = thunk_type_size(arg_type, 0); 5588 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5589 if (!argptr) { 5590 return -TARGET_EFAULT; 5591 } 5592 arg_type++; 5593 assert(*arg_type == (int)STRUCT_rtentry); 5594 se = struct_entries + *arg_type++; 5595 assert(se->convert[0] == NULL); 5596 /* convert struct here to be able to catch rt_dev string */ 5597 field_types = se->field_types; 5598 dst_offsets = se->field_offsets[THUNK_HOST]; 5599 src_offsets = se->field_offsets[THUNK_TARGET]; 5600 for (i = 0; i < se->nb_fields; i++) { 5601 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 5602 assert(*field_types == TYPE_PTRVOID); 5603 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 5604 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 5605 if (*target_rt_dev_ptr != 0) { 5606 *host_rt_dev_ptr = (unsigned long)lock_user_string( 5607 tswapal(*target_rt_dev_ptr)); 5608 if (!*host_rt_dev_ptr) { 5609 unlock_user(argptr, arg, 0); 5610 return -TARGET_EFAULT; 5611 } 5612 } else { 5613 *host_rt_dev_ptr = 0; 5614 } 5615 field_types++; 5616 continue; 5617 } 
5618 field_types = thunk_convert(buf_temp + dst_offsets[i], 5619 argptr + src_offsets[i], 5620 field_types, THUNK_HOST); 5621 } 5622 unlock_user(argptr, arg, 0); 5623 5624 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5625 if (*host_rt_dev_ptr != 0) { 5626 unlock_user((void *)*host_rt_dev_ptr, 5627 *target_rt_dev_ptr, 0); 5628 } 5629 return ret; 5630 } 5631 5632 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 5633 int fd, int cmd, abi_long arg) 5634 { 5635 int sig = target_to_host_signal(arg); 5636 return get_errno(safe_ioctl(fd, ie->host_cmd, sig)); 5637 } 5638 5639 #ifdef TIOCGPTPEER 5640 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp, 5641 int fd, int cmd, abi_long arg) 5642 { 5643 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl); 5644 return get_errno(safe_ioctl(fd, ie->host_cmd, flags)); 5645 } 5646 #endif 5647 5648 static IOCTLEntry ioctl_entries[] = { 5649 #define IOCTL(cmd, access, ...) \ 5650 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 5651 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 5652 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 5653 #define IOCTL_IGNORE(cmd) \ 5654 { TARGET_ ## cmd, 0, #cmd }, 5655 #include "ioctls.h" 5656 { 0, 0, }, 5657 }; 5658 5659 /* ??? Implement proper locking for ioctls. */ 5660 /* do_ioctl() Must return target values and target errnos. 
 */
/* Generic ioctl dispatcher: look up cmd in ioctl_entries, then either
 * delegate to a special handler (ie->do_ioctl), reject it, or marshal the
 * argument according to its thunk type description (no-arg, plain value,
 * or pointed-to struct converted in one or both directions around the
 * host ioctl call). */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan; the table is terminated by a zero target_cmd entry. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Special-cased ioctl with its own marshalling handler. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Pass the value through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Read-only for the guest: run the ioctl into buf_temp,
             * then convert the result back out to guest memory. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Write-only: convert guest struct to host form first. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read-write (also the fallback for unknown access modes):
             * convert in, call, convert the result back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

/* Bidirectional target<->host bit translation tables for the four
 * termios flag words.  Each row is { target mask, target bits,
 * host mask, host bits }; a zero row terminates the table. */

/* c_iflag: input modes. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};

/* c_oflag: output modes, including the multi-bit delay fields
 * (NLDLY/CRDLY/TABDLY/BSDLY/VTDLY/FFDLY) which map value-by-value. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

/* c_cflag: control modes; the CBAUD field is mapped one baud-rate
 * value at a time, likewise CSIZE for the character-size values. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

/* c_lflag: local modes. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};

/* Convert a guest struct target_termios to a host struct host_termios:
 * flag words go through the tables above (with byte swapping), and each
 * control character is copied to its host slot individually because the
 * V* indices differ between targets and hosts. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

/* Inverse of target_to_host_termios: host struct back to guest layout. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

/* Thunk descriptor so STRUCT_termios conversions use the custom
 * converters above instead of generic field-by-field conversion. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};

/* target<->host translation of mmap() flag bits. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.
       */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};

#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* modify_ldt(func=0): copy the emulated LDT (or nothing, if none has
 * been allocated yet) out to guest memory, truncated to bytecount.
 * Returns the number of bytes copied or -TARGET_EFAULT. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
/* modify_ldt(func=1 or 0x11): install one LDT entry described by a
 * guest struct target_modify_ldt_ldt_s.  Lazily allocates the LDT
 * backing store on first use, unpacks the flag bits and packs them
 * into the two 32-bit descriptor words (same encoding as the Linux
 * kernel's fill_ldt).  oldmode selects the legacy func==1 semantics. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same bit layout as the kernel's
     * struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor: low word holds base[15:0] and limit[15:0];
     * high word holds the remaining base/limit bits plus access flags. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
/* Dispatch modify_ldt(2): func 0 reads the LDT, func 1 writes in the
 * legacy format, func 0x11 writes in the new format. */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

#if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * entry_number == -1 means "pick a free TLS slot" and the chosen index
 * is written back to the guest struct.  Descriptor packing mirrors
 * write_ldt() above. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and report it back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* get_thread_area(2): read a TLS descriptor out of the emulated GDT and
 * unpack it back into the guest's struct (inverse of the packing in
 * do_set_thread_area). */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */

#ifndef TARGET_ABI32
/* arch_prctl(2) for x86-64 guests: get/set the FS or GS segment base
 * directly in the emulated CPU state. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */

#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Per-clone handshake data shared between do_fork() (the parent) and
 * clone_func() (the child thread): the child fills in tid and signals
 * cond when it has finished initializing. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

/* Thread entry point for CLONE_VM clones: register with RCU/TCG, publish
 * the tid to the requested guest locations, restore the signal mask the
 * parent saved, signal readiness, then wait on clone_lock until the
 * parent finishes setup before entering the guest CPU loop. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become host
 * pthreads with a copied CPU state; everything else becomes a host
 * fork() (vfork is demoted to fork).  Flag combinations that are
 * neither a valid thread-create nor a valid fork are rejected. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}

/* warning : doesn't handle linux specific flags...
*/ 6447 static int target_to_host_fcntl_cmd(int cmd) 6448 { 6449 switch(cmd) { 6450 case TARGET_F_DUPFD: 6451 case TARGET_F_GETFD: 6452 case TARGET_F_SETFD: 6453 case TARGET_F_GETFL: 6454 case TARGET_F_SETFL: 6455 return cmd; 6456 case TARGET_F_GETLK: 6457 return F_GETLK64; 6458 case TARGET_F_SETLK: 6459 return F_SETLK64; 6460 case TARGET_F_SETLKW: 6461 return F_SETLKW64; 6462 case TARGET_F_GETOWN: 6463 return F_GETOWN; 6464 case TARGET_F_SETOWN: 6465 return F_SETOWN; 6466 case TARGET_F_GETSIG: 6467 return F_GETSIG; 6468 case TARGET_F_SETSIG: 6469 return F_SETSIG; 6470 #if TARGET_ABI_BITS == 32 6471 case TARGET_F_GETLK64: 6472 return F_GETLK64; 6473 case TARGET_F_SETLK64: 6474 return F_SETLK64; 6475 case TARGET_F_SETLKW64: 6476 return F_SETLKW64; 6477 #endif 6478 case TARGET_F_SETLEASE: 6479 return F_SETLEASE; 6480 case TARGET_F_GETLEASE: 6481 return F_GETLEASE; 6482 #ifdef F_DUPFD_CLOEXEC 6483 case TARGET_F_DUPFD_CLOEXEC: 6484 return F_DUPFD_CLOEXEC; 6485 #endif 6486 case TARGET_F_NOTIFY: 6487 return F_NOTIFY; 6488 #ifdef F_GETOWN_EX 6489 case TARGET_F_GETOWN_EX: 6490 return F_GETOWN_EX; 6491 #endif 6492 #ifdef F_SETOWN_EX 6493 case TARGET_F_SETOWN_EX: 6494 return F_SETOWN_EX; 6495 #endif 6496 #ifdef F_SETPIPE_SZ 6497 case TARGET_F_SETPIPE_SZ: 6498 return F_SETPIPE_SZ; 6499 case TARGET_F_GETPIPE_SZ: 6500 return F_GETPIPE_SZ; 6501 #endif 6502 default: 6503 return -TARGET_EINVAL; 6504 } 6505 return -TARGET_EINVAL; 6506 } 6507 6508 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 6509 static const bitmask_transtbl flock_tbl[] = { 6510 TRANSTBL_CONVERT(F_RDLCK), 6511 TRANSTBL_CONVERT(F_WRLCK), 6512 TRANSTBL_CONVERT(F_UNLCK), 6513 TRANSTBL_CONVERT(F_EXLCK), 6514 TRANSTBL_CONVERT(F_SHLCK), 6515 { 0, 0, 0, 0 } 6516 }; 6517 6518 static inline abi_long copy_from_user_flock(struct flock64 *fl, 6519 abi_ulong target_flock_addr) 6520 { 6521 struct target_flock *target_fl; 6522 short l_type; 6523 6524 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 
6525 return -TARGET_EFAULT; 6526 } 6527 6528 __get_user(l_type, &target_fl->l_type); 6529 fl->l_type = target_to_host_bitmask(l_type, flock_tbl); 6530 __get_user(fl->l_whence, &target_fl->l_whence); 6531 __get_user(fl->l_start, &target_fl->l_start); 6532 __get_user(fl->l_len, &target_fl->l_len); 6533 __get_user(fl->l_pid, &target_fl->l_pid); 6534 unlock_user_struct(target_fl, target_flock_addr, 0); 6535 return 0; 6536 } 6537 6538 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, 6539 const struct flock64 *fl) 6540 { 6541 struct target_flock *target_fl; 6542 short l_type; 6543 6544 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6545 return -TARGET_EFAULT; 6546 } 6547 6548 l_type = host_to_target_bitmask(fl->l_type, flock_tbl); 6549 __put_user(l_type, &target_fl->l_type); 6550 __put_user(fl->l_whence, &target_fl->l_whence); 6551 __put_user(fl->l_start, &target_fl->l_start); 6552 __put_user(fl->l_len, &target_fl->l_len); 6553 __put_user(fl->l_pid, &target_fl->l_pid); 6554 unlock_user_struct(target_fl, target_flock_addr, 1); 6555 return 0; 6556 } 6557 6558 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); 6559 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); 6560 6561 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 6562 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl, 6563 abi_ulong target_flock_addr) 6564 { 6565 struct target_eabi_flock64 *target_fl; 6566 short l_type; 6567 6568 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6569 return -TARGET_EFAULT; 6570 } 6571 6572 __get_user(l_type, &target_fl->l_type); 6573 fl->l_type = target_to_host_bitmask(l_type, flock_tbl); 6574 __get_user(fl->l_whence, &target_fl->l_whence); 6575 __get_user(fl->l_start, &target_fl->l_start); 6576 __get_user(fl->l_len, &target_fl->l_len); 6577 __get_user(fl->l_pid, &target_fl->l_pid); 6578 unlock_user_struct(target_fl, target_flock_addr, 0); 
6579 return 0; 6580 } 6581 6582 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr, 6583 const struct flock64 *fl) 6584 { 6585 struct target_eabi_flock64 *target_fl; 6586 short l_type; 6587 6588 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6589 return -TARGET_EFAULT; 6590 } 6591 6592 l_type = host_to_target_bitmask(fl->l_type, flock_tbl); 6593 __put_user(l_type, &target_fl->l_type); 6594 __put_user(fl->l_whence, &target_fl->l_whence); 6595 __put_user(fl->l_start, &target_fl->l_start); 6596 __put_user(fl->l_len, &target_fl->l_len); 6597 __put_user(fl->l_pid, &target_fl->l_pid); 6598 unlock_user_struct(target_fl, target_flock_addr, 1); 6599 return 0; 6600 } 6601 #endif 6602 6603 static inline abi_long copy_from_user_flock64(struct flock64 *fl, 6604 abi_ulong target_flock_addr) 6605 { 6606 struct target_flock64 *target_fl; 6607 short l_type; 6608 6609 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6610 return -TARGET_EFAULT; 6611 } 6612 6613 __get_user(l_type, &target_fl->l_type); 6614 fl->l_type = target_to_host_bitmask(l_type, flock_tbl); 6615 __get_user(fl->l_whence, &target_fl->l_whence); 6616 __get_user(fl->l_start, &target_fl->l_start); 6617 __get_user(fl->l_len, &target_fl->l_len); 6618 __get_user(fl->l_pid, &target_fl->l_pid); 6619 unlock_user_struct(target_fl, target_flock_addr, 0); 6620 return 0; 6621 } 6622 6623 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, 6624 const struct flock64 *fl) 6625 { 6626 struct target_flock64 *target_fl; 6627 short l_type; 6628 6629 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6630 return -TARGET_EFAULT; 6631 } 6632 6633 l_type = host_to_target_bitmask(fl->l_type, flock_tbl); 6634 __put_user(l_type, &target_fl->l_type); 6635 __put_user(fl->l_whence, &target_fl->l_whence); 6636 __put_user(fl->l_start, &target_fl->l_start); 6637 __put_user(fl->l_len, &target_fl->l_len); 6638 __put_user(fl->l_pid, 
&target_fl->l_pid); 6639 unlock_user_struct(target_fl, target_flock_addr, 1); 6640 return 0; 6641 } 6642 6643 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 6644 { 6645 struct flock64 fl64; 6646 #ifdef F_GETOWN_EX 6647 struct f_owner_ex fox; 6648 struct target_f_owner_ex *target_fox; 6649 #endif 6650 abi_long ret; 6651 int host_cmd = target_to_host_fcntl_cmd(cmd); 6652 6653 if (host_cmd == -TARGET_EINVAL) 6654 return host_cmd; 6655 6656 switch(cmd) { 6657 case TARGET_F_GETLK: 6658 ret = copy_from_user_flock(&fl64, arg); 6659 if (ret) { 6660 return ret; 6661 } 6662 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6663 if (ret == 0) { 6664 ret = copy_to_user_flock(arg, &fl64); 6665 } 6666 break; 6667 6668 case TARGET_F_SETLK: 6669 case TARGET_F_SETLKW: 6670 ret = copy_from_user_flock(&fl64, arg); 6671 if (ret) { 6672 return ret; 6673 } 6674 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6675 break; 6676 6677 case TARGET_F_GETLK64: 6678 ret = copy_from_user_flock64(&fl64, arg); 6679 if (ret) { 6680 return ret; 6681 } 6682 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6683 if (ret == 0) { 6684 ret = copy_to_user_flock64(arg, &fl64); 6685 } 6686 break; 6687 case TARGET_F_SETLK64: 6688 case TARGET_F_SETLKW64: 6689 ret = copy_from_user_flock64(&fl64, arg); 6690 if (ret) { 6691 return ret; 6692 } 6693 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6694 break; 6695 6696 case TARGET_F_GETFL: 6697 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 6698 if (ret >= 0) { 6699 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 6700 } 6701 break; 6702 6703 case TARGET_F_SETFL: 6704 ret = get_errno(safe_fcntl(fd, host_cmd, 6705 target_to_host_bitmask(arg, 6706 fcntl_flags_tbl))); 6707 break; 6708 6709 #ifdef F_GETOWN_EX 6710 case TARGET_F_GETOWN_EX: 6711 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 6712 if (ret >= 0) { 6713 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 6714 return -TARGET_EFAULT; 6715 target_fox->type = tswap32(fox.type); 6716 
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Commands whose argument is a plain integer: pass it straight
     * through (it needs no byte swapping; it travels by value). */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: forward the raw target value and let the
         * host kernel reject it if necessary. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

/* Clamp a 32-bit uid/gid into the 16-bit range used by legacy
 * (uid16) syscalls; out-of-range ids become the overflow id 65534. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid/gid, preserving the special -1 ("no change")
 * value used by set*uid-style calls. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit uid ABI: all of these are identity conversions. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)

/* One-time setup for the syscall layer: register thunk struct types,
 * build the reverse errno table, and patch/validate the ioctl table. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[].
     */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#if TARGET_ABI_BITS == 32
/* Combine two 32-bit guest registers into a 64-bit offset, honouring
 * the guest's endianness (word0 is the first register passed). */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
/* truncate64: reassemble the 64-bit length from a register pair,
 * shifting the pair up by one slot on ABIs that align register pairs. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1,
target_offset64(arg2, arg3))); 6930 } 6931 #endif 6932 6933 #ifdef TARGET_NR_ftruncate64 6934 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 6935 abi_long arg2, 6936 abi_long arg3, 6937 abi_long arg4) 6938 { 6939 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) { 6940 arg2 = arg3; 6941 arg3 = arg4; 6942 } 6943 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 6944 } 6945 #endif 6946 6947 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 6948 abi_ulong target_addr) 6949 { 6950 struct target_timespec *target_ts; 6951 6952 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 6953 return -TARGET_EFAULT; 6954 __get_user(host_ts->tv_sec, &target_ts->tv_sec); 6955 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec); 6956 unlock_user_struct(target_ts, target_addr, 0); 6957 return 0; 6958 } 6959 6960 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 6961 struct timespec *host_ts) 6962 { 6963 struct target_timespec *target_ts; 6964 6965 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 6966 return -TARGET_EFAULT; 6967 __put_user(host_ts->tv_sec, &target_ts->tv_sec); 6968 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec); 6969 unlock_user_struct(target_ts, target_addr, 1); 6970 return 0; 6971 } 6972 6973 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 6974 abi_ulong target_addr) 6975 { 6976 struct target_itimerspec *target_itspec; 6977 6978 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 6979 return -TARGET_EFAULT; 6980 } 6981 6982 host_itspec->it_interval.tv_sec = 6983 tswapal(target_itspec->it_interval.tv_sec); 6984 host_itspec->it_interval.tv_nsec = 6985 tswapal(target_itspec->it_interval.tv_nsec); 6986 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 6987 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 6988 6989 unlock_user_struct(target_itspec, target_addr, 1); 
6990 return 0; 6991 } 6992 6993 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 6994 struct itimerspec *host_its) 6995 { 6996 struct target_itimerspec *target_itspec; 6997 6998 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 6999 return -TARGET_EFAULT; 7000 } 7001 7002 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 7003 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 7004 7005 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 7006 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 7007 7008 unlock_user_struct(target_itspec, target_addr, 0); 7009 return 0; 7010 } 7011 7012 static inline abi_long target_to_host_timex(struct timex *host_tx, 7013 abi_long target_addr) 7014 { 7015 struct target_timex *target_tx; 7016 7017 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7018 return -TARGET_EFAULT; 7019 } 7020 7021 __get_user(host_tx->modes, &target_tx->modes); 7022 __get_user(host_tx->offset, &target_tx->offset); 7023 __get_user(host_tx->freq, &target_tx->freq); 7024 __get_user(host_tx->maxerror, &target_tx->maxerror); 7025 __get_user(host_tx->esterror, &target_tx->esterror); 7026 __get_user(host_tx->status, &target_tx->status); 7027 __get_user(host_tx->constant, &target_tx->constant); 7028 __get_user(host_tx->precision, &target_tx->precision); 7029 __get_user(host_tx->tolerance, &target_tx->tolerance); 7030 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7031 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7032 __get_user(host_tx->tick, &target_tx->tick); 7033 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7034 __get_user(host_tx->jitter, &target_tx->jitter); 7035 __get_user(host_tx->shift, &target_tx->shift); 7036 __get_user(host_tx->stabil, &target_tx->stabil); 7037 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7038 __get_user(host_tx->calcnt, &target_tx->calcnt); 7039 
__get_user(host_tx->errcnt, &target_tx->errcnt); 7040 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7041 __get_user(host_tx->tai, &target_tx->tai); 7042 7043 unlock_user_struct(target_tx, target_addr, 0); 7044 return 0; 7045 } 7046 7047 static inline abi_long host_to_target_timex(abi_long target_addr, 7048 struct timex *host_tx) 7049 { 7050 struct target_timex *target_tx; 7051 7052 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7053 return -TARGET_EFAULT; 7054 } 7055 7056 __put_user(host_tx->modes, &target_tx->modes); 7057 __put_user(host_tx->offset, &target_tx->offset); 7058 __put_user(host_tx->freq, &target_tx->freq); 7059 __put_user(host_tx->maxerror, &target_tx->maxerror); 7060 __put_user(host_tx->esterror, &target_tx->esterror); 7061 __put_user(host_tx->status, &target_tx->status); 7062 __put_user(host_tx->constant, &target_tx->constant); 7063 __put_user(host_tx->precision, &target_tx->precision); 7064 __put_user(host_tx->tolerance, &target_tx->tolerance); 7065 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7066 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7067 __put_user(host_tx->tick, &target_tx->tick); 7068 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7069 __put_user(host_tx->jitter, &target_tx->jitter); 7070 __put_user(host_tx->shift, &target_tx->shift); 7071 __put_user(host_tx->stabil, &target_tx->stabil); 7072 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7073 __put_user(host_tx->calcnt, &target_tx->calcnt); 7074 __put_user(host_tx->errcnt, &target_tx->errcnt); 7075 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7076 __put_user(host_tx->tai, &target_tx->tai); 7077 7078 unlock_user_struct(target_tx, target_addr, 1); 7079 return 0; 7080 } 7081 7082 7083 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, 7084 abi_ulong target_addr) 7085 { 7086 struct target_sigevent *target_sevp; 7087 7088 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { 7089 return 
-TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): this struct was locked VERIFY_READ and is not
     * modified, yet unlock passes 1 (copy back); siblings like
     * copy_from_user_flock pass 0 here — confirm whether 0 is intended. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits to host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif

/* Write a host 'struct stat' out to the guest's stat64 layout.
 * On 32-bit ARM the EABI variant has its own padding, hence the
 * special case; otherwise target_stat64 (or target_stat) is used. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.
However implementing 7189 futexes locally would make futexes shared between multiple processes 7190 tricky. However they're probably useless because guest atomic 7191 operations won't work either. */ 7192 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 7193 target_ulong uaddr2, int val3) 7194 { 7195 struct timespec ts, *pts; 7196 int base_op; 7197 7198 /* ??? We assume FUTEX_* constants are the same on both host 7199 and target. */ 7200 #ifdef FUTEX_CMD_MASK 7201 base_op = op & FUTEX_CMD_MASK; 7202 #else 7203 base_op = op; 7204 #endif 7205 switch (base_op) { 7206 case FUTEX_WAIT: 7207 case FUTEX_WAIT_BITSET: 7208 if (timeout) { 7209 pts = &ts; 7210 target_to_host_timespec(pts, timeout); 7211 } else { 7212 pts = NULL; 7213 } 7214 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val), 7215 pts, NULL, val3)); 7216 case FUTEX_WAKE: 7217 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 7218 case FUTEX_FD: 7219 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 7220 case FUTEX_REQUEUE: 7221 case FUTEX_CMP_REQUEUE: 7222 case FUTEX_WAKE_OP: 7223 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7224 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7225 But the prototype takes a `struct timespec *'; insert casts 7226 to satisfy the compiler. We do not need to tswap TIMEOUT 7227 since it's not compared to guest memory. */ 7228 pts = (struct timespec *)(uintptr_t) timeout; 7229 return get_errno(safe_futex(g2h(uaddr), op, val, pts, 7230 g2h(uaddr2), 7231 (base_op == FUTEX_CMP_REQUEUE 7232 ? 
tswap32(val3) 7233 : val3))); 7234 default: 7235 return -TARGET_ENOSYS; 7236 } 7237 } 7238 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7239 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname, 7240 abi_long handle, abi_long mount_id, 7241 abi_long flags) 7242 { 7243 struct file_handle *target_fh; 7244 struct file_handle *fh; 7245 int mid = 0; 7246 abi_long ret; 7247 char *name; 7248 unsigned int size, total_size; 7249 7250 if (get_user_s32(size, handle)) { 7251 return -TARGET_EFAULT; 7252 } 7253 7254 name = lock_user_string(pathname); 7255 if (!name) { 7256 return -TARGET_EFAULT; 7257 } 7258 7259 total_size = sizeof(struct file_handle) + size; 7260 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0); 7261 if (!target_fh) { 7262 unlock_user(name, pathname, 0); 7263 return -TARGET_EFAULT; 7264 } 7265 7266 fh = g_malloc0(total_size); 7267 fh->handle_bytes = size; 7268 7269 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags)); 7270 unlock_user(name, pathname, 0); 7271 7272 /* man name_to_handle_at(2): 7273 * Other than the use of the handle_bytes field, the caller should treat 7274 * the file_handle structure as an opaque data type 7275 */ 7276 7277 memcpy(target_fh, fh, total_size); 7278 target_fh->handle_bytes = tswap32(fh->handle_bytes); 7279 target_fh->handle_type = tswap32(fh->handle_type); 7280 g_free(fh); 7281 unlock_user(target_fh, handle, total_size); 7282 7283 if (put_user_s32(mid, mount_id)) { 7284 return -TARGET_EFAULT; 7285 } 7286 7287 return ret; 7288 7289 } 7290 #endif 7291 7292 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7293 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle, 7294 abi_long flags) 7295 { 7296 struct file_handle *target_fh; 7297 struct file_handle *fh; 7298 unsigned int size, total_size; 7299 abi_long ret; 7300 7301 if (get_user_s32(size, handle)) { 7302 return -TARGET_EFAULT; 7303 } 7304 7305 total_size = 
sizeof(struct file_handle) + size; 7306 target_fh = lock_user(VERIFY_READ, handle, total_size, 1); 7307 if (!target_fh) { 7308 return -TARGET_EFAULT; 7309 } 7310 7311 fh = g_memdup(target_fh, total_size); 7312 fh->handle_bytes = size; 7313 fh->handle_type = tswap32(target_fh->handle_type); 7314 7315 ret = get_errno(open_by_handle_at(mount_fd, fh, 7316 target_to_host_bitmask(flags, fcntl_flags_tbl))); 7317 7318 g_free(fh); 7319 7320 unlock_user(target_fh, handle, total_size); 7321 7322 return ret; 7323 } 7324 #endif 7325 7326 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4) 7327 7328 /* signalfd siginfo conversion */ 7329 7330 static void 7331 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo, 7332 const struct signalfd_siginfo *info) 7333 { 7334 int sig = host_to_target_signal(info->ssi_signo); 7335 7336 /* linux/signalfd.h defines a ssi_addr_lsb 7337 * not defined in sys/signalfd.h but used by some kernels 7338 */ 7339 7340 #ifdef BUS_MCEERR_AO 7341 if (tinfo->ssi_signo == SIGBUS && 7342 (tinfo->ssi_code == BUS_MCEERR_AR || 7343 tinfo->ssi_code == BUS_MCEERR_AO)) { 7344 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1); 7345 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1); 7346 *tssi_addr_lsb = tswap16(*ssi_addr_lsb); 7347 } 7348 #endif 7349 7350 tinfo->ssi_signo = tswap32(sig); 7351 tinfo->ssi_errno = tswap32(tinfo->ssi_errno); 7352 tinfo->ssi_code = tswap32(info->ssi_code); 7353 tinfo->ssi_pid = tswap32(info->ssi_pid); 7354 tinfo->ssi_uid = tswap32(info->ssi_uid); 7355 tinfo->ssi_fd = tswap32(info->ssi_fd); 7356 tinfo->ssi_tid = tswap32(info->ssi_tid); 7357 tinfo->ssi_band = tswap32(info->ssi_band); 7358 tinfo->ssi_overrun = tswap32(info->ssi_overrun); 7359 tinfo->ssi_trapno = tswap32(info->ssi_trapno); 7360 tinfo->ssi_status = tswap32(info->ssi_status); 7361 tinfo->ssi_int = tswap32(info->ssi_int); 7362 tinfo->ssi_ptr = tswap64(info->ssi_ptr); 7363 tinfo->ssi_utime = tswap64(info->ssi_utime); 7364 
tinfo->ssi_stime = tswap64(info->ssi_stime); 7365 tinfo->ssi_addr = tswap64(info->ssi_addr); 7366 } 7367 7368 static abi_long host_to_target_data_signalfd(void *buf, size_t len) 7369 { 7370 int i; 7371 7372 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) { 7373 host_to_target_signalfd_siginfo(buf + i, buf + i); 7374 } 7375 7376 return len; 7377 } 7378 7379 static TargetFdTrans target_signalfd_trans = { 7380 .host_to_target_data = host_to_target_data_signalfd, 7381 }; 7382 7383 static abi_long do_signalfd4(int fd, abi_long mask, int flags) 7384 { 7385 int host_flags; 7386 target_sigset_t *target_mask; 7387 sigset_t host_mask; 7388 abi_long ret; 7389 7390 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) { 7391 return -TARGET_EINVAL; 7392 } 7393 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) { 7394 return -TARGET_EFAULT; 7395 } 7396 7397 target_to_host_sigset(&host_mask, target_mask); 7398 7399 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 7400 7401 ret = get_errno(signalfd(fd, &host_mask, host_flags)); 7402 if (ret >= 0) { 7403 fd_trans_register(ret, &target_signalfd_trans); 7404 } 7405 7406 unlock_user_struct(target_mask, mask, 0); 7407 7408 return ret; 7409 } 7410 #endif 7411 7412 /* Map host to target signal numbers for the wait family of syscalls. 7413 Assume all other status bits are the same. 
*/ 7414 int host_to_target_waitstatus(int status) 7415 { 7416 if (WIFSIGNALED(status)) { 7417 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 7418 } 7419 if (WIFSTOPPED(status)) { 7420 return (host_to_target_signal(WSTOPSIG(status)) << 8) 7421 | (status & 0xff); 7422 } 7423 return status; 7424 } 7425 7426 static int open_self_cmdline(void *cpu_env, int fd) 7427 { 7428 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7429 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 7430 int i; 7431 7432 for (i = 0; i < bprm->argc; i++) { 7433 size_t len = strlen(bprm->argv[i]) + 1; 7434 7435 if (write(fd, bprm->argv[i], len) != len) { 7436 return -1; 7437 } 7438 } 7439 7440 return 0; 7441 } 7442 7443 static int open_self_maps(void *cpu_env, int fd) 7444 { 7445 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7446 TaskState *ts = cpu->opaque; 7447 FILE *fp; 7448 char *line = NULL; 7449 size_t len = 0; 7450 ssize_t read; 7451 7452 fp = fopen("/proc/self/maps", "r"); 7453 if (fp == NULL) { 7454 return -1; 7455 } 7456 7457 while ((read = getline(&line, &len, fp)) != -1) { 7458 int fields, dev_maj, dev_min, inode; 7459 uint64_t min, max, offset; 7460 char flag_r, flag_w, flag_x, flag_p; 7461 char path[512] = ""; 7462 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 7463 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 7464 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 7465 7466 if ((fields < 10) || (fields > 11)) { 7467 continue; 7468 } 7469 if (h2g_valid(min)) { 7470 int flags = page_get_flags(h2g(min)); 7471 max = h2g_valid(max - 1) ? 
            max : (uintptr_t)g2h(GUEST_ADDR_MAX);
            /* Skip ranges whose pages do not actually carry the flags
               the host map claims. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), " [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}

/* Back end for the /proc/self/stat fake.  Only three of the 44 fields
 * carry real values: pid (field 0), comm (field 1) and the stack start
 * address (field 27); every other field reads as 0.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}

/* Back end for the /proc/self/auxv fake: copy the auxiliary vector
 * saved at load time from the guest stack into fd.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
7539 * read in whole auxv vector and copy it to file 7540 */ 7541 ptr = lock_user(VERIFY_READ, auxv, len, 0); 7542 if (ptr != NULL) { 7543 while (len > 0) { 7544 ssize_t r; 7545 r = write(fd, ptr, len); 7546 if (r <= 0) { 7547 break; 7548 } 7549 len -= r; 7550 ptr += r; 7551 } 7552 lseek(fd, 0, SEEK_SET); 7553 unlock_user(ptr, auxv, len); 7554 } 7555 7556 return 0; 7557 } 7558 7559 static int is_proc_myself(const char *filename, const char *entry) 7560 { 7561 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 7562 filename += strlen("/proc/"); 7563 if (!strncmp(filename, "self/", strlen("self/"))) { 7564 filename += strlen("self/"); 7565 } else if (*filename >= '1' && *filename <= '9') { 7566 char myself[80]; 7567 snprintf(myself, sizeof(myself), "%d/", getpid()); 7568 if (!strncmp(filename, myself, strlen(myself))) { 7569 filename += strlen(myself); 7570 } else { 7571 return 0; 7572 } 7573 } else { 7574 return 0; 7575 } 7576 if (!strcmp(filename, entry)) { 7577 return 1; 7578 } 7579 } 7580 return 0; 7581 } 7582 7583 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 7584 static int is_proc(const char *filename, const char *entry) 7585 { 7586 return strcmp(filename, entry) == 0; 7587 } 7588 7589 static int open_net_route(void *cpu_env, int fd) 7590 { 7591 FILE *fp; 7592 char *line = NULL; 7593 size_t len = 0; 7594 ssize_t read; 7595 7596 fp = fopen("/proc/net/route", "r"); 7597 if (fp == NULL) { 7598 return -1; 7599 } 7600 7601 /* read header */ 7602 7603 read = getline(&line, &len, fp); 7604 dprintf(fd, "%s", line); 7605 7606 /* read routes */ 7607 7608 while ((read = getline(&line, &len, fp)) != -1) { 7609 char iface[16]; 7610 uint32_t dest, gw, mask; 7611 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 7612 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7613 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 7614 &mask, &mtu, &window, &irtt); 7615 dprintf(fd, 
                "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

/* open(2)/openat(2) back end.  Paths under /proc that would expose
 * host state (maps, stat, auxv, cmdline, and on cross-endian hosts
 * /proc/net/route) are redirected to generated fakes; everything else
 * is forwarded to the host openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        /* Writes the fake file's contents into fd. */
        int (*fill)(void *cpu_env, int fd);
        /* How to match pathname against filename. */
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: reopen the binary being emulated, preferring the
     * AT_EXECFD descriptor the loader may have handed us.
     */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ?
               execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the open fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the fake contents from the start. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}

#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs that do not carry the magic tag we handed out. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}

/* Byte-swap the 64-bit counters moved through an eventfd, in either
 * direction (the transform is its own inverse).
 */
static abi_long swap_data_eventfd(void *buf, size_t len)
{
    uint64_t *counter = buf;
    int i;

    if (len < sizeof(uint64_t)) {
        return -EINVAL;
    }

    for (i = 0; i < len; i += sizeof(uint64_t)) {
        *counter = tswap64(*counter);
        counter++;
    }

    return len;
}

static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};

#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
     defined(__NR_inotify_init1))
/* Byte-swap the fixed-size header of each inotify_event in buf; the
 * trailing name bytes are left untouched.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    int i;
    uint32_t name_len;

    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + i);
        /* name_len must be captured before ev->len is byte-swapped
         * below; it also feeds the loop increment for the next
         * iteration.
         */
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    return len;
}

static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
#endif

/* Expand a guest CPU-affinity bitmap (abi_ulong words in target byte
 * order at target_addr) into a host cpu mask of host_size bytes.
 * Returns 0 or -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    /* Copy bit by bit: abi_ulong and unsigned long may differ in both
     * width and byte order.
     */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_cpu_mask: pack a host cpu mask into the
 * guest's abi_ulong-word bitmap at target_addr.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask =
lock_user(VERIFY_WRITE, target_addr, target_size, 0); 7804 if (!target_mask) { 7805 return -TARGET_EFAULT; 7806 } 7807 7808 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 7809 unsigned bit = i * target_bits; 7810 abi_ulong val = 0; 7811 7812 for (j = 0; j < target_bits; j++, bit++) { 7813 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) { 7814 val |= 1UL << j; 7815 } 7816 } 7817 __put_user(val, &target_mask[i]); 7818 } 7819 7820 unlock_user(target_mask, target_addr, target_size); 7821 return 0; 7822 } 7823 7824 /* do_syscall() should always have a single exit point at the end so 7825 that actions, such as logging of syscall results, can be performed. 7826 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 7827 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 7828 abi_long arg2, abi_long arg3, abi_long arg4, 7829 abi_long arg5, abi_long arg6, abi_long arg7, 7830 abi_long arg8) 7831 { 7832 CPUState *cpu = ENV_GET_CPU(cpu_env); 7833 abi_long ret; 7834 struct stat st; 7835 struct statfs stfs; 7836 void *p; 7837 7838 #if defined(DEBUG_ERESTARTSYS) 7839 /* Debug-only code for exercising the syscall-restart code paths 7840 * in the per-architecture cpu main loops: restart every syscall 7841 * the guest makes once before letting it through. 7842 */ 7843 { 7844 static int flag; 7845 7846 flag = !flag; 7847 if (flag) { 7848 return -TARGET_ERESTARTSYS; 7849 } 7850 } 7851 #endif 7852 7853 #ifdef DEBUG 7854 gemu_log("syscall %d", num); 7855 #endif 7856 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); 7857 if(do_strace) 7858 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 7859 7860 switch(num) { 7861 case TARGET_NR_exit: 7862 /* In old applications this may be used to implement _exit(2). 7863 However in threaded applictions it is used for thread termination, 7864 and _exit_group is used for application termination. 7865 Do thread termination if we have more then one thread. 
*/ 7866 7867 if (block_signals()) { 7868 ret = -TARGET_ERESTARTSYS; 7869 break; 7870 } 7871 7872 cpu_list_lock(); 7873 7874 if (CPU_NEXT(first_cpu)) { 7875 TaskState *ts; 7876 7877 /* Remove the CPU from the list. */ 7878 QTAILQ_REMOVE(&cpus, cpu, node); 7879 7880 cpu_list_unlock(); 7881 7882 ts = cpu->opaque; 7883 if (ts->child_tidptr) { 7884 put_user_u32(0, ts->child_tidptr); 7885 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 7886 NULL, NULL, 0); 7887 } 7888 thread_cpu = NULL; 7889 object_unref(OBJECT(cpu)); 7890 g_free(ts); 7891 rcu_unregister_thread(); 7892 pthread_exit(NULL); 7893 } 7894 7895 cpu_list_unlock(); 7896 #ifdef TARGET_GPROF 7897 _mcleanup(); 7898 #endif 7899 gdb_exit(cpu_env, arg1); 7900 _exit(arg1); 7901 ret = 0; /* avoid warning */ 7902 break; 7903 case TARGET_NR_read: 7904 if (arg3 == 0) 7905 ret = 0; 7906 else { 7907 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7908 goto efault; 7909 ret = get_errno(safe_read(arg1, p, arg3)); 7910 if (ret >= 0 && 7911 fd_trans_host_to_target_data(arg1)) { 7912 ret = fd_trans_host_to_target_data(arg1)(p, ret); 7913 } 7914 unlock_user(p, arg2, ret); 7915 } 7916 break; 7917 case TARGET_NR_write: 7918 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7919 goto efault; 7920 if (fd_trans_target_to_host_data(arg1)) { 7921 void *copy = g_malloc(arg3); 7922 memcpy(copy, p, arg3); 7923 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 7924 if (ret >= 0) { 7925 ret = get_errno(safe_write(arg1, copy, ret)); 7926 } 7927 g_free(copy); 7928 } else { 7929 ret = get_errno(safe_write(arg1, p, arg3)); 7930 } 7931 unlock_user(p, arg2, 0); 7932 break; 7933 #ifdef TARGET_NR_open 7934 case TARGET_NR_open: 7935 if (!(p = lock_user_string(arg1))) 7936 goto efault; 7937 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 7938 target_to_host_bitmask(arg2, fcntl_flags_tbl), 7939 arg3)); 7940 fd_trans_unregister(ret); 7941 unlock_user(p, arg1, 0); 7942 break; 7943 #endif 7944 case TARGET_NR_openat: 7945 if (!(p = 
lock_user_string(arg2))) 7946 goto efault; 7947 ret = get_errno(do_openat(cpu_env, arg1, p, 7948 target_to_host_bitmask(arg3, fcntl_flags_tbl), 7949 arg4)); 7950 fd_trans_unregister(ret); 7951 unlock_user(p, arg2, 0); 7952 break; 7953 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7954 case TARGET_NR_name_to_handle_at: 7955 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 7956 break; 7957 #endif 7958 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7959 case TARGET_NR_open_by_handle_at: 7960 ret = do_open_by_handle_at(arg1, arg2, arg3); 7961 fd_trans_unregister(ret); 7962 break; 7963 #endif 7964 case TARGET_NR_close: 7965 fd_trans_unregister(arg1); 7966 ret = get_errno(close(arg1)); 7967 break; 7968 case TARGET_NR_brk: 7969 ret = do_brk(arg1); 7970 break; 7971 #ifdef TARGET_NR_fork 7972 case TARGET_NR_fork: 7973 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 7974 break; 7975 #endif 7976 #ifdef TARGET_NR_waitpid 7977 case TARGET_NR_waitpid: 7978 { 7979 int status; 7980 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 7981 if (!is_error(ret) && arg2 && ret 7982 && put_user_s32(host_to_target_waitstatus(status), arg2)) 7983 goto efault; 7984 } 7985 break; 7986 #endif 7987 #ifdef TARGET_NR_waitid 7988 case TARGET_NR_waitid: 7989 { 7990 siginfo_t info; 7991 info.si_pid = 0; 7992 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 7993 if (!is_error(ret) && arg3 && info.si_pid != 0) { 7994 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 7995 goto efault; 7996 host_to_target_siginfo(p, &info); 7997 unlock_user(p, arg3, sizeof(target_siginfo_t)); 7998 } 7999 } 8000 break; 8001 #endif 8002 #ifdef TARGET_NR_creat /* not on alpha */ 8003 case TARGET_NR_creat: 8004 if (!(p = lock_user_string(arg1))) 8005 goto efault; 8006 ret = get_errno(creat(p, arg2)); 8007 fd_trans_unregister(ret); 8008 unlock_user(p, arg1, 0); 8009 break; 8010 #endif 8011 #ifdef TARGET_NR_link 
8012 case TARGET_NR_link: 8013 { 8014 void * p2; 8015 p = lock_user_string(arg1); 8016 p2 = lock_user_string(arg2); 8017 if (!p || !p2) 8018 ret = -TARGET_EFAULT; 8019 else 8020 ret = get_errno(link(p, p2)); 8021 unlock_user(p2, arg2, 0); 8022 unlock_user(p, arg1, 0); 8023 } 8024 break; 8025 #endif 8026 #if defined(TARGET_NR_linkat) 8027 case TARGET_NR_linkat: 8028 { 8029 void * p2 = NULL; 8030 if (!arg2 || !arg4) 8031 goto efault; 8032 p = lock_user_string(arg2); 8033 p2 = lock_user_string(arg4); 8034 if (!p || !p2) 8035 ret = -TARGET_EFAULT; 8036 else 8037 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8038 unlock_user(p, arg2, 0); 8039 unlock_user(p2, arg4, 0); 8040 } 8041 break; 8042 #endif 8043 #ifdef TARGET_NR_unlink 8044 case TARGET_NR_unlink: 8045 if (!(p = lock_user_string(arg1))) 8046 goto efault; 8047 ret = get_errno(unlink(p)); 8048 unlock_user(p, arg1, 0); 8049 break; 8050 #endif 8051 #if defined(TARGET_NR_unlinkat) 8052 case TARGET_NR_unlinkat: 8053 if (!(p = lock_user_string(arg2))) 8054 goto efault; 8055 ret = get_errno(unlinkat(arg1, p, arg3)); 8056 unlock_user(p, arg2, 0); 8057 break; 8058 #endif 8059 case TARGET_NR_execve: 8060 { 8061 char **argp, **envp; 8062 int argc, envc; 8063 abi_ulong gp; 8064 abi_ulong guest_argp; 8065 abi_ulong guest_envp; 8066 abi_ulong addr; 8067 char **q; 8068 int total_size = 0; 8069 8070 argc = 0; 8071 guest_argp = arg2; 8072 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 8073 if (get_user_ual(addr, gp)) 8074 goto efault; 8075 if (!addr) 8076 break; 8077 argc++; 8078 } 8079 envc = 0; 8080 guest_envp = arg3; 8081 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 8082 if (get_user_ual(addr, gp)) 8083 goto efault; 8084 if (!addr) 8085 break; 8086 envc++; 8087 } 8088 8089 argp = g_new0(char *, argc + 1); 8090 envp = g_new0(char *, envc + 1); 8091 8092 for (gp = guest_argp, q = argp; gp; 8093 gp += sizeof(abi_ulong), q++) { 8094 if (get_user_ual(addr, gp)) 8095 goto execve_efault; 8096 if (!addr) 8097 break; 
8098 if (!(*q = lock_user_string(addr))) 8099 goto execve_efault; 8100 total_size += strlen(*q) + 1; 8101 } 8102 *q = NULL; 8103 8104 for (gp = guest_envp, q = envp; gp; 8105 gp += sizeof(abi_ulong), q++) { 8106 if (get_user_ual(addr, gp)) 8107 goto execve_efault; 8108 if (!addr) 8109 break; 8110 if (!(*q = lock_user_string(addr))) 8111 goto execve_efault; 8112 total_size += strlen(*q) + 1; 8113 } 8114 *q = NULL; 8115 8116 if (!(p = lock_user_string(arg1))) 8117 goto execve_efault; 8118 /* Although execve() is not an interruptible syscall it is 8119 * a special case where we must use the safe_syscall wrapper: 8120 * if we allow a signal to happen before we make the host 8121 * syscall then we will 'lose' it, because at the point of 8122 * execve the process leaves QEMU's control. So we use the 8123 * safe syscall wrapper to ensure that we either take the 8124 * signal as a guest signal, or else it does not happen 8125 * before the execve completes and makes it the other 8126 * program's problem. 
8127 */ 8128 ret = get_errno(safe_execve(p, argp, envp)); 8129 unlock_user(p, arg1, 0); 8130 8131 goto execve_end; 8132 8133 execve_efault: 8134 ret = -TARGET_EFAULT; 8135 8136 execve_end: 8137 for (gp = guest_argp, q = argp; *q; 8138 gp += sizeof(abi_ulong), q++) { 8139 if (get_user_ual(addr, gp) 8140 || !addr) 8141 break; 8142 unlock_user(*q, addr, 0); 8143 } 8144 for (gp = guest_envp, q = envp; *q; 8145 gp += sizeof(abi_ulong), q++) { 8146 if (get_user_ual(addr, gp) 8147 || !addr) 8148 break; 8149 unlock_user(*q, addr, 0); 8150 } 8151 8152 g_free(argp); 8153 g_free(envp); 8154 } 8155 break; 8156 case TARGET_NR_chdir: 8157 if (!(p = lock_user_string(arg1))) 8158 goto efault; 8159 ret = get_errno(chdir(p)); 8160 unlock_user(p, arg1, 0); 8161 break; 8162 #ifdef TARGET_NR_time 8163 case TARGET_NR_time: 8164 { 8165 time_t host_time; 8166 ret = get_errno(time(&host_time)); 8167 if (!is_error(ret) 8168 && arg1 8169 && put_user_sal(host_time, arg1)) 8170 goto efault; 8171 } 8172 break; 8173 #endif 8174 #ifdef TARGET_NR_mknod 8175 case TARGET_NR_mknod: 8176 if (!(p = lock_user_string(arg1))) 8177 goto efault; 8178 ret = get_errno(mknod(p, arg2, arg3)); 8179 unlock_user(p, arg1, 0); 8180 break; 8181 #endif 8182 #if defined(TARGET_NR_mknodat) 8183 case TARGET_NR_mknodat: 8184 if (!(p = lock_user_string(arg2))) 8185 goto efault; 8186 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8187 unlock_user(p, arg2, 0); 8188 break; 8189 #endif 8190 #ifdef TARGET_NR_chmod 8191 case TARGET_NR_chmod: 8192 if (!(p = lock_user_string(arg1))) 8193 goto efault; 8194 ret = get_errno(chmod(p, arg2)); 8195 unlock_user(p, arg1, 0); 8196 break; 8197 #endif 8198 #ifdef TARGET_NR_break 8199 case TARGET_NR_break: 8200 goto unimplemented; 8201 #endif 8202 #ifdef TARGET_NR_oldstat 8203 case TARGET_NR_oldstat: 8204 goto unimplemented; 8205 #endif 8206 case TARGET_NR_lseek: 8207 ret = get_errno(lseek(arg1, arg2, arg3)); 8208 break; 8209 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8210 /* 
Alpha specific */ 8211 case TARGET_NR_getxpid: 8212 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 8213 ret = get_errno(getpid()); 8214 break; 8215 #endif 8216 #ifdef TARGET_NR_getpid 8217 case TARGET_NR_getpid: 8218 ret = get_errno(getpid()); 8219 break; 8220 #endif 8221 case TARGET_NR_mount: 8222 { 8223 /* need to look at the data field */ 8224 void *p2, *p3; 8225 8226 if (arg1) { 8227 p = lock_user_string(arg1); 8228 if (!p) { 8229 goto efault; 8230 } 8231 } else { 8232 p = NULL; 8233 } 8234 8235 p2 = lock_user_string(arg2); 8236 if (!p2) { 8237 if (arg1) { 8238 unlock_user(p, arg1, 0); 8239 } 8240 goto efault; 8241 } 8242 8243 if (arg3) { 8244 p3 = lock_user_string(arg3); 8245 if (!p3) { 8246 if (arg1) { 8247 unlock_user(p, arg1, 0); 8248 } 8249 unlock_user(p2, arg2, 0); 8250 goto efault; 8251 } 8252 } else { 8253 p3 = NULL; 8254 } 8255 8256 /* FIXME - arg5 should be locked, but it isn't clear how to 8257 * do that since it's not guaranteed to be a NULL-terminated 8258 * string. 
8259 */ 8260 if (!arg5) { 8261 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 8262 } else { 8263 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 8264 } 8265 ret = get_errno(ret); 8266 8267 if (arg1) { 8268 unlock_user(p, arg1, 0); 8269 } 8270 unlock_user(p2, arg2, 0); 8271 if (arg3) { 8272 unlock_user(p3, arg3, 0); 8273 } 8274 } 8275 break; 8276 #ifdef TARGET_NR_umount 8277 case TARGET_NR_umount: 8278 if (!(p = lock_user_string(arg1))) 8279 goto efault; 8280 ret = get_errno(umount(p)); 8281 unlock_user(p, arg1, 0); 8282 break; 8283 #endif 8284 #ifdef TARGET_NR_stime /* not on alpha */ 8285 case TARGET_NR_stime: 8286 { 8287 time_t host_time; 8288 if (get_user_sal(host_time, arg1)) 8289 goto efault; 8290 ret = get_errno(stime(&host_time)); 8291 } 8292 break; 8293 #endif 8294 case TARGET_NR_ptrace: 8295 goto unimplemented; 8296 #ifdef TARGET_NR_alarm /* not on alpha */ 8297 case TARGET_NR_alarm: 8298 ret = alarm(arg1); 8299 break; 8300 #endif 8301 #ifdef TARGET_NR_oldfstat 8302 case TARGET_NR_oldfstat: 8303 goto unimplemented; 8304 #endif 8305 #ifdef TARGET_NR_pause /* not on alpha */ 8306 case TARGET_NR_pause: 8307 if (!block_signals()) { 8308 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 8309 } 8310 ret = -TARGET_EINTR; 8311 break; 8312 #endif 8313 #ifdef TARGET_NR_utime 8314 case TARGET_NR_utime: 8315 { 8316 struct utimbuf tbuf, *host_tbuf; 8317 struct target_utimbuf *target_tbuf; 8318 if (arg2) { 8319 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 8320 goto efault; 8321 tbuf.actime = tswapal(target_tbuf->actime); 8322 tbuf.modtime = tswapal(target_tbuf->modtime); 8323 unlock_user_struct(target_tbuf, arg2, 0); 8324 host_tbuf = &tbuf; 8325 } else { 8326 host_tbuf = NULL; 8327 } 8328 if (!(p = lock_user_string(arg1))) 8329 goto efault; 8330 ret = get_errno(utime(p, host_tbuf)); 8331 unlock_user(p, arg1, 0); 8332 } 8333 break; 8334 #endif 8335 #ifdef TARGET_NR_utimes 8336 case TARGET_NR_utimes: 8337 { 8338 struct timeval *tvp, tv[2]; 
8339 if (arg2) { 8340 if (copy_from_user_timeval(&tv[0], arg2) 8341 || copy_from_user_timeval(&tv[1], 8342 arg2 + sizeof(struct target_timeval))) 8343 goto efault; 8344 tvp = tv; 8345 } else { 8346 tvp = NULL; 8347 } 8348 if (!(p = lock_user_string(arg1))) 8349 goto efault; 8350 ret = get_errno(utimes(p, tvp)); 8351 unlock_user(p, arg1, 0); 8352 } 8353 break; 8354 #endif 8355 #if defined(TARGET_NR_futimesat) 8356 case TARGET_NR_futimesat: 8357 { 8358 struct timeval *tvp, tv[2]; 8359 if (arg3) { 8360 if (copy_from_user_timeval(&tv[0], arg3) 8361 || copy_from_user_timeval(&tv[1], 8362 arg3 + sizeof(struct target_timeval))) 8363 goto efault; 8364 tvp = tv; 8365 } else { 8366 tvp = NULL; 8367 } 8368 if (!(p = lock_user_string(arg2))) 8369 goto efault; 8370 ret = get_errno(futimesat(arg1, path(p), tvp)); 8371 unlock_user(p, arg2, 0); 8372 } 8373 break; 8374 #endif 8375 #ifdef TARGET_NR_stty 8376 case TARGET_NR_stty: 8377 goto unimplemented; 8378 #endif 8379 #ifdef TARGET_NR_gtty 8380 case TARGET_NR_gtty: 8381 goto unimplemented; 8382 #endif 8383 #ifdef TARGET_NR_access 8384 case TARGET_NR_access: 8385 if (!(p = lock_user_string(arg1))) 8386 goto efault; 8387 ret = get_errno(access(path(p), arg2)); 8388 unlock_user(p, arg1, 0); 8389 break; 8390 #endif 8391 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 8392 case TARGET_NR_faccessat: 8393 if (!(p = lock_user_string(arg2))) 8394 goto efault; 8395 ret = get_errno(faccessat(arg1, p, arg3, 0)); 8396 unlock_user(p, arg2, 0); 8397 break; 8398 #endif 8399 #ifdef TARGET_NR_nice /* not on alpha */ 8400 case TARGET_NR_nice: 8401 ret = get_errno(nice(arg1)); 8402 break; 8403 #endif 8404 #ifdef TARGET_NR_ftime 8405 case TARGET_NR_ftime: 8406 goto unimplemented; 8407 #endif 8408 case TARGET_NR_sync: 8409 sync(); 8410 ret = 0; 8411 break; 8412 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 8413 case TARGET_NR_syncfs: 8414 ret = get_errno(syncfs(arg1)); 8415 break; 8416 #endif 8417 case TARGET_NR_kill: 8418 ret = 
get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 8419 break; 8420 #ifdef TARGET_NR_rename 8421 case TARGET_NR_rename: 8422 { 8423 void *p2; 8424 p = lock_user_string(arg1); 8425 p2 = lock_user_string(arg2); 8426 if (!p || !p2) 8427 ret = -TARGET_EFAULT; 8428 else 8429 ret = get_errno(rename(p, p2)); 8430 unlock_user(p2, arg2, 0); 8431 unlock_user(p, arg1, 0); 8432 } 8433 break; 8434 #endif 8435 #if defined(TARGET_NR_renameat) 8436 case TARGET_NR_renameat: 8437 { 8438 void *p2; 8439 p = lock_user_string(arg2); 8440 p2 = lock_user_string(arg4); 8441 if (!p || !p2) 8442 ret = -TARGET_EFAULT; 8443 else 8444 ret = get_errno(renameat(arg1, p, arg3, p2)); 8445 unlock_user(p2, arg4, 0); 8446 unlock_user(p, arg2, 0); 8447 } 8448 break; 8449 #endif 8450 #if defined(TARGET_NR_renameat2) 8451 case TARGET_NR_renameat2: 8452 { 8453 void *p2; 8454 p = lock_user_string(arg2); 8455 p2 = lock_user_string(arg4); 8456 if (!p || !p2) { 8457 ret = -TARGET_EFAULT; 8458 } else { 8459 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 8460 } 8461 unlock_user(p2, arg4, 0); 8462 unlock_user(p, arg2, 0); 8463 } 8464 break; 8465 #endif 8466 #ifdef TARGET_NR_mkdir 8467 case TARGET_NR_mkdir: 8468 if (!(p = lock_user_string(arg1))) 8469 goto efault; 8470 ret = get_errno(mkdir(p, arg2)); 8471 unlock_user(p, arg1, 0); 8472 break; 8473 #endif 8474 #if defined(TARGET_NR_mkdirat) 8475 case TARGET_NR_mkdirat: 8476 if (!(p = lock_user_string(arg2))) 8477 goto efault; 8478 ret = get_errno(mkdirat(arg1, p, arg3)); 8479 unlock_user(p, arg2, 0); 8480 break; 8481 #endif 8482 #ifdef TARGET_NR_rmdir 8483 case TARGET_NR_rmdir: 8484 if (!(p = lock_user_string(arg1))) 8485 goto efault; 8486 ret = get_errno(rmdir(p)); 8487 unlock_user(p, arg1, 0); 8488 break; 8489 #endif 8490 case TARGET_NR_dup: 8491 ret = get_errno(dup(arg1)); 8492 if (ret >= 0) { 8493 fd_trans_dup(arg1, ret); 8494 } 8495 break; 8496 #ifdef TARGET_NR_pipe 8497 case TARGET_NR_pipe: 8498 ret = do_pipe(cpu_env, arg1, 0, 0); 8499 break; 
8500 #endif 8501 #ifdef TARGET_NR_pipe2 8502 case TARGET_NR_pipe2: 8503 ret = do_pipe(cpu_env, arg1, 8504 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 8505 break; 8506 #endif 8507 case TARGET_NR_times: 8508 { 8509 struct target_tms *tmsp; 8510 struct tms tms; 8511 ret = get_errno(times(&tms)); 8512 if (arg1) { 8513 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 8514 if (!tmsp) 8515 goto efault; 8516 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 8517 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 8518 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 8519 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 8520 } 8521 if (!is_error(ret)) 8522 ret = host_to_target_clock_t(ret); 8523 } 8524 break; 8525 #ifdef TARGET_NR_prof 8526 case TARGET_NR_prof: 8527 goto unimplemented; 8528 #endif 8529 #ifdef TARGET_NR_signal 8530 case TARGET_NR_signal: 8531 goto unimplemented; 8532 #endif 8533 case TARGET_NR_acct: 8534 if (arg1 == 0) { 8535 ret = get_errno(acct(NULL)); 8536 } else { 8537 if (!(p = lock_user_string(arg1))) 8538 goto efault; 8539 ret = get_errno(acct(path(p))); 8540 unlock_user(p, arg1, 0); 8541 } 8542 break; 8543 #ifdef TARGET_NR_umount2 8544 case TARGET_NR_umount2: 8545 if (!(p = lock_user_string(arg1))) 8546 goto efault; 8547 ret = get_errno(umount2(p, arg2)); 8548 unlock_user(p, arg1, 0); 8549 break; 8550 #endif 8551 #ifdef TARGET_NR_lock 8552 case TARGET_NR_lock: 8553 goto unimplemented; 8554 #endif 8555 case TARGET_NR_ioctl: 8556 ret = do_ioctl(arg1, arg2, arg3); 8557 break; 8558 case TARGET_NR_fcntl: 8559 ret = do_fcntl(arg1, arg2, arg3); 8560 break; 8561 #ifdef TARGET_NR_mpx 8562 case TARGET_NR_mpx: 8563 goto unimplemented; 8564 #endif 8565 case TARGET_NR_setpgid: 8566 ret = get_errno(setpgid(arg1, arg2)); 8567 break; 8568 #ifdef TARGET_NR_ulimit 8569 case TARGET_NR_ulimit: 8570 goto unimplemented; 8571 #endif 8572 #ifdef TARGET_NR_oldolduname 8573 case 
TARGET_NR_oldolduname: 8574 goto unimplemented; 8575 #endif 8576 case TARGET_NR_umask: 8577 ret = get_errno(umask(arg1)); 8578 break; 8579 case TARGET_NR_chroot: 8580 if (!(p = lock_user_string(arg1))) 8581 goto efault; 8582 ret = get_errno(chroot(p)); 8583 unlock_user(p, arg1, 0); 8584 break; 8585 #ifdef TARGET_NR_ustat 8586 case TARGET_NR_ustat: 8587 goto unimplemented; 8588 #endif 8589 #ifdef TARGET_NR_dup2 8590 case TARGET_NR_dup2: 8591 ret = get_errno(dup2(arg1, arg2)); 8592 if (ret >= 0) { 8593 fd_trans_dup(arg1, arg2); 8594 } 8595 break; 8596 #endif 8597 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 8598 case TARGET_NR_dup3: 8599 { 8600 int host_flags; 8601 8602 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 8603 return -EINVAL; 8604 } 8605 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 8606 ret = get_errno(dup3(arg1, arg2, host_flags)); 8607 if (ret >= 0) { 8608 fd_trans_dup(arg1, arg2); 8609 } 8610 break; 8611 } 8612 #endif 8613 #ifdef TARGET_NR_getppid /* not on alpha */ 8614 case TARGET_NR_getppid: 8615 ret = get_errno(getppid()); 8616 break; 8617 #endif 8618 #ifdef TARGET_NR_getpgrp 8619 case TARGET_NR_getpgrp: 8620 ret = get_errno(getpgrp()); 8621 break; 8622 #endif 8623 case TARGET_NR_setsid: 8624 ret = get_errno(setsid()); 8625 break; 8626 #ifdef TARGET_NR_sigaction 8627 case TARGET_NR_sigaction: 8628 { 8629 #if defined(TARGET_ALPHA) 8630 struct target_sigaction act, oact, *pact = 0; 8631 struct target_old_sigaction *old_act; 8632 if (arg2) { 8633 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8634 goto efault; 8635 act._sa_handler = old_act->_sa_handler; 8636 target_siginitset(&act.sa_mask, old_act->sa_mask); 8637 act.sa_flags = old_act->sa_flags; 8638 act.sa_restorer = 0; 8639 unlock_user_struct(old_act, arg2, 0); 8640 pact = &act; 8641 } 8642 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8643 if (!is_error(ret) && arg3) { 8644 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8645 goto efault; 8646 old_act->_sa_handler = 
oact._sa_handler; 8647 old_act->sa_mask = oact.sa_mask.sig[0]; 8648 old_act->sa_flags = oact.sa_flags; 8649 unlock_user_struct(old_act, arg3, 1); 8650 } 8651 #elif defined(TARGET_MIPS) 8652 struct target_sigaction act, oact, *pact, *old_act; 8653 8654 if (arg2) { 8655 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8656 goto efault; 8657 act._sa_handler = old_act->_sa_handler; 8658 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 8659 act.sa_flags = old_act->sa_flags; 8660 unlock_user_struct(old_act, arg2, 0); 8661 pact = &act; 8662 } else { 8663 pact = NULL; 8664 } 8665 8666 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8667 8668 if (!is_error(ret) && arg3) { 8669 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8670 goto efault; 8671 old_act->_sa_handler = oact._sa_handler; 8672 old_act->sa_flags = oact.sa_flags; 8673 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 8674 old_act->sa_mask.sig[1] = 0; 8675 old_act->sa_mask.sig[2] = 0; 8676 old_act->sa_mask.sig[3] = 0; 8677 unlock_user_struct(old_act, arg3, 1); 8678 } 8679 #else 8680 struct target_old_sigaction *old_act; 8681 struct target_sigaction act, oact, *pact; 8682 if (arg2) { 8683 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8684 goto efault; 8685 act._sa_handler = old_act->_sa_handler; 8686 target_siginitset(&act.sa_mask, old_act->sa_mask); 8687 act.sa_flags = old_act->sa_flags; 8688 act.sa_restorer = old_act->sa_restorer; 8689 unlock_user_struct(old_act, arg2, 0); 8690 pact = &act; 8691 } else { 8692 pact = NULL; 8693 } 8694 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8695 if (!is_error(ret) && arg3) { 8696 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8697 goto efault; 8698 old_act->_sa_handler = oact._sa_handler; 8699 old_act->sa_mask = oact.sa_mask.sig[0]; 8700 old_act->sa_flags = oact.sa_flags; 8701 old_act->sa_restorer = oact.sa_restorer; 8702 unlock_user_struct(old_act, arg3, 1); 8703 } 8704 #endif 8705 } 8706 break; 8707 #endif 8708 case 
TARGET_NR_rt_sigaction: 8709 { 8710 #if defined(TARGET_ALPHA) 8711 /* For Alpha and SPARC this is a 5 argument syscall, with 8712 * a 'restorer' parameter which must be copied into the 8713 * sa_restorer field of the sigaction struct. 8714 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 8715 * and arg5 is the sigsetsize. 8716 * Alpha also has a separate rt_sigaction struct that it uses 8717 * here; SPARC uses the usual sigaction struct. 8718 */ 8719 struct target_rt_sigaction *rt_act; 8720 struct target_sigaction act, oact, *pact = 0; 8721 8722 if (arg4 != sizeof(target_sigset_t)) { 8723 ret = -TARGET_EINVAL; 8724 break; 8725 } 8726 if (arg2) { 8727 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 8728 goto efault; 8729 act._sa_handler = rt_act->_sa_handler; 8730 act.sa_mask = rt_act->sa_mask; 8731 act.sa_flags = rt_act->sa_flags; 8732 act.sa_restorer = arg5; 8733 unlock_user_struct(rt_act, arg2, 0); 8734 pact = &act; 8735 } 8736 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8737 if (!is_error(ret) && arg3) { 8738 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 8739 goto efault; 8740 rt_act->_sa_handler = oact._sa_handler; 8741 rt_act->sa_mask = oact.sa_mask; 8742 rt_act->sa_flags = oact.sa_flags; 8743 unlock_user_struct(rt_act, arg3, 1); 8744 } 8745 #else 8746 #ifdef TARGET_SPARC 8747 target_ulong restorer = arg4; 8748 target_ulong sigsetsize = arg5; 8749 #else 8750 target_ulong sigsetsize = arg4; 8751 #endif 8752 struct target_sigaction *act; 8753 struct target_sigaction *oact; 8754 8755 if (sigsetsize != sizeof(target_sigset_t)) { 8756 ret = -TARGET_EINVAL; 8757 break; 8758 } 8759 if (arg2) { 8760 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) { 8761 goto efault; 8762 } 8763 #ifdef TARGET_SPARC 8764 act->sa_restorer = restorer; 8765 #endif 8766 } else { 8767 act = NULL; 8768 } 8769 if (arg3) { 8770 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 8771 ret = -TARGET_EFAULT; 8772 goto rt_sigaction_fail; 8773 } 8774 } else 8775 oact 
= NULL; 8776 ret = get_errno(do_sigaction(arg1, act, oact)); 8777 rt_sigaction_fail: 8778 if (act) 8779 unlock_user_struct(act, arg2, 0); 8780 if (oact) 8781 unlock_user_struct(oact, arg3, 1); 8782 #endif 8783 } 8784 break; 8785 #ifdef TARGET_NR_sgetmask /* not on alpha */ 8786 case TARGET_NR_sgetmask: 8787 { 8788 sigset_t cur_set; 8789 abi_ulong target_set; 8790 ret = do_sigprocmask(0, NULL, &cur_set); 8791 if (!ret) { 8792 host_to_target_old_sigset(&target_set, &cur_set); 8793 ret = target_set; 8794 } 8795 } 8796 break; 8797 #endif 8798 #ifdef TARGET_NR_ssetmask /* not on alpha */ 8799 case TARGET_NR_ssetmask: 8800 { 8801 sigset_t set, oset; 8802 abi_ulong target_set = arg1; 8803 target_to_host_old_sigset(&set, &target_set); 8804 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 8805 if (!ret) { 8806 host_to_target_old_sigset(&target_set, &oset); 8807 ret = target_set; 8808 } 8809 } 8810 break; 8811 #endif 8812 #ifdef TARGET_NR_sigprocmask 8813 case TARGET_NR_sigprocmask: 8814 { 8815 #if defined(TARGET_ALPHA) 8816 sigset_t set, oldset; 8817 abi_ulong mask; 8818 int how; 8819 8820 switch (arg1) { 8821 case TARGET_SIG_BLOCK: 8822 how = SIG_BLOCK; 8823 break; 8824 case TARGET_SIG_UNBLOCK: 8825 how = SIG_UNBLOCK; 8826 break; 8827 case TARGET_SIG_SETMASK: 8828 how = SIG_SETMASK; 8829 break; 8830 default: 8831 ret = -TARGET_EINVAL; 8832 goto fail; 8833 } 8834 mask = arg2; 8835 target_to_host_old_sigset(&set, &mask); 8836 8837 ret = do_sigprocmask(how, &set, &oldset); 8838 if (!is_error(ret)) { 8839 host_to_target_old_sigset(&mask, &oldset); 8840 ret = mask; 8841 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 8842 } 8843 #else 8844 sigset_t set, oldset, *set_ptr; 8845 int how; 8846 8847 if (arg2) { 8848 switch (arg1) { 8849 case TARGET_SIG_BLOCK: 8850 how = SIG_BLOCK; 8851 break; 8852 case TARGET_SIG_UNBLOCK: 8853 how = SIG_UNBLOCK; 8854 break; 8855 case TARGET_SIG_SETMASK: 8856 how = SIG_SETMASK; 8857 break; 8858 default: 8859 ret = -TARGET_EINVAL; 8860 
goto fail; 8861 } 8862 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 8863 goto efault; 8864 target_to_host_old_sigset(&set, p); 8865 unlock_user(p, arg2, 0); 8866 set_ptr = &set; 8867 } else { 8868 how = 0; 8869 set_ptr = NULL; 8870 } 8871 ret = do_sigprocmask(how, set_ptr, &oldset); 8872 if (!is_error(ret) && arg3) { 8873 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 8874 goto efault; 8875 host_to_target_old_sigset(p, &oldset); 8876 unlock_user(p, arg3, sizeof(target_sigset_t)); 8877 } 8878 #endif 8879 } 8880 break; 8881 #endif 8882 case TARGET_NR_rt_sigprocmask: 8883 { 8884 int how = arg1; 8885 sigset_t set, oldset, *set_ptr; 8886 8887 if (arg4 != sizeof(target_sigset_t)) { 8888 ret = -TARGET_EINVAL; 8889 break; 8890 } 8891 8892 if (arg2) { 8893 switch(how) { 8894 case TARGET_SIG_BLOCK: 8895 how = SIG_BLOCK; 8896 break; 8897 case TARGET_SIG_UNBLOCK: 8898 how = SIG_UNBLOCK; 8899 break; 8900 case TARGET_SIG_SETMASK: 8901 how = SIG_SETMASK; 8902 break; 8903 default: 8904 ret = -TARGET_EINVAL; 8905 goto fail; 8906 } 8907 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 8908 goto efault; 8909 target_to_host_sigset(&set, p); 8910 unlock_user(p, arg2, 0); 8911 set_ptr = &set; 8912 } else { 8913 how = 0; 8914 set_ptr = NULL; 8915 } 8916 ret = do_sigprocmask(how, set_ptr, &oldset); 8917 if (!is_error(ret) && arg3) { 8918 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 8919 goto efault; 8920 host_to_target_sigset(p, &oldset); 8921 unlock_user(p, arg3, sizeof(target_sigset_t)); 8922 } 8923 } 8924 break; 8925 #ifdef TARGET_NR_sigpending 8926 case TARGET_NR_sigpending: 8927 { 8928 sigset_t set; 8929 ret = get_errno(sigpending(&set)); 8930 if (!is_error(ret)) { 8931 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 8932 goto efault; 8933 host_to_target_old_sigset(p, &set); 8934 unlock_user(p, arg1, sizeof(target_sigset_t)); 8935 } 8936 } 8937 break; 8938 #endif 8939 
case TARGET_NR_rt_sigpending: 8940 { 8941 sigset_t set; 8942 8943 /* Yes, this check is >, not != like most. We follow the kernel's 8944 * logic and it does it like this because it implements 8945 * NR_sigpending through the same code path, and in that case 8946 * the old_sigset_t is smaller in size. 8947 */ 8948 if (arg2 > sizeof(target_sigset_t)) { 8949 ret = -TARGET_EINVAL; 8950 break; 8951 } 8952 8953 ret = get_errno(sigpending(&set)); 8954 if (!is_error(ret)) { 8955 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 8956 goto efault; 8957 host_to_target_sigset(p, &set); 8958 unlock_user(p, arg1, sizeof(target_sigset_t)); 8959 } 8960 } 8961 break; 8962 #ifdef TARGET_NR_sigsuspend 8963 case TARGET_NR_sigsuspend: 8964 { 8965 TaskState *ts = cpu->opaque; 8966 #if defined(TARGET_ALPHA) 8967 abi_ulong mask = arg1; 8968 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); 8969 #else 8970 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8971 goto efault; 8972 target_to_host_old_sigset(&ts->sigsuspend_mask, p); 8973 unlock_user(p, arg1, 0); 8974 #endif 8975 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 8976 SIGSET_T_SIZE)); 8977 if (ret != -TARGET_ERESTARTSYS) { 8978 ts->in_sigsuspend = 1; 8979 } 8980 } 8981 break; 8982 #endif 8983 case TARGET_NR_rt_sigsuspend: 8984 { 8985 TaskState *ts = cpu->opaque; 8986 8987 if (arg2 != sizeof(target_sigset_t)) { 8988 ret = -TARGET_EINVAL; 8989 break; 8990 } 8991 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8992 goto efault; 8993 target_to_host_sigset(&ts->sigsuspend_mask, p); 8994 unlock_user(p, arg1, 0); 8995 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 8996 SIGSET_T_SIZE)); 8997 if (ret != -TARGET_ERESTARTSYS) { 8998 ts->in_sigsuspend = 1; 8999 } 9000 } 9001 break; 9002 case TARGET_NR_rt_sigtimedwait: 9003 { 9004 sigset_t set; 9005 struct timespec uts, *puts; 9006 siginfo_t uinfo; 9007 9008 if (arg4 != sizeof(target_sigset_t)) { 9009 ret = 
-TARGET_EINVAL; 9010 break; 9011 } 9012 9013 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9014 goto efault; 9015 target_to_host_sigset(&set, p); 9016 unlock_user(p, arg1, 0); 9017 if (arg3) { 9018 puts = &uts; 9019 target_to_host_timespec(puts, arg3); 9020 } else { 9021 puts = NULL; 9022 } 9023 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9024 SIGSET_T_SIZE)); 9025 if (!is_error(ret)) { 9026 if (arg2) { 9027 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9028 0); 9029 if (!p) { 9030 goto efault; 9031 } 9032 host_to_target_siginfo(p, &uinfo); 9033 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9034 } 9035 ret = host_to_target_signal(ret); 9036 } 9037 } 9038 break; 9039 case TARGET_NR_rt_sigqueueinfo: 9040 { 9041 siginfo_t uinfo; 9042 9043 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9044 if (!p) { 9045 goto efault; 9046 } 9047 target_to_host_siginfo(&uinfo, p); 9048 unlock_user(p, arg3, 0); 9049 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 9050 } 9051 break; 9052 case TARGET_NR_rt_tgsigqueueinfo: 9053 { 9054 siginfo_t uinfo; 9055 9056 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 9057 if (!p) { 9058 goto efault; 9059 } 9060 target_to_host_siginfo(&uinfo, p); 9061 unlock_user(p, arg4, 0); 9062 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo)); 9063 } 9064 break; 9065 #ifdef TARGET_NR_sigreturn 9066 case TARGET_NR_sigreturn: 9067 if (block_signals()) { 9068 ret = -TARGET_ERESTARTSYS; 9069 } else { 9070 ret = do_sigreturn(cpu_env); 9071 } 9072 break; 9073 #endif 9074 case TARGET_NR_rt_sigreturn: 9075 if (block_signals()) { 9076 ret = -TARGET_ERESTARTSYS; 9077 } else { 9078 ret = do_rt_sigreturn(cpu_env); 9079 } 9080 break; 9081 case TARGET_NR_sethostname: 9082 if (!(p = lock_user_string(arg1))) 9083 goto efault; 9084 ret = get_errno(sethostname(p, arg2)); 9085 unlock_user(p, arg1, 0); 9086 break; 9087 case TARGET_NR_setrlimit: 9088 { 9089 int resource = 
target_to_host_resource(arg1); 9090 struct target_rlimit *target_rlim; 9091 struct rlimit rlim; 9092 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9093 goto efault; 9094 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9095 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 9096 unlock_user_struct(target_rlim, arg2, 0); 9097 ret = get_errno(setrlimit(resource, &rlim)); 9098 } 9099 break; 9100 case TARGET_NR_getrlimit: 9101 { 9102 int resource = target_to_host_resource(arg1); 9103 struct target_rlimit *target_rlim; 9104 struct rlimit rlim; 9105 9106 ret = get_errno(getrlimit(resource, &rlim)); 9107 if (!is_error(ret)) { 9108 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9109 goto efault; 9110 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9111 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9112 unlock_user_struct(target_rlim, arg2, 1); 9113 } 9114 } 9115 break; 9116 case TARGET_NR_getrusage: 9117 { 9118 struct rusage rusage; 9119 ret = get_errno(getrusage(arg1, &rusage)); 9120 if (!is_error(ret)) { 9121 ret = host_to_target_rusage(arg2, &rusage); 9122 } 9123 } 9124 break; 9125 case TARGET_NR_gettimeofday: 9126 { 9127 struct timeval tv; 9128 ret = get_errno(gettimeofday(&tv, NULL)); 9129 if (!is_error(ret)) { 9130 if (copy_to_user_timeval(arg1, &tv)) 9131 goto efault; 9132 } 9133 } 9134 break; 9135 case TARGET_NR_settimeofday: 9136 { 9137 struct timeval tv, *ptv = NULL; 9138 struct timezone tz, *ptz = NULL; 9139 9140 if (arg1) { 9141 if (copy_from_user_timeval(&tv, arg1)) { 9142 goto efault; 9143 } 9144 ptv = &tv; 9145 } 9146 9147 if (arg2) { 9148 if (copy_from_user_timezone(&tz, arg2)) { 9149 goto efault; 9150 } 9151 ptz = &tz; 9152 } 9153 9154 ret = get_errno(settimeofday(ptv, ptz)); 9155 } 9156 break; 9157 #if defined(TARGET_NR_select) 9158 case TARGET_NR_select: 9159 #if defined(TARGET_WANT_NI_OLD_SELECT) 9160 /* some architectures used to have old_select here 9161 * but now ENOSYS it. 
9162 */ 9163 ret = -TARGET_ENOSYS; 9164 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9165 ret = do_old_select(arg1); 9166 #else 9167 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9168 #endif 9169 break; 9170 #endif 9171 #ifdef TARGET_NR_pselect6 9172 case TARGET_NR_pselect6: 9173 { 9174 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 9175 fd_set rfds, wfds, efds; 9176 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 9177 struct timespec ts, *ts_ptr; 9178 9179 /* 9180 * The 6th arg is actually two args smashed together, 9181 * so we cannot use the C library. 9182 */ 9183 sigset_t set; 9184 struct { 9185 sigset_t *set; 9186 size_t size; 9187 } sig, *sig_ptr; 9188 9189 abi_ulong arg_sigset, arg_sigsize, *arg7; 9190 target_sigset_t *target_sigset; 9191 9192 n = arg1; 9193 rfd_addr = arg2; 9194 wfd_addr = arg3; 9195 efd_addr = arg4; 9196 ts_addr = arg5; 9197 9198 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 9199 if (ret) { 9200 goto fail; 9201 } 9202 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 9203 if (ret) { 9204 goto fail; 9205 } 9206 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 9207 if (ret) { 9208 goto fail; 9209 } 9210 9211 /* 9212 * This takes a timespec, and not a timeval, so we cannot 9213 * use the do_select() helper ... 
9214 */ 9215 if (ts_addr) { 9216 if (target_to_host_timespec(&ts, ts_addr)) { 9217 goto efault; 9218 } 9219 ts_ptr = &ts; 9220 } else { 9221 ts_ptr = NULL; 9222 } 9223 9224 /* Extract the two packed args for the sigset */ 9225 if (arg6) { 9226 sig_ptr = &sig; 9227 sig.size = SIGSET_T_SIZE; 9228 9229 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 9230 if (!arg7) { 9231 goto efault; 9232 } 9233 arg_sigset = tswapal(arg7[0]); 9234 arg_sigsize = tswapal(arg7[1]); 9235 unlock_user(arg7, arg6, 0); 9236 9237 if (arg_sigset) { 9238 sig.set = &set; 9239 if (arg_sigsize != sizeof(*target_sigset)) { 9240 /* Like the kernel, we enforce correct size sigsets */ 9241 ret = -TARGET_EINVAL; 9242 goto fail; 9243 } 9244 target_sigset = lock_user(VERIFY_READ, arg_sigset, 9245 sizeof(*target_sigset), 1); 9246 if (!target_sigset) { 9247 goto efault; 9248 } 9249 target_to_host_sigset(&set, target_sigset); 9250 unlock_user(target_sigset, arg_sigset, 0); 9251 } else { 9252 sig.set = NULL; 9253 } 9254 } else { 9255 sig_ptr = NULL; 9256 } 9257 9258 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 9259 ts_ptr, sig_ptr)); 9260 9261 if (!is_error(ret)) { 9262 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 9263 goto efault; 9264 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 9265 goto efault; 9266 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 9267 goto efault; 9268 9269 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 9270 goto efault; 9271 } 9272 } 9273 break; 9274 #endif 9275 #ifdef TARGET_NR_symlink 9276 case TARGET_NR_symlink: 9277 { 9278 void *p2; 9279 p = lock_user_string(arg1); 9280 p2 = lock_user_string(arg2); 9281 if (!p || !p2) 9282 ret = -TARGET_EFAULT; 9283 else 9284 ret = get_errno(symlink(p, p2)); 9285 unlock_user(p2, arg2, 0); 9286 unlock_user(p, arg1, 0); 9287 } 9288 break; 9289 #endif 9290 #if defined(TARGET_NR_symlinkat) 9291 case TARGET_NR_symlinkat: 9292 { 9293 void *p2; 9294 p = lock_user_string(arg1); 9295 p2 = 
lock_user_string(arg3); 9296 if (!p || !p2) 9297 ret = -TARGET_EFAULT; 9298 else 9299 ret = get_errno(symlinkat(p, arg2, p2)); 9300 unlock_user(p2, arg3, 0); 9301 unlock_user(p, arg1, 0); 9302 } 9303 break; 9304 #endif 9305 #ifdef TARGET_NR_oldlstat 9306 case TARGET_NR_oldlstat: 9307 goto unimplemented; 9308 #endif 9309 #ifdef TARGET_NR_readlink 9310 case TARGET_NR_readlink: 9311 { 9312 void *p2; 9313 p = lock_user_string(arg1); 9314 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9315 if (!p || !p2) { 9316 ret = -TARGET_EFAULT; 9317 } else if (!arg3) { 9318 /* Short circuit this for the magic exe check. */ 9319 ret = -TARGET_EINVAL; 9320 } else if (is_proc_myself((const char *)p, "exe")) { 9321 char real[PATH_MAX], *temp; 9322 temp = realpath(exec_path, real); 9323 /* Return value is # of bytes that we wrote to the buffer. */ 9324 if (temp == NULL) { 9325 ret = get_errno(-1); 9326 } else { 9327 /* Don't worry about sign mismatch as earlier mapping 9328 * logic would have thrown a bad address error. */ 9329 ret = MIN(strlen(real), arg3); 9330 /* We cannot NUL terminate the string. */ 9331 memcpy(p2, real, ret); 9332 } 9333 } else { 9334 ret = get_errno(readlink(path(p), p2, arg3)); 9335 } 9336 unlock_user(p2, arg2, ret); 9337 unlock_user(p, arg1, 0); 9338 } 9339 break; 9340 #endif 9341 #if defined(TARGET_NR_readlinkat) 9342 case TARGET_NR_readlinkat: 9343 { 9344 void *p2; 9345 p = lock_user_string(arg2); 9346 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9347 if (!p || !p2) { 9348 ret = -TARGET_EFAULT; 9349 } else if (is_proc_myself((const char *)p, "exe")) { 9350 char real[PATH_MAX], *temp; 9351 temp = realpath(exec_path, real); 9352 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 9353 snprintf((char *)p2, arg4, "%s", real); 9354 } else { 9355 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 9356 } 9357 unlock_user(p2, arg3, ret); 9358 unlock_user(p, arg2, 0); 9359 } 9360 break; 9361 #endif 9362 #ifdef TARGET_NR_uselib 9363 case TARGET_NR_uselib: 9364 goto unimplemented; 9365 #endif 9366 #ifdef TARGET_NR_swapon 9367 case TARGET_NR_swapon: 9368 if (!(p = lock_user_string(arg1))) 9369 goto efault; 9370 ret = get_errno(swapon(p, arg2)); 9371 unlock_user(p, arg1, 0); 9372 break; 9373 #endif 9374 case TARGET_NR_reboot: 9375 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 9376 /* arg4 must be ignored in all other cases */ 9377 p = lock_user_string(arg4); 9378 if (!p) { 9379 goto efault; 9380 } 9381 ret = get_errno(reboot(arg1, arg2, arg3, p)); 9382 unlock_user(p, arg4, 0); 9383 } else { 9384 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 9385 } 9386 break; 9387 #ifdef TARGET_NR_readdir 9388 case TARGET_NR_readdir: 9389 goto unimplemented; 9390 #endif 9391 #ifdef TARGET_NR_mmap 9392 case TARGET_NR_mmap: 9393 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 9394 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 9395 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 9396 || defined(TARGET_S390X) 9397 { 9398 abi_ulong *v; 9399 abi_ulong v1, v2, v3, v4, v5, v6; 9400 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 9401 goto efault; 9402 v1 = tswapal(v[0]); 9403 v2 = tswapal(v[1]); 9404 v3 = tswapal(v[2]); 9405 v4 = tswapal(v[3]); 9406 v5 = tswapal(v[4]); 9407 v6 = tswapal(v[5]); 9408 unlock_user(v, arg1, 0); 9409 ret = get_errno(target_mmap(v1, v2, v3, 9410 target_to_host_bitmask(v4, mmap_flags_tbl), 9411 v5, v6)); 9412 } 9413 #else 9414 ret = get_errno(target_mmap(arg1, arg2, arg3, 9415 target_to_host_bitmask(arg4, mmap_flags_tbl), 9416 arg5, 9417 arg6)); 9418 #endif 9419 break; 9420 #endif 9421 #ifdef TARGET_NR_mmap2 9422 case TARGET_NR_mmap2: 9423 #ifndef 
MMAP_SHIFT 9424 #define MMAP_SHIFT 12 9425 #endif 9426 ret = get_errno(target_mmap(arg1, arg2, arg3, 9427 target_to_host_bitmask(arg4, mmap_flags_tbl), 9428 arg5, 9429 arg6 << MMAP_SHIFT)); 9430 break; 9431 #endif 9432 case TARGET_NR_munmap: 9433 ret = get_errno(target_munmap(arg1, arg2)); 9434 break; 9435 case TARGET_NR_mprotect: 9436 { 9437 TaskState *ts = cpu->opaque; 9438 /* Special hack to detect libc making the stack executable. */ 9439 if ((arg3 & PROT_GROWSDOWN) 9440 && arg1 >= ts->info->stack_limit 9441 && arg1 <= ts->info->start_stack) { 9442 arg3 &= ~PROT_GROWSDOWN; 9443 arg2 = arg2 + arg1 - ts->info->stack_limit; 9444 arg1 = ts->info->stack_limit; 9445 } 9446 } 9447 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 9448 break; 9449 #ifdef TARGET_NR_mremap 9450 case TARGET_NR_mremap: 9451 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 9452 break; 9453 #endif 9454 /* ??? msync/mlock/munlock are broken for softmmu. */ 9455 #ifdef TARGET_NR_msync 9456 case TARGET_NR_msync: 9457 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 9458 break; 9459 #endif 9460 #ifdef TARGET_NR_mlock 9461 case TARGET_NR_mlock: 9462 ret = get_errno(mlock(g2h(arg1), arg2)); 9463 break; 9464 #endif 9465 #ifdef TARGET_NR_munlock 9466 case TARGET_NR_munlock: 9467 ret = get_errno(munlock(g2h(arg1), arg2)); 9468 break; 9469 #endif 9470 #ifdef TARGET_NR_mlockall 9471 case TARGET_NR_mlockall: 9472 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 9473 break; 9474 #endif 9475 #ifdef TARGET_NR_munlockall 9476 case TARGET_NR_munlockall: 9477 ret = get_errno(munlockall()); 9478 break; 9479 #endif 9480 case TARGET_NR_truncate: 9481 if (!(p = lock_user_string(arg1))) 9482 goto efault; 9483 ret = get_errno(truncate(p, arg2)); 9484 unlock_user(p, arg1, 0); 9485 break; 9486 case TARGET_NR_ftruncate: 9487 ret = get_errno(ftruncate(arg1, arg2)); 9488 break; 9489 case TARGET_NR_fchmod: 9490 ret = get_errno(fchmod(arg1, arg2)); 9491 break; 9492 #if defined(TARGET_NR_fchmodat) 
9493 case TARGET_NR_fchmodat: 9494 if (!(p = lock_user_string(arg2))) 9495 goto efault; 9496 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 9497 unlock_user(p, arg2, 0); 9498 break; 9499 #endif 9500 case TARGET_NR_getpriority: 9501 /* Note that negative values are valid for getpriority, so we must 9502 differentiate based on errno settings. */ 9503 errno = 0; 9504 ret = getpriority(arg1, arg2); 9505 if (ret == -1 && errno != 0) { 9506 ret = -host_to_target_errno(errno); 9507 break; 9508 } 9509 #ifdef TARGET_ALPHA 9510 /* Return value is the unbiased priority. Signal no error. */ 9511 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 9512 #else 9513 /* Return value is a biased priority to avoid negative numbers. */ 9514 ret = 20 - ret; 9515 #endif 9516 break; 9517 case TARGET_NR_setpriority: 9518 ret = get_errno(setpriority(arg1, arg2, arg3)); 9519 break; 9520 #ifdef TARGET_NR_profil 9521 case TARGET_NR_profil: 9522 goto unimplemented; 9523 #endif 9524 case TARGET_NR_statfs: 9525 if (!(p = lock_user_string(arg1))) 9526 goto efault; 9527 ret = get_errno(statfs(path(p), &stfs)); 9528 unlock_user(p, arg1, 0); 9529 convert_statfs: 9530 if (!is_error(ret)) { 9531 struct target_statfs *target_stfs; 9532 9533 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 9534 goto efault; 9535 __put_user(stfs.f_type, &target_stfs->f_type); 9536 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9537 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9538 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9539 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9540 __put_user(stfs.f_files, &target_stfs->f_files); 9541 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9542 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9543 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9544 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9545 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9546 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9547 
unlock_user_struct(target_stfs, arg2, 1); 9548 } 9549 break; 9550 case TARGET_NR_fstatfs: 9551 ret = get_errno(fstatfs(arg1, &stfs)); 9552 goto convert_statfs; 9553 #ifdef TARGET_NR_statfs64 9554 case TARGET_NR_statfs64: 9555 if (!(p = lock_user_string(arg1))) 9556 goto efault; 9557 ret = get_errno(statfs(path(p), &stfs)); 9558 unlock_user(p, arg1, 0); 9559 convert_statfs64: 9560 if (!is_error(ret)) { 9561 struct target_statfs64 *target_stfs; 9562 9563 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 9564 goto efault; 9565 __put_user(stfs.f_type, &target_stfs->f_type); 9566 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9567 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9568 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9569 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9570 __put_user(stfs.f_files, &target_stfs->f_files); 9571 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9572 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9573 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9574 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9575 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9576 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9577 unlock_user_struct(target_stfs, arg3, 1); 9578 } 9579 break; 9580 case TARGET_NR_fstatfs64: 9581 ret = get_errno(fstatfs(arg1, &stfs)); 9582 goto convert_statfs64; 9583 #endif 9584 #ifdef TARGET_NR_ioperm 9585 case TARGET_NR_ioperm: 9586 goto unimplemented; 9587 #endif 9588 #ifdef TARGET_NR_socketcall 9589 case TARGET_NR_socketcall: 9590 ret = do_socketcall(arg1, arg2); 9591 break; 9592 #endif 9593 #ifdef TARGET_NR_accept 9594 case TARGET_NR_accept: 9595 ret = do_accept4(arg1, arg2, arg3, 0); 9596 break; 9597 #endif 9598 #ifdef TARGET_NR_accept4 9599 case TARGET_NR_accept4: 9600 ret = do_accept4(arg1, arg2, arg3, arg4); 9601 break; 9602 #endif 9603 #ifdef TARGET_NR_bind 9604 case TARGET_NR_bind: 9605 ret = do_bind(arg1, arg2, arg3); 9606 break; 9607 
#endif 9608 #ifdef TARGET_NR_connect 9609 case TARGET_NR_connect: 9610 ret = do_connect(arg1, arg2, arg3); 9611 break; 9612 #endif 9613 #ifdef TARGET_NR_getpeername 9614 case TARGET_NR_getpeername: 9615 ret = do_getpeername(arg1, arg2, arg3); 9616 break; 9617 #endif 9618 #ifdef TARGET_NR_getsockname 9619 case TARGET_NR_getsockname: 9620 ret = do_getsockname(arg1, arg2, arg3); 9621 break; 9622 #endif 9623 #ifdef TARGET_NR_getsockopt 9624 case TARGET_NR_getsockopt: 9625 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 9626 break; 9627 #endif 9628 #ifdef TARGET_NR_listen 9629 case TARGET_NR_listen: 9630 ret = get_errno(listen(arg1, arg2)); 9631 break; 9632 #endif 9633 #ifdef TARGET_NR_recv 9634 case TARGET_NR_recv: 9635 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 9636 break; 9637 #endif 9638 #ifdef TARGET_NR_recvfrom 9639 case TARGET_NR_recvfrom: 9640 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 9641 break; 9642 #endif 9643 #ifdef TARGET_NR_recvmsg 9644 case TARGET_NR_recvmsg: 9645 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 9646 break; 9647 #endif 9648 #ifdef TARGET_NR_send 9649 case TARGET_NR_send: 9650 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 9651 break; 9652 #endif 9653 #ifdef TARGET_NR_sendmsg 9654 case TARGET_NR_sendmsg: 9655 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 9656 break; 9657 #endif 9658 #ifdef TARGET_NR_sendmmsg 9659 case TARGET_NR_sendmmsg: 9660 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 9661 break; 9662 case TARGET_NR_recvmmsg: 9663 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 9664 break; 9665 #endif 9666 #ifdef TARGET_NR_sendto 9667 case TARGET_NR_sendto: 9668 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 9669 break; 9670 #endif 9671 #ifdef TARGET_NR_shutdown 9672 case TARGET_NR_shutdown: 9673 ret = get_errno(shutdown(arg1, arg2)); 9674 break; 9675 #endif 9676 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 9677 case TARGET_NR_getrandom: 9678 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9679 if 
(!p) { 9680 goto efault; 9681 } 9682 ret = get_errno(getrandom(p, arg2, arg3)); 9683 unlock_user(p, arg1, ret); 9684 break; 9685 #endif 9686 #ifdef TARGET_NR_socket 9687 case TARGET_NR_socket: 9688 ret = do_socket(arg1, arg2, arg3); 9689 break; 9690 #endif 9691 #ifdef TARGET_NR_socketpair 9692 case TARGET_NR_socketpair: 9693 ret = do_socketpair(arg1, arg2, arg3, arg4); 9694 break; 9695 #endif 9696 #ifdef TARGET_NR_setsockopt 9697 case TARGET_NR_setsockopt: 9698 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 9699 break; 9700 #endif 9701 #if defined(TARGET_NR_syslog) 9702 case TARGET_NR_syslog: 9703 { 9704 int len = arg2; 9705 9706 switch (arg1) { 9707 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 9708 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 9709 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 9710 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 9711 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 9712 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 9713 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 9714 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 9715 { 9716 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 9717 } 9718 break; 9719 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 9720 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 9721 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 9722 { 9723 ret = -TARGET_EINVAL; 9724 if (len < 0) { 9725 goto fail; 9726 } 9727 ret = 0; 9728 if (len == 0) { 9729 break; 9730 } 9731 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9732 if (!p) { 9733 ret = -TARGET_EFAULT; 9734 goto fail; 9735 } 9736 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 9737 unlock_user(p, arg2, arg3); 9738 } 9739 break; 9740 default: 9741 ret = -EINVAL; 9742 break; 9743 } 9744 } 9745 break; 9746 #endif 9747 case TARGET_NR_setitimer: 9748 { 9749 struct itimerval value, ovalue, *pvalue; 9750 9751 if (arg2) { 
9752 pvalue = &value; 9753 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 9754 || copy_from_user_timeval(&pvalue->it_value, 9755 arg2 + sizeof(struct target_timeval))) 9756 goto efault; 9757 } else { 9758 pvalue = NULL; 9759 } 9760 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 9761 if (!is_error(ret) && arg3) { 9762 if (copy_to_user_timeval(arg3, 9763 &ovalue.it_interval) 9764 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 9765 &ovalue.it_value)) 9766 goto efault; 9767 } 9768 } 9769 break; 9770 case TARGET_NR_getitimer: 9771 { 9772 struct itimerval value; 9773 9774 ret = get_errno(getitimer(arg1, &value)); 9775 if (!is_error(ret) && arg2) { 9776 if (copy_to_user_timeval(arg2, 9777 &value.it_interval) 9778 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 9779 &value.it_value)) 9780 goto efault; 9781 } 9782 } 9783 break; 9784 #ifdef TARGET_NR_stat 9785 case TARGET_NR_stat: 9786 if (!(p = lock_user_string(arg1))) 9787 goto efault; 9788 ret = get_errno(stat(path(p), &st)); 9789 unlock_user(p, arg1, 0); 9790 goto do_stat; 9791 #endif 9792 #ifdef TARGET_NR_lstat 9793 case TARGET_NR_lstat: 9794 if (!(p = lock_user_string(arg1))) 9795 goto efault; 9796 ret = get_errno(lstat(path(p), &st)); 9797 unlock_user(p, arg1, 0); 9798 goto do_stat; 9799 #endif 9800 case TARGET_NR_fstat: 9801 { 9802 ret = get_errno(fstat(arg1, &st)); 9803 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 9804 do_stat: 9805 #endif 9806 if (!is_error(ret)) { 9807 struct target_stat *target_st; 9808 9809 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 9810 goto efault; 9811 memset(target_st, 0, sizeof(*target_st)); 9812 __put_user(st.st_dev, &target_st->st_dev); 9813 __put_user(st.st_ino, &target_st->st_ino); 9814 __put_user(st.st_mode, &target_st->st_mode); 9815 __put_user(st.st_uid, &target_st->st_uid); 9816 __put_user(st.st_gid, &target_st->st_gid); 9817 __put_user(st.st_nlink, &target_st->st_nlink); 9818 __put_user(st.st_rdev, 
&target_st->st_rdev); 9819 __put_user(st.st_size, &target_st->st_size); 9820 __put_user(st.st_blksize, &target_st->st_blksize); 9821 __put_user(st.st_blocks, &target_st->st_blocks); 9822 __put_user(st.st_atime, &target_st->target_st_atime); 9823 __put_user(st.st_mtime, &target_st->target_st_mtime); 9824 __put_user(st.st_ctime, &target_st->target_st_ctime); 9825 unlock_user_struct(target_st, arg2, 1); 9826 } 9827 } 9828 break; 9829 #ifdef TARGET_NR_olduname 9830 case TARGET_NR_olduname: 9831 goto unimplemented; 9832 #endif 9833 #ifdef TARGET_NR_iopl 9834 case TARGET_NR_iopl: 9835 goto unimplemented; 9836 #endif 9837 case TARGET_NR_vhangup: 9838 ret = get_errno(vhangup()); 9839 break; 9840 #ifdef TARGET_NR_idle 9841 case TARGET_NR_idle: 9842 goto unimplemented; 9843 #endif 9844 #ifdef TARGET_NR_syscall 9845 case TARGET_NR_syscall: 9846 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 9847 arg6, arg7, arg8, 0); 9848 break; 9849 #endif 9850 case TARGET_NR_wait4: 9851 { 9852 int status; 9853 abi_long status_ptr = arg2; 9854 struct rusage rusage, *rusage_ptr; 9855 abi_ulong target_rusage = arg4; 9856 abi_long rusage_err; 9857 if (target_rusage) 9858 rusage_ptr = &rusage; 9859 else 9860 rusage_ptr = NULL; 9861 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 9862 if (!is_error(ret)) { 9863 if (status_ptr && ret) { 9864 status = host_to_target_waitstatus(status); 9865 if (put_user_s32(status, status_ptr)) 9866 goto efault; 9867 } 9868 if (target_rusage) { 9869 rusage_err = host_to_target_rusage(target_rusage, &rusage); 9870 if (rusage_err) { 9871 ret = rusage_err; 9872 } 9873 } 9874 } 9875 } 9876 break; 9877 #ifdef TARGET_NR_swapoff 9878 case TARGET_NR_swapoff: 9879 if (!(p = lock_user_string(arg1))) 9880 goto efault; 9881 ret = get_errno(swapoff(p)); 9882 unlock_user(p, arg1, 0); 9883 break; 9884 #endif 9885 case TARGET_NR_sysinfo: 9886 { 9887 struct target_sysinfo *target_value; 9888 struct sysinfo value; 9889 ret = 
get_errno(sysinfo(&value)); 9890 if (!is_error(ret) && arg1) 9891 { 9892 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 9893 goto efault; 9894 __put_user(value.uptime, &target_value->uptime); 9895 __put_user(value.loads[0], &target_value->loads[0]); 9896 __put_user(value.loads[1], &target_value->loads[1]); 9897 __put_user(value.loads[2], &target_value->loads[2]); 9898 __put_user(value.totalram, &target_value->totalram); 9899 __put_user(value.freeram, &target_value->freeram); 9900 __put_user(value.sharedram, &target_value->sharedram); 9901 __put_user(value.bufferram, &target_value->bufferram); 9902 __put_user(value.totalswap, &target_value->totalswap); 9903 __put_user(value.freeswap, &target_value->freeswap); 9904 __put_user(value.procs, &target_value->procs); 9905 __put_user(value.totalhigh, &target_value->totalhigh); 9906 __put_user(value.freehigh, &target_value->freehigh); 9907 __put_user(value.mem_unit, &target_value->mem_unit); 9908 unlock_user_struct(target_value, arg1, 1); 9909 } 9910 } 9911 break; 9912 #ifdef TARGET_NR_ipc 9913 case TARGET_NR_ipc: 9914 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 9915 break; 9916 #endif 9917 #ifdef TARGET_NR_semget 9918 case TARGET_NR_semget: 9919 ret = get_errno(semget(arg1, arg2, arg3)); 9920 break; 9921 #endif 9922 #ifdef TARGET_NR_semop 9923 case TARGET_NR_semop: 9924 ret = do_semop(arg1, arg2, arg3); 9925 break; 9926 #endif 9927 #ifdef TARGET_NR_semctl 9928 case TARGET_NR_semctl: 9929 ret = do_semctl(arg1, arg2, arg3, arg4); 9930 break; 9931 #endif 9932 #ifdef TARGET_NR_msgctl 9933 case TARGET_NR_msgctl: 9934 ret = do_msgctl(arg1, arg2, arg3); 9935 break; 9936 #endif 9937 #ifdef TARGET_NR_msgget 9938 case TARGET_NR_msgget: 9939 ret = get_errno(msgget(arg1, arg2)); 9940 break; 9941 #endif 9942 #ifdef TARGET_NR_msgrcv 9943 case TARGET_NR_msgrcv: 9944 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 9945 break; 9946 #endif 9947 #ifdef TARGET_NR_msgsnd 9948 case TARGET_NR_msgsnd: 9949 ret = 
do_msgsnd(arg1, arg2, arg3, arg4); 9950 break; 9951 #endif 9952 #ifdef TARGET_NR_shmget 9953 case TARGET_NR_shmget: 9954 ret = get_errno(shmget(arg1, arg2, arg3)); 9955 break; 9956 #endif 9957 #ifdef TARGET_NR_shmctl 9958 case TARGET_NR_shmctl: 9959 ret = do_shmctl(arg1, arg2, arg3); 9960 break; 9961 #endif 9962 #ifdef TARGET_NR_shmat 9963 case TARGET_NR_shmat: 9964 ret = do_shmat(cpu_env, arg1, arg2, arg3); 9965 break; 9966 #endif 9967 #ifdef TARGET_NR_shmdt 9968 case TARGET_NR_shmdt: 9969 ret = do_shmdt(arg1); 9970 break; 9971 #endif 9972 case TARGET_NR_fsync: 9973 ret = get_errno(fsync(arg1)); 9974 break; 9975 case TARGET_NR_clone: 9976 /* Linux manages to have three different orderings for its 9977 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 9978 * match the kernel's CONFIG_CLONE_* settings. 9979 * Microblaze is further special in that it uses a sixth 9980 * implicit argument to clone for the TLS pointer. 9981 */ 9982 #if defined(TARGET_MICROBLAZE) 9983 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 9984 #elif defined(TARGET_CLONE_BACKWARDS) 9985 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 9986 #elif defined(TARGET_CLONE_BACKWARDS2) 9987 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 9988 #else 9989 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 9990 #endif 9991 break; 9992 #ifdef __NR_exit_group 9993 /* new thread calls */ 9994 case TARGET_NR_exit_group: 9995 #ifdef TARGET_GPROF 9996 _mcleanup(); 9997 #endif 9998 gdb_exit(cpu_env, arg1); 9999 ret = get_errno(exit_group(arg1)); 10000 break; 10001 #endif 10002 case TARGET_NR_setdomainname: 10003 if (!(p = lock_user_string(arg1))) 10004 goto efault; 10005 ret = get_errno(setdomainname(p, arg2)); 10006 unlock_user(p, arg1, 0); 10007 break; 10008 case TARGET_NR_uname: 10009 /* no need to transcode because we use the linux syscall */ 10010 { 10011 struct new_utsname * buf; 10012 10013 if (!lock_user_struct(VERIFY_WRITE, buf, 
arg1, 0)) 10014 goto efault; 10015 ret = get_errno(sys_uname(buf)); 10016 if (!is_error(ret)) { 10017 /* Overwrite the native machine name with whatever is being 10018 emulated. */ 10019 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 10020 /* Allow the user to override the reported release. */ 10021 if (qemu_uname_release && *qemu_uname_release) { 10022 g_strlcpy(buf->release, qemu_uname_release, 10023 sizeof(buf->release)); 10024 } 10025 } 10026 unlock_user_struct(buf, arg1, 1); 10027 } 10028 break; 10029 #ifdef TARGET_I386 10030 case TARGET_NR_modify_ldt: 10031 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 10032 break; 10033 #if !defined(TARGET_X86_64) 10034 case TARGET_NR_vm86old: 10035 goto unimplemented; 10036 case TARGET_NR_vm86: 10037 ret = do_vm86(cpu_env, arg1, arg2); 10038 break; 10039 #endif 10040 #endif 10041 case TARGET_NR_adjtimex: 10042 { 10043 struct timex host_buf; 10044 10045 if (target_to_host_timex(&host_buf, arg1) != 0) { 10046 goto efault; 10047 } 10048 ret = get_errno(adjtimex(&host_buf)); 10049 if (!is_error(ret)) { 10050 if (host_to_target_timex(arg1, &host_buf) != 0) { 10051 goto efault; 10052 } 10053 } 10054 } 10055 break; 10056 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10057 case TARGET_NR_clock_adjtime: 10058 { 10059 struct timex htx, *phtx = &htx; 10060 10061 if (target_to_host_timex(phtx, arg2) != 0) { 10062 goto efault; 10063 } 10064 ret = get_errno(clock_adjtime(arg1, phtx)); 10065 if (!is_error(ret) && phtx) { 10066 if (host_to_target_timex(arg2, phtx) != 0) { 10067 goto efault; 10068 } 10069 } 10070 } 10071 break; 10072 #endif 10073 #ifdef TARGET_NR_create_module 10074 case TARGET_NR_create_module: 10075 #endif 10076 case TARGET_NR_init_module: 10077 case TARGET_NR_delete_module: 10078 #ifdef TARGET_NR_get_kernel_syms 10079 case TARGET_NR_get_kernel_syms: 10080 #endif 10081 goto unimplemented; 10082 case TARGET_NR_quotactl: 10083 goto unimplemented; 10084 case TARGET_NR_getpgid: 10085 ret = 
get_errno(getpgid(arg1)); 10086 break; 10087 case TARGET_NR_fchdir: 10088 ret = get_errno(fchdir(arg1)); 10089 break; 10090 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 10091 case TARGET_NR_bdflush: 10092 goto unimplemented; 10093 #endif 10094 #ifdef TARGET_NR_sysfs 10095 case TARGET_NR_sysfs: 10096 goto unimplemented; 10097 #endif 10098 case TARGET_NR_personality: 10099 ret = get_errno(personality(arg1)); 10100 break; 10101 #ifdef TARGET_NR_afs_syscall 10102 case TARGET_NR_afs_syscall: 10103 goto unimplemented; 10104 #endif 10105 #ifdef TARGET_NR__llseek /* Not on alpha */ 10106 case TARGET_NR__llseek: 10107 { 10108 int64_t res; 10109 #if !defined(__NR_llseek) 10110 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10111 if (res == -1) { 10112 ret = get_errno(res); 10113 } else { 10114 ret = 0; 10115 } 10116 #else 10117 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10118 #endif 10119 if ((ret == 0) && put_user_s64(res, arg4)) { 10120 goto efault; 10121 } 10122 } 10123 break; 10124 #endif 10125 #ifdef TARGET_NR_getdents 10126 case TARGET_NR_getdents: 10127 #ifdef __NR_getdents 10128 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 10129 { 10130 struct target_dirent *target_dirp; 10131 struct linux_dirent *dirp; 10132 abi_long count = arg3; 10133 10134 dirp = g_try_malloc(count); 10135 if (!dirp) { 10136 ret = -TARGET_ENOMEM; 10137 goto fail; 10138 } 10139 10140 ret = get_errno(sys_getdents(arg1, dirp, count)); 10141 if (!is_error(ret)) { 10142 struct linux_dirent *de; 10143 struct target_dirent *tde; 10144 int len = ret; 10145 int reclen, treclen; 10146 int count1, tnamelen; 10147 10148 count1 = 0; 10149 de = dirp; 10150 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10151 goto efault; 10152 tde = target_dirp; 10153 while (len > 0) { 10154 reclen = de->d_reclen; 10155 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 10156 assert(tnamelen >= 0); 10157 treclen = tnamelen + offsetof(struct target_dirent, d_name); 
10158 assert(count1 + treclen <= count); 10159 tde->d_reclen = tswap16(treclen); 10160 tde->d_ino = tswapal(de->d_ino); 10161 tde->d_off = tswapal(de->d_off); 10162 memcpy(tde->d_name, de->d_name, tnamelen); 10163 de = (struct linux_dirent *)((char *)de + reclen); 10164 len -= reclen; 10165 tde = (struct target_dirent *)((char *)tde + treclen); 10166 count1 += treclen; 10167 } 10168 ret = count1; 10169 unlock_user(target_dirp, arg2, ret); 10170 } 10171 g_free(dirp); 10172 } 10173 #else 10174 { 10175 struct linux_dirent *dirp; 10176 abi_long count = arg3; 10177 10178 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10179 goto efault; 10180 ret = get_errno(sys_getdents(arg1, dirp, count)); 10181 if (!is_error(ret)) { 10182 struct linux_dirent *de; 10183 int len = ret; 10184 int reclen; 10185 de = dirp; 10186 while (len > 0) { 10187 reclen = de->d_reclen; 10188 if (reclen > len) 10189 break; 10190 de->d_reclen = tswap16(reclen); 10191 tswapls(&de->d_ino); 10192 tswapls(&de->d_off); 10193 de = (struct linux_dirent *)((char *)de + reclen); 10194 len -= reclen; 10195 } 10196 } 10197 unlock_user(dirp, arg2, ret); 10198 } 10199 #endif 10200 #else 10201 /* Implement getdents in terms of getdents64 */ 10202 { 10203 struct linux_dirent64 *dirp; 10204 abi_long count = arg3; 10205 10206 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 10207 if (!dirp) { 10208 goto efault; 10209 } 10210 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10211 if (!is_error(ret)) { 10212 /* Convert the dirent64 structs to target dirent. We do this 10213 * in-place, since we can guarantee that a target_dirent is no 10214 * larger than a dirent64; however this means we have to be 10215 * careful to read everything before writing in the new format. 
10216 */ 10217 struct linux_dirent64 *de; 10218 struct target_dirent *tde; 10219 int len = ret; 10220 int tlen = 0; 10221 10222 de = dirp; 10223 tde = (struct target_dirent *)dirp; 10224 while (len > 0) { 10225 int namelen, treclen; 10226 int reclen = de->d_reclen; 10227 uint64_t ino = de->d_ino; 10228 int64_t off = de->d_off; 10229 uint8_t type = de->d_type; 10230 10231 namelen = strlen(de->d_name); 10232 treclen = offsetof(struct target_dirent, d_name) 10233 + namelen + 2; 10234 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 10235 10236 memmove(tde->d_name, de->d_name, namelen + 1); 10237 tde->d_ino = tswapal(ino); 10238 tde->d_off = tswapal(off); 10239 tde->d_reclen = tswap16(treclen); 10240 /* The target_dirent type is in what was formerly a padding 10241 * byte at the end of the structure: 10242 */ 10243 *(((char *)tde) + treclen - 1) = type; 10244 10245 de = (struct linux_dirent64 *)((char *)de + reclen); 10246 tde = (struct target_dirent *)((char *)tde + treclen); 10247 len -= reclen; 10248 tlen += treclen; 10249 } 10250 ret = tlen; 10251 } 10252 unlock_user(dirp, arg2, ret); 10253 } 10254 #endif 10255 break; 10256 #endif /* TARGET_NR_getdents */ 10257 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10258 case TARGET_NR_getdents64: 10259 { 10260 struct linux_dirent64 *dirp; 10261 abi_long count = arg3; 10262 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10263 goto efault; 10264 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10265 if (!is_error(ret)) { 10266 struct linux_dirent64 *de; 10267 int len = ret; 10268 int reclen; 10269 de = dirp; 10270 while (len > 0) { 10271 reclen = de->d_reclen; 10272 if (reclen > len) 10273 break; 10274 de->d_reclen = tswap16(reclen); 10275 tswap64s((uint64_t *)&de->d_ino); 10276 tswap64s((uint64_t *)&de->d_off); 10277 de = (struct linux_dirent64 *)((char *)de + reclen); 10278 len -= reclen; 10279 } 10280 } 10281 unlock_user(dirp, arg2, ret); 10282 } 10283 break; 10284 #endif /* 
TARGET_NR_getdents64 */ 10285 #if defined(TARGET_NR__newselect) 10286 case TARGET_NR__newselect: 10287 ret = do_select(arg1, arg2, arg3, arg4, arg5); 10288 break; 10289 #endif 10290 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 10291 # ifdef TARGET_NR_poll 10292 case TARGET_NR_poll: 10293 # endif 10294 # ifdef TARGET_NR_ppoll 10295 case TARGET_NR_ppoll: 10296 # endif 10297 { 10298 struct target_pollfd *target_pfd; 10299 unsigned int nfds = arg2; 10300 struct pollfd *pfd; 10301 unsigned int i; 10302 10303 pfd = NULL; 10304 target_pfd = NULL; 10305 if (nfds) { 10306 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) { 10307 ret = -TARGET_EINVAL; 10308 break; 10309 } 10310 10311 target_pfd = lock_user(VERIFY_WRITE, arg1, 10312 sizeof(struct target_pollfd) * nfds, 1); 10313 if (!target_pfd) { 10314 goto efault; 10315 } 10316 10317 pfd = alloca(sizeof(struct pollfd) * nfds); 10318 for (i = 0; i < nfds; i++) { 10319 pfd[i].fd = tswap32(target_pfd[i].fd); 10320 pfd[i].events = tswap16(target_pfd[i].events); 10321 } 10322 } 10323 10324 switch (num) { 10325 # ifdef TARGET_NR_ppoll 10326 case TARGET_NR_ppoll: 10327 { 10328 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 10329 target_sigset_t *target_set; 10330 sigset_t _set, *set = &_set; 10331 10332 if (arg3) { 10333 if (target_to_host_timespec(timeout_ts, arg3)) { 10334 unlock_user(target_pfd, arg1, 0); 10335 goto efault; 10336 } 10337 } else { 10338 timeout_ts = NULL; 10339 } 10340 10341 if (arg4) { 10342 if (arg5 != sizeof(target_sigset_t)) { 10343 unlock_user(target_pfd, arg1, 0); 10344 ret = -TARGET_EINVAL; 10345 break; 10346 } 10347 10348 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 10349 if (!target_set) { 10350 unlock_user(target_pfd, arg1, 0); 10351 goto efault; 10352 } 10353 target_to_host_sigset(set, target_set); 10354 } else { 10355 set = NULL; 10356 } 10357 10358 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts, 10359 set, SIGSET_T_SIZE)); 10360 10361 if 
(!is_error(ret) && arg3) { 10362 host_to_target_timespec(arg3, timeout_ts); 10363 } 10364 if (arg4) { 10365 unlock_user(target_set, arg4, 0); 10366 } 10367 break; 10368 } 10369 # endif 10370 # ifdef TARGET_NR_poll 10371 case TARGET_NR_poll: 10372 { 10373 struct timespec ts, *pts; 10374 10375 if (arg3 >= 0) { 10376 /* Convert ms to secs, ns */ 10377 ts.tv_sec = arg3 / 1000; 10378 ts.tv_nsec = (arg3 % 1000) * 1000000LL; 10379 pts = &ts; 10380 } else { 10381 /* -ve poll() timeout means "infinite" */ 10382 pts = NULL; 10383 } 10384 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0)); 10385 break; 10386 } 10387 # endif 10388 default: 10389 g_assert_not_reached(); 10390 } 10391 10392 if (!is_error(ret)) { 10393 for(i = 0; i < nfds; i++) { 10394 target_pfd[i].revents = tswap16(pfd[i].revents); 10395 } 10396 } 10397 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 10398 } 10399 break; 10400 #endif 10401 case TARGET_NR_flock: 10402 /* NOTE: the flock constant seems to be the same for every 10403 Linux platform */ 10404 ret = get_errno(safe_flock(arg1, arg2)); 10405 break; 10406 case TARGET_NR_readv: 10407 { 10408 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10409 if (vec != NULL) { 10410 ret = get_errno(safe_readv(arg1, vec, arg3)); 10411 unlock_iovec(vec, arg2, arg3, 1); 10412 } else { 10413 ret = -host_to_target_errno(errno); 10414 } 10415 } 10416 break; 10417 case TARGET_NR_writev: 10418 { 10419 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10420 if (vec != NULL) { 10421 ret = get_errno(safe_writev(arg1, vec, arg3)); 10422 unlock_iovec(vec, arg2, arg3, 0); 10423 } else { 10424 ret = -host_to_target_errno(errno); 10425 } 10426 } 10427 break; 10428 #if defined(TARGET_NR_preadv) 10429 case TARGET_NR_preadv: 10430 { 10431 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10432 if (vec != NULL) { 10433 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5)); 10434 unlock_iovec(vec, arg2, arg3, 1); 10435 } else { 
10436 ret = -host_to_target_errno(errno); 10437 } 10438 } 10439 break; 10440 #endif 10441 #if defined(TARGET_NR_pwritev) 10442 case TARGET_NR_pwritev: 10443 { 10444 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10445 if (vec != NULL) { 10446 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5)); 10447 unlock_iovec(vec, arg2, arg3, 0); 10448 } else { 10449 ret = -host_to_target_errno(errno); 10450 } 10451 } 10452 break; 10453 #endif 10454 case TARGET_NR_getsid: 10455 ret = get_errno(getsid(arg1)); 10456 break; 10457 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10458 case TARGET_NR_fdatasync: 10459 ret = get_errno(fdatasync(arg1)); 10460 break; 10461 #endif 10462 #ifdef TARGET_NR__sysctl 10463 case TARGET_NR__sysctl: 10464 /* We don't implement this, but ENOTDIR is always a safe 10465 return value. */ 10466 ret = -TARGET_ENOTDIR; 10467 break; 10468 #endif 10469 case TARGET_NR_sched_getaffinity: 10470 { 10471 unsigned int mask_size; 10472 unsigned long *mask; 10473 10474 /* 10475 * sched_getaffinity needs multiples of ulong, so need to take 10476 * care of mismatches between target ulong and host ulong sizes. 10477 */ 10478 if (arg2 & (sizeof(abi_ulong) - 1)) { 10479 ret = -TARGET_EINVAL; 10480 break; 10481 } 10482 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10483 10484 mask = alloca(mask_size); 10485 memset(mask, 0, mask_size); 10486 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10487 10488 if (!is_error(ret)) { 10489 if (ret > arg2) { 10490 /* More data returned than the caller's buffer will fit. 10491 * This only happens if sizeof(abi_long) < sizeof(long) 10492 * and the caller passed us a buffer holding an odd number 10493 * of abi_longs. If the host kernel is actually using the 10494 * extra 4 bytes then fail EINVAL; otherwise we can just 10495 * ignore them and only copy the interesting part. 
10496 */ 10497 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10498 if (numcpus > arg2 * 8) { 10499 ret = -TARGET_EINVAL; 10500 break; 10501 } 10502 ret = arg2; 10503 } 10504 10505 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 10506 goto efault; 10507 } 10508 } 10509 } 10510 break; 10511 case TARGET_NR_sched_setaffinity: 10512 { 10513 unsigned int mask_size; 10514 unsigned long *mask; 10515 10516 /* 10517 * sched_setaffinity needs multiples of ulong, so need to take 10518 * care of mismatches between target ulong and host ulong sizes. 10519 */ 10520 if (arg2 & (sizeof(abi_ulong) - 1)) { 10521 ret = -TARGET_EINVAL; 10522 break; 10523 } 10524 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10525 mask = alloca(mask_size); 10526 10527 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 10528 if (ret) { 10529 break; 10530 } 10531 10532 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 10533 } 10534 break; 10535 case TARGET_NR_getcpu: 10536 { 10537 unsigned cpu, node; 10538 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 10539 arg2 ? 
&node : NULL, 10540 NULL)); 10541 if (is_error(ret)) { 10542 goto fail; 10543 } 10544 if (arg1 && put_user_u32(cpu, arg1)) { 10545 goto efault; 10546 } 10547 if (arg2 && put_user_u32(node, arg2)) { 10548 goto efault; 10549 } 10550 } 10551 break; 10552 case TARGET_NR_sched_setparam: 10553 { 10554 struct sched_param *target_schp; 10555 struct sched_param schp; 10556 10557 if (arg2 == 0) { 10558 return -TARGET_EINVAL; 10559 } 10560 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 10561 goto efault; 10562 schp.sched_priority = tswap32(target_schp->sched_priority); 10563 unlock_user_struct(target_schp, arg2, 0); 10564 ret = get_errno(sched_setparam(arg1, &schp)); 10565 } 10566 break; 10567 case TARGET_NR_sched_getparam: 10568 { 10569 struct sched_param *target_schp; 10570 struct sched_param schp; 10571 10572 if (arg2 == 0) { 10573 return -TARGET_EINVAL; 10574 } 10575 ret = get_errno(sched_getparam(arg1, &schp)); 10576 if (!is_error(ret)) { 10577 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 10578 goto efault; 10579 target_schp->sched_priority = tswap32(schp.sched_priority); 10580 unlock_user_struct(target_schp, arg2, 1); 10581 } 10582 } 10583 break; 10584 case TARGET_NR_sched_setscheduler: 10585 { 10586 struct sched_param *target_schp; 10587 struct sched_param schp; 10588 if (arg3 == 0) { 10589 return -TARGET_EINVAL; 10590 } 10591 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 10592 goto efault; 10593 schp.sched_priority = tswap32(target_schp->sched_priority); 10594 unlock_user_struct(target_schp, arg3, 0); 10595 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 10596 } 10597 break; 10598 case TARGET_NR_sched_getscheduler: 10599 ret = get_errno(sched_getscheduler(arg1)); 10600 break; 10601 case TARGET_NR_sched_yield: 10602 ret = get_errno(sched_yield()); 10603 break; 10604 case TARGET_NR_sched_get_priority_max: 10605 ret = get_errno(sched_get_priority_max(arg1)); 10606 break; 10607 case TARGET_NR_sched_get_priority_min: 10608 ret = 
get_errno(sched_get_priority_min(arg1)); 10609 break; 10610 case TARGET_NR_sched_rr_get_interval: 10611 { 10612 struct timespec ts; 10613 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10614 if (!is_error(ret)) { 10615 ret = host_to_target_timespec(arg2, &ts); 10616 } 10617 } 10618 break; 10619 case TARGET_NR_nanosleep: 10620 { 10621 struct timespec req, rem; 10622 target_to_host_timespec(&req, arg1); 10623 ret = get_errno(safe_nanosleep(&req, &rem)); 10624 if (is_error(ret) && arg2) { 10625 host_to_target_timespec(arg2, &rem); 10626 } 10627 } 10628 break; 10629 #ifdef TARGET_NR_query_module 10630 case TARGET_NR_query_module: 10631 goto unimplemented; 10632 #endif 10633 #ifdef TARGET_NR_nfsservctl 10634 case TARGET_NR_nfsservctl: 10635 goto unimplemented; 10636 #endif 10637 case TARGET_NR_prctl: 10638 switch (arg1) { 10639 case PR_GET_PDEATHSIG: 10640 { 10641 int deathsig; 10642 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 10643 if (!is_error(ret) && arg2 10644 && put_user_ual(deathsig, arg2)) { 10645 goto efault; 10646 } 10647 break; 10648 } 10649 #ifdef PR_GET_NAME 10650 case PR_GET_NAME: 10651 { 10652 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 10653 if (!name) { 10654 goto efault; 10655 } 10656 ret = get_errno(prctl(arg1, (unsigned long)name, 10657 arg3, arg4, arg5)); 10658 unlock_user(name, arg2, 16); 10659 break; 10660 } 10661 case PR_SET_NAME: 10662 { 10663 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 10664 if (!name) { 10665 goto efault; 10666 } 10667 ret = get_errno(prctl(arg1, (unsigned long)name, 10668 arg3, arg4, arg5)); 10669 unlock_user(name, arg2, 0); 10670 break; 10671 } 10672 #endif 10673 case PR_GET_SECCOMP: 10674 case PR_SET_SECCOMP: 10675 /* Disable seccomp to prevent the target disabling syscalls we 10676 * need. 
*/ 10677 ret = -TARGET_EINVAL; 10678 break; 10679 default: 10680 /* Most prctl options have no pointer arguments */ 10681 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 10682 break; 10683 } 10684 break; 10685 #ifdef TARGET_NR_arch_prctl 10686 case TARGET_NR_arch_prctl: 10687 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 10688 ret = do_arch_prctl(cpu_env, arg1, arg2); 10689 break; 10690 #else 10691 goto unimplemented; 10692 #endif 10693 #endif 10694 #ifdef TARGET_NR_pread64 10695 case TARGET_NR_pread64: 10696 if (regpairs_aligned(cpu_env, num)) { 10697 arg4 = arg5; 10698 arg5 = arg6; 10699 } 10700 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 10701 goto efault; 10702 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 10703 unlock_user(p, arg2, ret); 10704 break; 10705 case TARGET_NR_pwrite64: 10706 if (regpairs_aligned(cpu_env, num)) { 10707 arg4 = arg5; 10708 arg5 = arg6; 10709 } 10710 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 10711 goto efault; 10712 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 10713 unlock_user(p, arg2, 0); 10714 break; 10715 #endif 10716 case TARGET_NR_getcwd: 10717 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 10718 goto efault; 10719 ret = get_errno(sys_getcwd1(p, arg2)); 10720 unlock_user(p, arg1, ret); 10721 break; 10722 case TARGET_NR_capget: 10723 case TARGET_NR_capset: 10724 { 10725 struct target_user_cap_header *target_header; 10726 struct target_user_cap_data *target_data = NULL; 10727 struct __user_cap_header_struct header; 10728 struct __user_cap_data_struct data[2]; 10729 struct __user_cap_data_struct *dataptr = NULL; 10730 int i, target_datalen; 10731 int data_items = 1; 10732 10733 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 10734 goto efault; 10735 } 10736 header.version = tswap32(target_header->version); 10737 header.pid = tswap32(target_header->pid); 10738 10739 if (header.version != _LINUX_CAPABILITY_VERSION) { 10740 /* Version 2 and 
up takes pointer to two user_data structs */ 10741 data_items = 2; 10742 } 10743 10744 target_datalen = sizeof(*target_data) * data_items; 10745 10746 if (arg2) { 10747 if (num == TARGET_NR_capget) { 10748 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 10749 } else { 10750 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 10751 } 10752 if (!target_data) { 10753 unlock_user_struct(target_header, arg1, 0); 10754 goto efault; 10755 } 10756 10757 if (num == TARGET_NR_capset) { 10758 for (i = 0; i < data_items; i++) { 10759 data[i].effective = tswap32(target_data[i].effective); 10760 data[i].permitted = tswap32(target_data[i].permitted); 10761 data[i].inheritable = tswap32(target_data[i].inheritable); 10762 } 10763 } 10764 10765 dataptr = data; 10766 } 10767 10768 if (num == TARGET_NR_capget) { 10769 ret = get_errno(capget(&header, dataptr)); 10770 } else { 10771 ret = get_errno(capset(&header, dataptr)); 10772 } 10773 10774 /* The kernel always updates version for both capget and capset */ 10775 target_header->version = tswap32(header.version); 10776 unlock_user_struct(target_header, arg1, 1); 10777 10778 if (arg2) { 10779 if (num == TARGET_NR_capget) { 10780 for (i = 0; i < data_items; i++) { 10781 target_data[i].effective = tswap32(data[i].effective); 10782 target_data[i].permitted = tswap32(data[i].permitted); 10783 target_data[i].inheritable = tswap32(data[i].inheritable); 10784 } 10785 unlock_user(target_data, arg2, target_datalen); 10786 } else { 10787 unlock_user(target_data, arg2, 0); 10788 } 10789 } 10790 break; 10791 } 10792 case TARGET_NR_sigaltstack: 10793 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 10794 break; 10795 10796 #ifdef CONFIG_SENDFILE 10797 case TARGET_NR_sendfile: 10798 { 10799 off_t *offp = NULL; 10800 off_t off; 10801 if (arg3) { 10802 ret = get_user_sal(off, arg3); 10803 if (is_error(ret)) { 10804 break; 10805 } 10806 offp = &off; 10807 } 10808 ret = get_errno(sendfile(arg1, 
arg2, offp, arg4)); 10809 if (!is_error(ret) && arg3) { 10810 abi_long ret2 = put_user_sal(off, arg3); 10811 if (is_error(ret2)) { 10812 ret = ret2; 10813 } 10814 } 10815 break; 10816 } 10817 #ifdef TARGET_NR_sendfile64 10818 case TARGET_NR_sendfile64: 10819 { 10820 off_t *offp = NULL; 10821 off_t off; 10822 if (arg3) { 10823 ret = get_user_s64(off, arg3); 10824 if (is_error(ret)) { 10825 break; 10826 } 10827 offp = &off; 10828 } 10829 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 10830 if (!is_error(ret) && arg3) { 10831 abi_long ret2 = put_user_s64(off, arg3); 10832 if (is_error(ret2)) { 10833 ret = ret2; 10834 } 10835 } 10836 break; 10837 } 10838 #endif 10839 #else 10840 case TARGET_NR_sendfile: 10841 #ifdef TARGET_NR_sendfile64 10842 case TARGET_NR_sendfile64: 10843 #endif 10844 goto unimplemented; 10845 #endif 10846 10847 #ifdef TARGET_NR_getpmsg 10848 case TARGET_NR_getpmsg: 10849 goto unimplemented; 10850 #endif 10851 #ifdef TARGET_NR_putpmsg 10852 case TARGET_NR_putpmsg: 10853 goto unimplemented; 10854 #endif 10855 #ifdef TARGET_NR_vfork 10856 case TARGET_NR_vfork: 10857 ret = get_errno(do_fork(cpu_env, 10858 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 10859 0, 0, 0, 0)); 10860 break; 10861 #endif 10862 #ifdef TARGET_NR_ugetrlimit 10863 case TARGET_NR_ugetrlimit: 10864 { 10865 struct rlimit rlim; 10866 int resource = target_to_host_resource(arg1); 10867 ret = get_errno(getrlimit(resource, &rlim)); 10868 if (!is_error(ret)) { 10869 struct target_rlimit *target_rlim; 10870 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 10871 goto efault; 10872 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 10873 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 10874 unlock_user_struct(target_rlim, arg2, 1); 10875 } 10876 break; 10877 } 10878 #endif 10879 #ifdef TARGET_NR_truncate64 10880 case TARGET_NR_truncate64: 10881 if (!(p = lock_user_string(arg1))) 10882 goto efault; 10883 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 10884 
unlock_user(p, arg1, 0); 10885 break; 10886 #endif 10887 #ifdef TARGET_NR_ftruncate64 10888 case TARGET_NR_ftruncate64: 10889 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 10890 break; 10891 #endif 10892 #ifdef TARGET_NR_stat64 10893 case TARGET_NR_stat64: 10894 if (!(p = lock_user_string(arg1))) 10895 goto efault; 10896 ret = get_errno(stat(path(p), &st)); 10897 unlock_user(p, arg1, 0); 10898 if (!is_error(ret)) 10899 ret = host_to_target_stat64(cpu_env, arg2, &st); 10900 break; 10901 #endif 10902 #ifdef TARGET_NR_lstat64 10903 case TARGET_NR_lstat64: 10904 if (!(p = lock_user_string(arg1))) 10905 goto efault; 10906 ret = get_errno(lstat(path(p), &st)); 10907 unlock_user(p, arg1, 0); 10908 if (!is_error(ret)) 10909 ret = host_to_target_stat64(cpu_env, arg2, &st); 10910 break; 10911 #endif 10912 #ifdef TARGET_NR_fstat64 10913 case TARGET_NR_fstat64: 10914 ret = get_errno(fstat(arg1, &st)); 10915 if (!is_error(ret)) 10916 ret = host_to_target_stat64(cpu_env, arg2, &st); 10917 break; 10918 #endif 10919 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 10920 #ifdef TARGET_NR_fstatat64 10921 case TARGET_NR_fstatat64: 10922 #endif 10923 #ifdef TARGET_NR_newfstatat 10924 case TARGET_NR_newfstatat: 10925 #endif 10926 if (!(p = lock_user_string(arg2))) 10927 goto efault; 10928 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 10929 if (!is_error(ret)) 10930 ret = host_to_target_stat64(cpu_env, arg3, &st); 10931 break; 10932 #endif 10933 #ifdef TARGET_NR_lchown 10934 case TARGET_NR_lchown: 10935 if (!(p = lock_user_string(arg1))) 10936 goto efault; 10937 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 10938 unlock_user(p, arg1, 0); 10939 break; 10940 #endif 10941 #ifdef TARGET_NR_getuid 10942 case TARGET_NR_getuid: 10943 ret = get_errno(high2lowuid(getuid())); 10944 break; 10945 #endif 10946 #ifdef TARGET_NR_getgid 10947 case TARGET_NR_getgid: 10948 ret = get_errno(high2lowgid(getgid())); 10949 break; 10950 #endif 10951 
#ifdef TARGET_NR_geteuid 10952 case TARGET_NR_geteuid: 10953 ret = get_errno(high2lowuid(geteuid())); 10954 break; 10955 #endif 10956 #ifdef TARGET_NR_getegid 10957 case TARGET_NR_getegid: 10958 ret = get_errno(high2lowgid(getegid())); 10959 break; 10960 #endif 10961 case TARGET_NR_setreuid: 10962 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 10963 break; 10964 case TARGET_NR_setregid: 10965 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 10966 break; 10967 case TARGET_NR_getgroups: 10968 { 10969 int gidsetsize = arg1; 10970 target_id *target_grouplist; 10971 gid_t *grouplist; 10972 int i; 10973 10974 grouplist = alloca(gidsetsize * sizeof(gid_t)); 10975 ret = get_errno(getgroups(gidsetsize, grouplist)); 10976 if (gidsetsize == 0) 10977 break; 10978 if (!is_error(ret)) { 10979 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 10980 if (!target_grouplist) 10981 goto efault; 10982 for(i = 0;i < ret; i++) 10983 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 10984 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 10985 } 10986 } 10987 break; 10988 case TARGET_NR_setgroups: 10989 { 10990 int gidsetsize = arg1; 10991 target_id *target_grouplist; 10992 gid_t *grouplist = NULL; 10993 int i; 10994 if (gidsetsize) { 10995 grouplist = alloca(gidsetsize * sizeof(gid_t)); 10996 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 10997 if (!target_grouplist) { 10998 ret = -TARGET_EFAULT; 10999 goto fail; 11000 } 11001 for (i = 0; i < gidsetsize; i++) { 11002 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 11003 } 11004 unlock_user(target_grouplist, arg2, 0); 11005 } 11006 ret = get_errno(setgroups(gidsetsize, grouplist)); 11007 } 11008 break; 11009 case TARGET_NR_fchown: 11010 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 11011 break; 11012 #if defined(TARGET_NR_fchownat) 11013 case TARGET_NR_fchownat: 11014 if (!(p 
= lock_user_string(arg2))) 11015 goto efault; 11016 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 11017 low2highgid(arg4), arg5)); 11018 unlock_user(p, arg2, 0); 11019 break; 11020 #endif 11021 #ifdef TARGET_NR_setresuid 11022 case TARGET_NR_setresuid: 11023 ret = get_errno(sys_setresuid(low2highuid(arg1), 11024 low2highuid(arg2), 11025 low2highuid(arg3))); 11026 break; 11027 #endif 11028 #ifdef TARGET_NR_getresuid 11029 case TARGET_NR_getresuid: 11030 { 11031 uid_t ruid, euid, suid; 11032 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11033 if (!is_error(ret)) { 11034 if (put_user_id(high2lowuid(ruid), arg1) 11035 || put_user_id(high2lowuid(euid), arg2) 11036 || put_user_id(high2lowuid(suid), arg3)) 11037 goto efault; 11038 } 11039 } 11040 break; 11041 #endif 11042 #ifdef TARGET_NR_getresgid 11043 case TARGET_NR_setresgid: 11044 ret = get_errno(sys_setresgid(low2highgid(arg1), 11045 low2highgid(arg2), 11046 low2highgid(arg3))); 11047 break; 11048 #endif 11049 #ifdef TARGET_NR_getresgid 11050 case TARGET_NR_getresgid: 11051 { 11052 gid_t rgid, egid, sgid; 11053 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11054 if (!is_error(ret)) { 11055 if (put_user_id(high2lowgid(rgid), arg1) 11056 || put_user_id(high2lowgid(egid), arg2) 11057 || put_user_id(high2lowgid(sgid), arg3)) 11058 goto efault; 11059 } 11060 } 11061 break; 11062 #endif 11063 #ifdef TARGET_NR_chown 11064 case TARGET_NR_chown: 11065 if (!(p = lock_user_string(arg1))) 11066 goto efault; 11067 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11068 unlock_user(p, arg1, 0); 11069 break; 11070 #endif 11071 case TARGET_NR_setuid: 11072 ret = get_errno(sys_setuid(low2highuid(arg1))); 11073 break; 11074 case TARGET_NR_setgid: 11075 ret = get_errno(sys_setgid(low2highgid(arg1))); 11076 break; 11077 case TARGET_NR_setfsuid: 11078 ret = get_errno(setfsuid(arg1)); 11079 break; 11080 case TARGET_NR_setfsgid: 11081 ret = get_errno(setfsgid(arg1)); 11082 break; 11083 11084 #ifdef 
TARGET_NR_lchown32 11085 case TARGET_NR_lchown32: 11086 if (!(p = lock_user_string(arg1))) 11087 goto efault; 11088 ret = get_errno(lchown(p, arg2, arg3)); 11089 unlock_user(p, arg1, 0); 11090 break; 11091 #endif 11092 #ifdef TARGET_NR_getuid32 11093 case TARGET_NR_getuid32: 11094 ret = get_errno(getuid()); 11095 break; 11096 #endif 11097 11098 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 11099 /* Alpha specific */ 11100 case TARGET_NR_getxuid: 11101 { 11102 uid_t euid; 11103 euid=geteuid(); 11104 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 11105 } 11106 ret = get_errno(getuid()); 11107 break; 11108 #endif 11109 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 11110 /* Alpha specific */ 11111 case TARGET_NR_getxgid: 11112 { 11113 uid_t egid; 11114 egid=getegid(); 11115 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 11116 } 11117 ret = get_errno(getgid()); 11118 break; 11119 #endif 11120 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 11121 /* Alpha specific */ 11122 case TARGET_NR_osf_getsysinfo: 11123 ret = -TARGET_EOPNOTSUPP; 11124 switch (arg1) { 11125 case TARGET_GSI_IEEE_FP_CONTROL: 11126 { 11127 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 11128 11129 /* Copied from linux ieee_fpcr_to_swcr. */ 11130 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 11131 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 11132 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 11133 | SWCR_TRAP_ENABLE_DZE 11134 | SWCR_TRAP_ENABLE_OVF); 11135 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 11136 | SWCR_TRAP_ENABLE_INE); 11137 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 11138 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 11139 11140 if (put_user_u64 (swcr, arg2)) 11141 goto efault; 11142 ret = 0; 11143 } 11144 break; 11145 11146 /* case GSI_IEEE_STATE_AT_SIGNAL: 11147 -- Not implemented in linux kernel. 11148 case GSI_UACPROC: 11149 -- Retrieves current unaligned access state; not much used. 11150 case GSI_PROC_TYPE: 11151 -- Retrieves implver information; surely not used. 
11152 case GSI_GET_HWRPB: 11153 -- Grabs a copy of the HWRPB; surely not used. 11154 */ 11155 } 11156 break; 11157 #endif 11158 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11159 /* Alpha specific */ 11160 case TARGET_NR_osf_setsysinfo: 11161 ret = -TARGET_EOPNOTSUPP; 11162 switch (arg1) { 11163 case TARGET_SSI_IEEE_FP_CONTROL: 11164 { 11165 uint64_t swcr, fpcr, orig_fpcr; 11166 11167 if (get_user_u64 (swcr, arg2)) { 11168 goto efault; 11169 } 11170 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11171 fpcr = orig_fpcr & FPCR_DYN_MASK; 11172 11173 /* Copied from linux ieee_swcr_to_fpcr. */ 11174 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 11175 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 11176 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 11177 | SWCR_TRAP_ENABLE_DZE 11178 | SWCR_TRAP_ENABLE_OVF)) << 48; 11179 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 11180 | SWCR_TRAP_ENABLE_INE)) << 57; 11181 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 11182 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 11183 11184 cpu_alpha_store_fpcr(cpu_env, fpcr); 11185 ret = 0; 11186 } 11187 break; 11188 11189 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11190 { 11191 uint64_t exc, fpcr, orig_fpcr; 11192 int si_code; 11193 11194 if (get_user_u64(exc, arg2)) { 11195 goto efault; 11196 } 11197 11198 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11199 11200 /* We only add to the exception status here. */ 11201 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 11202 11203 cpu_alpha_store_fpcr(cpu_env, fpcr); 11204 ret = 0; 11205 11206 /* Old exceptions are not signaled. */ 11207 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 11208 11209 /* If any exceptions set by this call, 11210 and are unmasked, send a signal. 
*/ 11211 si_code = 0; 11212 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 11213 si_code = TARGET_FPE_FLTRES; 11214 } 11215 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 11216 si_code = TARGET_FPE_FLTUND; 11217 } 11218 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 11219 si_code = TARGET_FPE_FLTOVF; 11220 } 11221 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 11222 si_code = TARGET_FPE_FLTDIV; 11223 } 11224 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 11225 si_code = TARGET_FPE_FLTINV; 11226 } 11227 if (si_code != 0) { 11228 target_siginfo_t info; 11229 info.si_signo = SIGFPE; 11230 info.si_errno = 0; 11231 info.si_code = si_code; 11232 info._sifields._sigfault._addr 11233 = ((CPUArchState *)cpu_env)->pc; 11234 queue_signal((CPUArchState *)cpu_env, info.si_signo, 11235 QEMU_SI_FAULT, &info); 11236 } 11237 } 11238 break; 11239 11240 /* case SSI_NVPAIRS: 11241 -- Used with SSIN_UACPROC to enable unaligned accesses. 11242 case SSI_IEEE_STATE_AT_SIGNAL: 11243 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11244 -- Not implemented in linux kernel 11245 */ 11246 } 11247 break; 11248 #endif 11249 #ifdef TARGET_NR_osf_sigprocmask 11250 /* Alpha specific. 
*/ 11251 case TARGET_NR_osf_sigprocmask: 11252 { 11253 abi_ulong mask; 11254 int how; 11255 sigset_t set, oldset; 11256 11257 switch(arg1) { 11258 case TARGET_SIG_BLOCK: 11259 how = SIG_BLOCK; 11260 break; 11261 case TARGET_SIG_UNBLOCK: 11262 how = SIG_UNBLOCK; 11263 break; 11264 case TARGET_SIG_SETMASK: 11265 how = SIG_SETMASK; 11266 break; 11267 default: 11268 ret = -TARGET_EINVAL; 11269 goto fail; 11270 } 11271 mask = arg2; 11272 target_to_host_old_sigset(&set, &mask); 11273 ret = do_sigprocmask(how, &set, &oldset); 11274 if (!ret) { 11275 host_to_target_old_sigset(&mask, &oldset); 11276 ret = mask; 11277 } 11278 } 11279 break; 11280 #endif 11281 11282 #ifdef TARGET_NR_getgid32 11283 case TARGET_NR_getgid32: 11284 ret = get_errno(getgid()); 11285 break; 11286 #endif 11287 #ifdef TARGET_NR_geteuid32 11288 case TARGET_NR_geteuid32: 11289 ret = get_errno(geteuid()); 11290 break; 11291 #endif 11292 #ifdef TARGET_NR_getegid32 11293 case TARGET_NR_getegid32: 11294 ret = get_errno(getegid()); 11295 break; 11296 #endif 11297 #ifdef TARGET_NR_setreuid32 11298 case TARGET_NR_setreuid32: 11299 ret = get_errno(setreuid(arg1, arg2)); 11300 break; 11301 #endif 11302 #ifdef TARGET_NR_setregid32 11303 case TARGET_NR_setregid32: 11304 ret = get_errno(setregid(arg1, arg2)); 11305 break; 11306 #endif 11307 #ifdef TARGET_NR_getgroups32 11308 case TARGET_NR_getgroups32: 11309 { 11310 int gidsetsize = arg1; 11311 uint32_t *target_grouplist; 11312 gid_t *grouplist; 11313 int i; 11314 11315 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11316 ret = get_errno(getgroups(gidsetsize, grouplist)); 11317 if (gidsetsize == 0) 11318 break; 11319 if (!is_error(ret)) { 11320 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 11321 if (!target_grouplist) { 11322 ret = -TARGET_EFAULT; 11323 goto fail; 11324 } 11325 for(i = 0;i < ret; i++) 11326 target_grouplist[i] = tswap32(grouplist[i]); 11327 unlock_user(target_grouplist, arg2, gidsetsize * 4); 11328 } 11329 } 11330 break; 
11331 #endif 11332 #ifdef TARGET_NR_setgroups32 11333 case TARGET_NR_setgroups32: 11334 { 11335 int gidsetsize = arg1; 11336 uint32_t *target_grouplist; 11337 gid_t *grouplist; 11338 int i; 11339 11340 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11341 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 11342 if (!target_grouplist) { 11343 ret = -TARGET_EFAULT; 11344 goto fail; 11345 } 11346 for(i = 0;i < gidsetsize; i++) 11347 grouplist[i] = tswap32(target_grouplist[i]); 11348 unlock_user(target_grouplist, arg2, 0); 11349 ret = get_errno(setgroups(gidsetsize, grouplist)); 11350 } 11351 break; 11352 #endif 11353 #ifdef TARGET_NR_fchown32 11354 case TARGET_NR_fchown32: 11355 ret = get_errno(fchown(arg1, arg2, arg3)); 11356 break; 11357 #endif 11358 #ifdef TARGET_NR_setresuid32 11359 case TARGET_NR_setresuid32: 11360 ret = get_errno(sys_setresuid(arg1, arg2, arg3)); 11361 break; 11362 #endif 11363 #ifdef TARGET_NR_getresuid32 11364 case TARGET_NR_getresuid32: 11365 { 11366 uid_t ruid, euid, suid; 11367 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11368 if (!is_error(ret)) { 11369 if (put_user_u32(ruid, arg1) 11370 || put_user_u32(euid, arg2) 11371 || put_user_u32(suid, arg3)) 11372 goto efault; 11373 } 11374 } 11375 break; 11376 #endif 11377 #ifdef TARGET_NR_setresgid32 11378 case TARGET_NR_setresgid32: 11379 ret = get_errno(sys_setresgid(arg1, arg2, arg3)); 11380 break; 11381 #endif 11382 #ifdef TARGET_NR_getresgid32 11383 case TARGET_NR_getresgid32: 11384 { 11385 gid_t rgid, egid, sgid; 11386 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11387 if (!is_error(ret)) { 11388 if (put_user_u32(rgid, arg1) 11389 || put_user_u32(egid, arg2) 11390 || put_user_u32(sgid, arg3)) 11391 goto efault; 11392 } 11393 } 11394 break; 11395 #endif 11396 #ifdef TARGET_NR_chown32 11397 case TARGET_NR_chown32: 11398 if (!(p = lock_user_string(arg1))) 11399 goto efault; 11400 ret = get_errno(chown(p, arg2, arg3)); 11401 unlock_user(p, arg1, 0); 11402 break; 11403 
#endif 11404 #ifdef TARGET_NR_setuid32 11405 case TARGET_NR_setuid32: 11406 ret = get_errno(sys_setuid(arg1)); 11407 break; 11408 #endif 11409 #ifdef TARGET_NR_setgid32 11410 case TARGET_NR_setgid32: 11411 ret = get_errno(sys_setgid(arg1)); 11412 break; 11413 #endif 11414 #ifdef TARGET_NR_setfsuid32 11415 case TARGET_NR_setfsuid32: 11416 ret = get_errno(setfsuid(arg1)); 11417 break; 11418 #endif 11419 #ifdef TARGET_NR_setfsgid32 11420 case TARGET_NR_setfsgid32: 11421 ret = get_errno(setfsgid(arg1)); 11422 break; 11423 #endif 11424 11425 case TARGET_NR_pivot_root: 11426 goto unimplemented; 11427 #ifdef TARGET_NR_mincore 11428 case TARGET_NR_mincore: 11429 { 11430 void *a; 11431 ret = -TARGET_ENOMEM; 11432 a = lock_user(VERIFY_READ, arg1, arg2, 0); 11433 if (!a) { 11434 goto fail; 11435 } 11436 ret = -TARGET_EFAULT; 11437 p = lock_user_string(arg3); 11438 if (!p) { 11439 goto mincore_fail; 11440 } 11441 ret = get_errno(mincore(a, arg2, p)); 11442 unlock_user(p, arg3, ret); 11443 mincore_fail: 11444 unlock_user(a, arg1, 0); 11445 } 11446 break; 11447 #endif 11448 #ifdef TARGET_NR_arm_fadvise64_64 11449 case TARGET_NR_arm_fadvise64_64: 11450 /* arm_fadvise64_64 looks like fadvise64_64 but 11451 * with different argument order: fd, advice, offset, len 11452 * rather than the usual fd, offset, len, advice. 11453 * Note that offset and len are both 64-bit so appear as 11454 * pairs of 32-bit registers. 
11455 */ 11456 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 11457 target_offset64(arg5, arg6), arg2); 11458 ret = -host_to_target_errno(ret); 11459 break; 11460 #endif 11461 11462 #if TARGET_ABI_BITS == 32 11463 11464 #ifdef TARGET_NR_fadvise64_64 11465 case TARGET_NR_fadvise64_64: 11466 #if defined(TARGET_PPC) 11467 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 11468 ret = arg2; 11469 arg2 = arg3; 11470 arg3 = arg4; 11471 arg4 = arg5; 11472 arg5 = arg6; 11473 arg6 = ret; 11474 #else 11475 /* 6 args: fd, offset (high, low), len (high, low), advice */ 11476 if (regpairs_aligned(cpu_env, num)) { 11477 /* offset is in (3,4), len in (5,6) and advice in 7 */ 11478 arg2 = arg3; 11479 arg3 = arg4; 11480 arg4 = arg5; 11481 arg5 = arg6; 11482 arg6 = arg7; 11483 } 11484 #endif 11485 ret = -host_to_target_errno(posix_fadvise(arg1, 11486 target_offset64(arg2, arg3), 11487 target_offset64(arg4, arg5), 11488 arg6)); 11489 break; 11490 #endif 11491 11492 #ifdef TARGET_NR_fadvise64 11493 case TARGET_NR_fadvise64: 11494 /* 5 args: fd, offset (high, low), len, advice */ 11495 if (regpairs_aligned(cpu_env, num)) { 11496 /* offset is in (3,4), len in 5 and advice in 6 */ 11497 arg2 = arg3; 11498 arg3 = arg4; 11499 arg4 = arg5; 11500 arg5 = arg6; 11501 } 11502 ret = -host_to_target_errno(posix_fadvise(arg1, 11503 target_offset64(arg2, arg3), 11504 arg4, arg5)); 11505 break; 11506 #endif 11507 11508 #else /* not a 32-bit ABI */ 11509 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 11510 #ifdef TARGET_NR_fadvise64_64 11511 case TARGET_NR_fadvise64_64: 11512 #endif 11513 #ifdef TARGET_NR_fadvise64 11514 case TARGET_NR_fadvise64: 11515 #endif 11516 #ifdef TARGET_S390X 11517 switch (arg4) { 11518 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 11519 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 11520 case 6: arg4 = POSIX_FADV_DONTNEED; break; 11521 case 7: arg4 = POSIX_FADV_NOREUSE; break; 11522 
default: break; 11523 } 11524 #endif 11525 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 11526 break; 11527 #endif 11528 #endif /* end of 64-bit ABI fadvise handling */ 11529 11530 #ifdef TARGET_NR_madvise 11531 case TARGET_NR_madvise: 11532 /* A straight passthrough may not be safe because qemu sometimes 11533 turns private file-backed mappings into anonymous mappings. 11534 This will break MADV_DONTNEED. 11535 This is a hint, so ignoring and returning success is ok. */ 11536 ret = get_errno(0); 11537 break; 11538 #endif 11539 #if TARGET_ABI_BITS == 32 11540 case TARGET_NR_fcntl64: 11541 { 11542 int cmd; 11543 struct flock64 fl; 11544 from_flock64_fn *copyfrom = copy_from_user_flock64; 11545 to_flock64_fn *copyto = copy_to_user_flock64; 11546 11547 #ifdef TARGET_ARM 11548 if (((CPUARMState *)cpu_env)->eabi) { 11549 copyfrom = copy_from_user_eabi_flock64; 11550 copyto = copy_to_user_eabi_flock64; 11551 } 11552 #endif 11553 11554 cmd = target_to_host_fcntl_cmd(arg2); 11555 if (cmd == -TARGET_EINVAL) { 11556 ret = cmd; 11557 break; 11558 } 11559 11560 switch(arg2) { 11561 case TARGET_F_GETLK64: 11562 ret = copyfrom(&fl, arg3); 11563 if (ret) { 11564 break; 11565 } 11566 ret = get_errno(fcntl(arg1, cmd, &fl)); 11567 if (ret == 0) { 11568 ret = copyto(arg3, &fl); 11569 } 11570 break; 11571 11572 case TARGET_F_SETLK64: 11573 case TARGET_F_SETLKW64: 11574 ret = copyfrom(&fl, arg3); 11575 if (ret) { 11576 break; 11577 } 11578 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11579 break; 11580 default: 11581 ret = do_fcntl(arg1, arg2, arg3); 11582 break; 11583 } 11584 break; 11585 } 11586 #endif 11587 #ifdef TARGET_NR_cacheflush 11588 case TARGET_NR_cacheflush: 11589 /* self-modifying code is handled automatically, so nothing needed */ 11590 ret = 0; 11591 break; 11592 #endif 11593 #ifdef TARGET_NR_security 11594 case TARGET_NR_security: 11595 goto unimplemented; 11596 #endif 11597 #ifdef TARGET_NR_getpagesize 11598 case TARGET_NR_getpagesize: 11599 ret = 
TARGET_PAGE_SIZE; 11600 break; 11601 #endif 11602 case TARGET_NR_gettid: 11603 ret = get_errno(gettid()); 11604 break; 11605 #ifdef TARGET_NR_readahead 11606 case TARGET_NR_readahead: 11607 #if TARGET_ABI_BITS == 32 11608 if (regpairs_aligned(cpu_env, num)) { 11609 arg2 = arg3; 11610 arg3 = arg4; 11611 arg4 = arg5; 11612 } 11613 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4)); 11614 #else 11615 ret = get_errno(readahead(arg1, arg2, arg3)); 11616 #endif 11617 break; 11618 #endif 11619 #ifdef CONFIG_ATTR 11620 #ifdef TARGET_NR_setxattr 11621 case TARGET_NR_listxattr: 11622 case TARGET_NR_llistxattr: 11623 { 11624 void *p, *b = 0; 11625 if (arg2) { 11626 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11627 if (!b) { 11628 ret = -TARGET_EFAULT; 11629 break; 11630 } 11631 } 11632 p = lock_user_string(arg1); 11633 if (p) { 11634 if (num == TARGET_NR_listxattr) { 11635 ret = get_errno(listxattr(p, b, arg3)); 11636 } else { 11637 ret = get_errno(llistxattr(p, b, arg3)); 11638 } 11639 } else { 11640 ret = -TARGET_EFAULT; 11641 } 11642 unlock_user(p, arg1, 0); 11643 unlock_user(b, arg2, arg3); 11644 break; 11645 } 11646 case TARGET_NR_flistxattr: 11647 { 11648 void *b = 0; 11649 if (arg2) { 11650 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11651 if (!b) { 11652 ret = -TARGET_EFAULT; 11653 break; 11654 } 11655 } 11656 ret = get_errno(flistxattr(arg1, b, arg3)); 11657 unlock_user(b, arg2, arg3); 11658 break; 11659 } 11660 case TARGET_NR_setxattr: 11661 case TARGET_NR_lsetxattr: 11662 { 11663 void *p, *n, *v = 0; 11664 if (arg3) { 11665 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11666 if (!v) { 11667 ret = -TARGET_EFAULT; 11668 break; 11669 } 11670 } 11671 p = lock_user_string(arg1); 11672 n = lock_user_string(arg2); 11673 if (p && n) { 11674 if (num == TARGET_NR_setxattr) { 11675 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 11676 } else { 11677 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 11678 } 11679 } else { 11680 ret = -TARGET_EFAULT; 11681 } 11682 
unlock_user(p, arg1, 0); 11683 unlock_user(n, arg2, 0); 11684 unlock_user(v, arg3, 0); 11685 } 11686 break; 11687 case TARGET_NR_fsetxattr: 11688 { 11689 void *n, *v = 0; 11690 if (arg3) { 11691 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11692 if (!v) { 11693 ret = -TARGET_EFAULT; 11694 break; 11695 } 11696 } 11697 n = lock_user_string(arg2); 11698 if (n) { 11699 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 11700 } else { 11701 ret = -TARGET_EFAULT; 11702 } 11703 unlock_user(n, arg2, 0); 11704 unlock_user(v, arg3, 0); 11705 } 11706 break; 11707 case TARGET_NR_getxattr: 11708 case TARGET_NR_lgetxattr: 11709 { 11710 void *p, *n, *v = 0; 11711 if (arg3) { 11712 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11713 if (!v) { 11714 ret = -TARGET_EFAULT; 11715 break; 11716 } 11717 } 11718 p = lock_user_string(arg1); 11719 n = lock_user_string(arg2); 11720 if (p && n) { 11721 if (num == TARGET_NR_getxattr) { 11722 ret = get_errno(getxattr(p, n, v, arg4)); 11723 } else { 11724 ret = get_errno(lgetxattr(p, n, v, arg4)); 11725 } 11726 } else { 11727 ret = -TARGET_EFAULT; 11728 } 11729 unlock_user(p, arg1, 0); 11730 unlock_user(n, arg2, 0); 11731 unlock_user(v, arg3, arg4); 11732 } 11733 break; 11734 case TARGET_NR_fgetxattr: 11735 { 11736 void *n, *v = 0; 11737 if (arg3) { 11738 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11739 if (!v) { 11740 ret = -TARGET_EFAULT; 11741 break; 11742 } 11743 } 11744 n = lock_user_string(arg2); 11745 if (n) { 11746 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 11747 } else { 11748 ret = -TARGET_EFAULT; 11749 } 11750 unlock_user(n, arg2, 0); 11751 unlock_user(v, arg3, arg4); 11752 } 11753 break; 11754 case TARGET_NR_removexattr: 11755 case TARGET_NR_lremovexattr: 11756 { 11757 void *p, *n; 11758 p = lock_user_string(arg1); 11759 n = lock_user_string(arg2); 11760 if (p && n) { 11761 if (num == TARGET_NR_removexattr) { 11762 ret = get_errno(removexattr(p, n)); 11763 } else { 11764 ret = get_errno(lremovexattr(p, n)); 11765 } 11766 } else { 
11767 ret = -TARGET_EFAULT; 11768 } 11769 unlock_user(p, arg1, 0); 11770 unlock_user(n, arg2, 0); 11771 } 11772 break; 11773 case TARGET_NR_fremovexattr: 11774 { 11775 void *n; 11776 n = lock_user_string(arg2); 11777 if (n) { 11778 ret = get_errno(fremovexattr(arg1, n)); 11779 } else { 11780 ret = -TARGET_EFAULT; 11781 } 11782 unlock_user(n, arg2, 0); 11783 } 11784 break; 11785 #endif 11786 #endif /* CONFIG_ATTR */ 11787 #ifdef TARGET_NR_set_thread_area 11788 case TARGET_NR_set_thread_area: 11789 #if defined(TARGET_MIPS) 11790 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; 11791 ret = 0; 11792 break; 11793 #elif defined(TARGET_CRIS) 11794 if (arg1 & 0xff) 11795 ret = -TARGET_EINVAL; 11796 else { 11797 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 11798 ret = 0; 11799 } 11800 break; 11801 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 11802 ret = do_set_thread_area(cpu_env, arg1); 11803 break; 11804 #elif defined(TARGET_M68K) 11805 { 11806 TaskState *ts = cpu->opaque; 11807 ts->tp_value = arg1; 11808 ret = 0; 11809 break; 11810 } 11811 #else 11812 goto unimplemented_nowarn; 11813 #endif 11814 #endif 11815 #ifdef TARGET_NR_get_thread_area 11816 case TARGET_NR_get_thread_area: 11817 #if defined(TARGET_I386) && defined(TARGET_ABI32) 11818 ret = do_get_thread_area(cpu_env, arg1); 11819 break; 11820 #elif defined(TARGET_M68K) 11821 { 11822 TaskState *ts = cpu->opaque; 11823 ret = ts->tp_value; 11824 break; 11825 } 11826 #else 11827 goto unimplemented_nowarn; 11828 #endif 11829 #endif 11830 #ifdef TARGET_NR_getdomainname 11831 case TARGET_NR_getdomainname: 11832 goto unimplemented_nowarn; 11833 #endif 11834 11835 #ifdef TARGET_NR_clock_gettime 11836 case TARGET_NR_clock_gettime: 11837 { 11838 struct timespec ts; 11839 ret = get_errno(clock_gettime(arg1, &ts)); 11840 if (!is_error(ret)) { 11841 host_to_target_timespec(arg2, &ts); 11842 } 11843 break; 11844 } 11845 #endif 11846 #ifdef TARGET_NR_clock_getres 11847 case TARGET_NR_clock_getres: 11848 { 
11849 struct timespec ts; 11850 ret = get_errno(clock_getres(arg1, &ts)); 11851 if (!is_error(ret)) { 11852 host_to_target_timespec(arg2, &ts); 11853 } 11854 break; 11855 } 11856 #endif 11857 #ifdef TARGET_NR_clock_nanosleep 11858 case TARGET_NR_clock_nanosleep: 11859 { 11860 struct timespec ts; 11861 target_to_host_timespec(&ts, arg3); 11862 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 11863 &ts, arg4 ? &ts : NULL)); 11864 if (arg4) 11865 host_to_target_timespec(arg4, &ts); 11866 11867 #if defined(TARGET_PPC) 11868 /* clock_nanosleep is odd in that it returns positive errno values. 11869 * On PPC, CR0 bit 3 should be set in such a situation. */ 11870 if (ret && ret != -TARGET_ERESTARTSYS) { 11871 ((CPUPPCState *)cpu_env)->crf[0] |= 1; 11872 } 11873 #endif 11874 break; 11875 } 11876 #endif 11877 11878 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 11879 case TARGET_NR_set_tid_address: 11880 ret = get_errno(set_tid_address((int *)g2h(arg1))); 11881 break; 11882 #endif 11883 11884 case TARGET_NR_tkill: 11885 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 11886 break; 11887 11888 case TARGET_NR_tgkill: 11889 ret = get_errno(safe_tgkill((int)arg1, (int)arg2, 11890 target_to_host_signal(arg3))); 11891 break; 11892 11893 #ifdef TARGET_NR_set_robust_list 11894 case TARGET_NR_set_robust_list: 11895 case TARGET_NR_get_robust_list: 11896 /* The ABI for supporting robust futexes has userspace pass 11897 * the kernel a pointer to a linked list which is updated by 11898 * userspace after the syscall; the list is walked by the kernel 11899 * when the thread exits. Since the linked list in QEMU guest 11900 * memory isn't a valid linked list for the host and we have 11901 * no way to reliably intercept the thread-death event, we can't 11902 * support these. 
Silently return ENOSYS so that guest userspace 11903 * falls back to a non-robust futex implementation (which should 11904 * be OK except in the corner case of the guest crashing while 11905 * holding a mutex that is shared with another process via 11906 * shared memory). 11907 */ 11908 goto unimplemented_nowarn; 11909 #endif 11910 11911 #if defined(TARGET_NR_utimensat) 11912 case TARGET_NR_utimensat: 11913 { 11914 struct timespec *tsp, ts[2]; 11915 if (!arg3) { 11916 tsp = NULL; 11917 } else { 11918 target_to_host_timespec(ts, arg3); 11919 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 11920 tsp = ts; 11921 } 11922 if (!arg2) 11923 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 11924 else { 11925 if (!(p = lock_user_string(arg2))) { 11926 ret = -TARGET_EFAULT; 11927 goto fail; 11928 } 11929 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 11930 unlock_user(p, arg2, 0); 11931 } 11932 } 11933 break; 11934 #endif 11935 case TARGET_NR_futex: 11936 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 11937 break; 11938 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 11939 case TARGET_NR_inotify_init: 11940 ret = get_errno(sys_inotify_init()); 11941 if (ret >= 0) { 11942 fd_trans_register(ret, &target_inotify_trans); 11943 } 11944 break; 11945 #endif 11946 #ifdef CONFIG_INOTIFY1 11947 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 11948 case TARGET_NR_inotify_init1: 11949 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1, 11950 fcntl_flags_tbl))); 11951 if (ret >= 0) { 11952 fd_trans_register(ret, &target_inotify_trans); 11953 } 11954 break; 11955 #endif 11956 #endif 11957 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 11958 case TARGET_NR_inotify_add_watch: 11959 p = lock_user_string(arg2); 11960 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 11961 unlock_user(p, arg2, 0); 11962 break; 11963 #endif 11964 #if 
defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 11965 case TARGET_NR_inotify_rm_watch: 11966 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 11967 break; 11968 #endif 11969 11970 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 11971 case TARGET_NR_mq_open: 11972 { 11973 struct mq_attr posix_mq_attr; 11974 struct mq_attr *pposix_mq_attr; 11975 int host_flags; 11976 11977 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 11978 pposix_mq_attr = NULL; 11979 if (arg4) { 11980 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 11981 goto efault; 11982 } 11983 pposix_mq_attr = &posix_mq_attr; 11984 } 11985 p = lock_user_string(arg1 - 1); 11986 if (!p) { 11987 goto efault; 11988 } 11989 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 11990 unlock_user (p, arg1, 0); 11991 } 11992 break; 11993 11994 case TARGET_NR_mq_unlink: 11995 p = lock_user_string(arg1 - 1); 11996 if (!p) { 11997 ret = -TARGET_EFAULT; 11998 break; 11999 } 12000 ret = get_errno(mq_unlink(p)); 12001 unlock_user (p, arg1, 0); 12002 break; 12003 12004 case TARGET_NR_mq_timedsend: 12005 { 12006 struct timespec ts; 12007 12008 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12009 if (arg5 != 0) { 12010 target_to_host_timespec(&ts, arg5); 12011 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12012 host_to_target_timespec(arg5, &ts); 12013 } else { 12014 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12015 } 12016 unlock_user (p, arg2, arg3); 12017 } 12018 break; 12019 12020 case TARGET_NR_mq_timedreceive: 12021 { 12022 struct timespec ts; 12023 unsigned int prio; 12024 12025 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12026 if (arg5 != 0) { 12027 target_to_host_timespec(&ts, arg5); 12028 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12029 &prio, &ts)); 12030 host_to_target_timespec(arg5, &ts); 12031 } else { 12032 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12033 &prio, NULL)); 12034 } 12035 unlock_user (p, 
arg2, arg3); 12036 if (arg4 != 0) 12037 put_user_u32(prio, arg4); 12038 } 12039 break; 12040 12041 /* Not implemented for now... */ 12042 /* case TARGET_NR_mq_notify: */ 12043 /* break; */ 12044 12045 case TARGET_NR_mq_getsetattr: 12046 { 12047 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 12048 ret = 0; 12049 if (arg3 != 0) { 12050 ret = mq_getattr(arg1, &posix_mq_attr_out); 12051 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 12052 } 12053 if (arg2 != 0) { 12054 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 12055 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 12056 } 12057 12058 } 12059 break; 12060 #endif 12061 12062 #ifdef CONFIG_SPLICE 12063 #ifdef TARGET_NR_tee 12064 case TARGET_NR_tee: 12065 { 12066 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 12067 } 12068 break; 12069 #endif 12070 #ifdef TARGET_NR_splice 12071 case TARGET_NR_splice: 12072 { 12073 loff_t loff_in, loff_out; 12074 loff_t *ploff_in = NULL, *ploff_out = NULL; 12075 if (arg2) { 12076 if (get_user_u64(loff_in, arg2)) { 12077 goto efault; 12078 } 12079 ploff_in = &loff_in; 12080 } 12081 if (arg4) { 12082 if (get_user_u64(loff_out, arg4)) { 12083 goto efault; 12084 } 12085 ploff_out = &loff_out; 12086 } 12087 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 12088 if (arg2) { 12089 if (put_user_u64(loff_in, arg2)) { 12090 goto efault; 12091 } 12092 } 12093 if (arg4) { 12094 if (put_user_u64(loff_out, arg4)) { 12095 goto efault; 12096 } 12097 } 12098 } 12099 break; 12100 #endif 12101 #ifdef TARGET_NR_vmsplice 12102 case TARGET_NR_vmsplice: 12103 { 12104 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 12105 if (vec != NULL) { 12106 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 12107 unlock_iovec(vec, arg2, arg3, 0); 12108 } else { 12109 ret = -host_to_target_errno(errno); 12110 } 12111 } 12112 break; 12113 #endif 12114 #endif /* CONFIG_SPLICE */ 12115 #ifdef CONFIG_EVENTFD 12116 #if defined(TARGET_NR_eventfd) 12117 case TARGET_NR_eventfd: 
12118 ret = get_errno(eventfd(arg1, 0)); 12119 if (ret >= 0) { 12120 fd_trans_register(ret, &target_eventfd_trans); 12121 } 12122 break; 12123 #endif 12124 #if defined(TARGET_NR_eventfd2) 12125 case TARGET_NR_eventfd2: 12126 { 12127 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 12128 if (arg2 & TARGET_O_NONBLOCK) { 12129 host_flags |= O_NONBLOCK; 12130 } 12131 if (arg2 & TARGET_O_CLOEXEC) { 12132 host_flags |= O_CLOEXEC; 12133 } 12134 ret = get_errno(eventfd(arg1, host_flags)); 12135 if (ret >= 0) { 12136 fd_trans_register(ret, &target_eventfd_trans); 12137 } 12138 break; 12139 } 12140 #endif 12141 #endif /* CONFIG_EVENTFD */ 12142 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12143 case TARGET_NR_fallocate: 12144 #if TARGET_ABI_BITS == 32 12145 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12146 target_offset64(arg5, arg6))); 12147 #else 12148 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12149 #endif 12150 break; 12151 #endif 12152 #if defined(CONFIG_SYNC_FILE_RANGE) 12153 #if defined(TARGET_NR_sync_file_range) 12154 case TARGET_NR_sync_file_range: 12155 #if TARGET_ABI_BITS == 32 12156 #if defined(TARGET_MIPS) 12157 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12158 target_offset64(arg5, arg6), arg7)); 12159 #else 12160 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12161 target_offset64(arg4, arg5), arg6)); 12162 #endif /* !TARGET_MIPS */ 12163 #else 12164 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 12165 #endif 12166 break; 12167 #endif 12168 #if defined(TARGET_NR_sync_file_range2) 12169 case TARGET_NR_sync_file_range2: 12170 /* This is like sync_file_range but the arguments are reordered */ 12171 #if TARGET_ABI_BITS == 32 12172 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12173 target_offset64(arg5, arg6), arg2)); 12174 #else 12175 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 12176 #endif 12177 break; 12178 
#endif 12179 #endif 12180 #if defined(TARGET_NR_signalfd4) 12181 case TARGET_NR_signalfd4: 12182 ret = do_signalfd4(arg1, arg2, arg4); 12183 break; 12184 #endif 12185 #if defined(TARGET_NR_signalfd) 12186 case TARGET_NR_signalfd: 12187 ret = do_signalfd4(arg1, arg2, 0); 12188 break; 12189 #endif 12190 #if defined(CONFIG_EPOLL) 12191 #if defined(TARGET_NR_epoll_create) 12192 case TARGET_NR_epoll_create: 12193 ret = get_errno(epoll_create(arg1)); 12194 break; 12195 #endif 12196 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 12197 case TARGET_NR_epoll_create1: 12198 ret = get_errno(epoll_create1(arg1)); 12199 break; 12200 #endif 12201 #if defined(TARGET_NR_epoll_ctl) 12202 case TARGET_NR_epoll_ctl: 12203 { 12204 struct epoll_event ep; 12205 struct epoll_event *epp = 0; 12206 if (arg4) { 12207 struct target_epoll_event *target_ep; 12208 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 12209 goto efault; 12210 } 12211 ep.events = tswap32(target_ep->events); 12212 /* The epoll_data_t union is just opaque data to the kernel, 12213 * so we transfer all 64 bits across and need not worry what 12214 * actual data type it is. 
12215 */ 12216 ep.data.u64 = tswap64(target_ep->data.u64); 12217 unlock_user_struct(target_ep, arg4, 0); 12218 epp = &ep; 12219 } 12220 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 12221 break; 12222 } 12223 #endif 12224 12225 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 12226 #if defined(TARGET_NR_epoll_wait) 12227 case TARGET_NR_epoll_wait: 12228 #endif 12229 #if defined(TARGET_NR_epoll_pwait) 12230 case TARGET_NR_epoll_pwait: 12231 #endif 12232 { 12233 struct target_epoll_event *target_ep; 12234 struct epoll_event *ep; 12235 int epfd = arg1; 12236 int maxevents = arg3; 12237 int timeout = arg4; 12238 12239 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 12240 ret = -TARGET_EINVAL; 12241 break; 12242 } 12243 12244 target_ep = lock_user(VERIFY_WRITE, arg2, 12245 maxevents * sizeof(struct target_epoll_event), 1); 12246 if (!target_ep) { 12247 goto efault; 12248 } 12249 12250 ep = g_try_new(struct epoll_event, maxevents); 12251 if (!ep) { 12252 unlock_user(target_ep, arg2, 0); 12253 ret = -TARGET_ENOMEM; 12254 break; 12255 } 12256 12257 switch (num) { 12258 #if defined(TARGET_NR_epoll_pwait) 12259 case TARGET_NR_epoll_pwait: 12260 { 12261 target_sigset_t *target_set; 12262 sigset_t _set, *set = &_set; 12263 12264 if (arg5) { 12265 if (arg6 != sizeof(target_sigset_t)) { 12266 ret = -TARGET_EINVAL; 12267 break; 12268 } 12269 12270 target_set = lock_user(VERIFY_READ, arg5, 12271 sizeof(target_sigset_t), 1); 12272 if (!target_set) { 12273 ret = -TARGET_EFAULT; 12274 break; 12275 } 12276 target_to_host_sigset(set, target_set); 12277 unlock_user(target_set, arg5, 0); 12278 } else { 12279 set = NULL; 12280 } 12281 12282 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12283 set, SIGSET_T_SIZE)); 12284 break; 12285 } 12286 #endif 12287 #if defined(TARGET_NR_epoll_wait) 12288 case TARGET_NR_epoll_wait: 12289 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12290 NULL, 0)); 12291 break; 12292 #endif 12293 
default: 12294 ret = -TARGET_ENOSYS; 12295 } 12296 if (!is_error(ret)) { 12297 int i; 12298 for (i = 0; i < ret; i++) { 12299 target_ep[i].events = tswap32(ep[i].events); 12300 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 12301 } 12302 unlock_user(target_ep, arg2, 12303 ret * sizeof(struct target_epoll_event)); 12304 } else { 12305 unlock_user(target_ep, arg2, 0); 12306 } 12307 g_free(ep); 12308 break; 12309 } 12310 #endif 12311 #endif 12312 #ifdef TARGET_NR_prlimit64 12313 case TARGET_NR_prlimit64: 12314 { 12315 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 12316 struct target_rlimit64 *target_rnew, *target_rold; 12317 struct host_rlimit64 rnew, rold, *rnewp = 0; 12318 int resource = target_to_host_resource(arg2); 12319 if (arg3) { 12320 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 12321 goto efault; 12322 } 12323 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 12324 rnew.rlim_max = tswap64(target_rnew->rlim_max); 12325 unlock_user_struct(target_rnew, arg3, 0); 12326 rnewp = &rnew; 12327 } 12328 12329 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? 
&rold : 0)); 12330 if (!is_error(ret) && arg4) { 12331 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 12332 goto efault; 12333 } 12334 target_rold->rlim_cur = tswap64(rold.rlim_cur); 12335 target_rold->rlim_max = tswap64(rold.rlim_max); 12336 unlock_user_struct(target_rold, arg4, 1); 12337 } 12338 break; 12339 } 12340 #endif 12341 #ifdef TARGET_NR_gethostname 12342 case TARGET_NR_gethostname: 12343 { 12344 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 12345 if (name) { 12346 ret = get_errno(gethostname(name, arg2)); 12347 unlock_user(name, arg1, arg2); 12348 } else { 12349 ret = -TARGET_EFAULT; 12350 } 12351 break; 12352 } 12353 #endif 12354 #ifdef TARGET_NR_atomic_cmpxchg_32 12355 case TARGET_NR_atomic_cmpxchg_32: 12356 { 12357 /* should use start_exclusive from main.c */ 12358 abi_ulong mem_value; 12359 if (get_user_u32(mem_value, arg6)) { 12360 target_siginfo_t info; 12361 info.si_signo = SIGSEGV; 12362 info.si_errno = 0; 12363 info.si_code = TARGET_SEGV_MAPERR; 12364 info._sifields._sigfault._addr = arg6; 12365 queue_signal((CPUArchState *)cpu_env, info.si_signo, 12366 QEMU_SI_FAULT, &info); 12367 ret = 0xdeadbeef; 12368 12369 } 12370 if (mem_value == arg2) 12371 put_user_u32(arg1, arg6); 12372 ret = mem_value; 12373 break; 12374 } 12375 #endif 12376 #ifdef TARGET_NR_atomic_barrier 12377 case TARGET_NR_atomic_barrier: 12378 { 12379 /* Like the kernel implementation and the qemu arm barrier, no-op this? 
*/ 12380 ret = 0; 12381 break; 12382 } 12383 #endif 12384 12385 #ifdef TARGET_NR_timer_create 12386 case TARGET_NR_timer_create: 12387 { 12388 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 12389 12390 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 12391 12392 int clkid = arg1; 12393 int timer_index = next_free_host_timer(); 12394 12395 if (timer_index < 0) { 12396 ret = -TARGET_EAGAIN; 12397 } else { 12398 timer_t *phtimer = g_posix_timers + timer_index; 12399 12400 if (arg2) { 12401 phost_sevp = &host_sevp; 12402 ret = target_to_host_sigevent(phost_sevp, arg2); 12403 if (ret != 0) { 12404 break; 12405 } 12406 } 12407 12408 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 12409 if (ret) { 12410 phtimer = NULL; 12411 } else { 12412 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 12413 goto efault; 12414 } 12415 } 12416 } 12417 break; 12418 } 12419 #endif 12420 12421 #ifdef TARGET_NR_timer_settime 12422 case TARGET_NR_timer_settime: 12423 { 12424 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 12425 * struct itimerspec * old_value */ 12426 target_timer_t timerid = get_timer_id(arg1); 12427 12428 if (timerid < 0) { 12429 ret = timerid; 12430 } else if (arg3 == 0) { 12431 ret = -TARGET_EINVAL; 12432 } else { 12433 timer_t htimer = g_posix_timers[timerid]; 12434 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 12435 12436 if (target_to_host_itimerspec(&hspec_new, arg3)) { 12437 goto efault; 12438 } 12439 ret = get_errno( 12440 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 12441 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) { 12442 goto efault; 12443 } 12444 } 12445 break; 12446 } 12447 #endif 12448 12449 #ifdef TARGET_NR_timer_gettime 12450 case TARGET_NR_timer_gettime: 12451 { 12452 /* args: timer_t timerid, struct itimerspec *curr_value */ 12453 target_timer_t timerid = get_timer_id(arg1); 12454 12455 if (timerid < 0) { 12456 ret = timerid; 12457 } else 
if (!arg2) { 12458 ret = -TARGET_EFAULT; 12459 } else { 12460 timer_t htimer = g_posix_timers[timerid]; 12461 struct itimerspec hspec; 12462 ret = get_errno(timer_gettime(htimer, &hspec)); 12463 12464 if (host_to_target_itimerspec(arg2, &hspec)) { 12465 ret = -TARGET_EFAULT; 12466 } 12467 } 12468 break; 12469 } 12470 #endif 12471 12472 #ifdef TARGET_NR_timer_getoverrun 12473 case TARGET_NR_timer_getoverrun: 12474 { 12475 /* args: timer_t timerid */ 12476 target_timer_t timerid = get_timer_id(arg1); 12477 12478 if (timerid < 0) { 12479 ret = timerid; 12480 } else { 12481 timer_t htimer = g_posix_timers[timerid]; 12482 ret = get_errno(timer_getoverrun(htimer)); 12483 } 12484 fd_trans_unregister(ret); 12485 break; 12486 } 12487 #endif 12488 12489 #ifdef TARGET_NR_timer_delete 12490 case TARGET_NR_timer_delete: 12491 { 12492 /* args: timer_t timerid */ 12493 target_timer_t timerid = get_timer_id(arg1); 12494 12495 if (timerid < 0) { 12496 ret = timerid; 12497 } else { 12498 timer_t htimer = g_posix_timers[timerid]; 12499 ret = get_errno(timer_delete(htimer)); 12500 g_posix_timers[timerid] = 0; 12501 } 12502 break; 12503 } 12504 #endif 12505 12506 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 12507 case TARGET_NR_timerfd_create: 12508 ret = get_errno(timerfd_create(arg1, 12509 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 12510 break; 12511 #endif 12512 12513 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 12514 case TARGET_NR_timerfd_gettime: 12515 { 12516 struct itimerspec its_curr; 12517 12518 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 12519 12520 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 12521 goto efault; 12522 } 12523 } 12524 break; 12525 #endif 12526 12527 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 12528 case TARGET_NR_timerfd_settime: 12529 { 12530 struct itimerspec its_new, its_old, *p_new; 12531 12532 if (arg3) { 12533 if (target_to_host_itimerspec(&its_new, arg3)) { 12534 
goto efault; 12535 } 12536 p_new = &its_new; 12537 } else { 12538 p_new = NULL; 12539 } 12540 12541 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 12542 12543 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 12544 goto efault; 12545 } 12546 } 12547 break; 12548 #endif 12549 12550 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 12551 case TARGET_NR_ioprio_get: 12552 ret = get_errno(ioprio_get(arg1, arg2)); 12553 break; 12554 #endif 12555 12556 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 12557 case TARGET_NR_ioprio_set: 12558 ret = get_errno(ioprio_set(arg1, arg2, arg3)); 12559 break; 12560 #endif 12561 12562 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 12563 case TARGET_NR_setns: 12564 ret = get_errno(setns(arg1, arg2)); 12565 break; 12566 #endif 12567 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 12568 case TARGET_NR_unshare: 12569 ret = get_errno(unshare(arg1)); 12570 break; 12571 #endif 12572 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 12573 case TARGET_NR_kcmp: 12574 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5)); 12575 break; 12576 #endif 12577 12578 default: 12579 unimplemented: 12580 gemu_log("qemu: Unsupported syscall: %d\n", num); 12581 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 12582 unimplemented_nowarn: 12583 #endif 12584 ret = -TARGET_ENOSYS; 12585 break; 12586 } 12587 fail: 12588 #ifdef DEBUG 12589 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 12590 #endif 12591 if(do_strace) 12592 print_syscall_ret(num, ret); 12593 trace_guest_user_syscall_ret(cpu, num, ret); 12594 return ret; 12595 efault: 12596 ret = -TARGET_EFAULT; 12597 goto fail; 12598 } 12599