1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/path.h" 23 #include <elf.h> 24 #include <endian.h> 25 #include <grp.h> 26 #include <sys/ipc.h> 27 #include <sys/msg.h> 28 #include <sys/wait.h> 29 #include <sys/mount.h> 30 #include <sys/file.h> 31 #include <sys/fsuid.h> 32 #include <sys/personality.h> 33 #include <sys/prctl.h> 34 #include <sys/resource.h> 35 #include <sys/swap.h> 36 #include <linux/capability.h> 37 #include <sched.h> 38 #include <sys/timex.h> 39 #include <sys/socket.h> 40 #include <sys/un.h> 41 #include <sys/uio.h> 42 #include <poll.h> 43 #include <sys/times.h> 44 #include <sys/shm.h> 45 #include <sys/sem.h> 46 #include <sys/statfs.h> 47 #include <utime.h> 48 #include <sys/sysinfo.h> 49 #include <sys/signalfd.h> 50 //#include <sys/user.h> 51 #include <netinet/ip.h> 52 #include <netinet/tcp.h> 53 #include <linux/wireless.h> 54 #include <linux/icmp.h> 55 #include <linux/icmpv6.h> 56 #include <linux/errqueue.h> 57 #include <linux/random.h> 58 #include "qemu-common.h" 59 #ifdef CONFIG_TIMERFD 60 #include <sys/timerfd.h> 61 #endif 62 #ifdef TARGET_GPROF 63 #include <sys/gmon.h> 64 #endif 65 #ifdef CONFIG_EVENTFD 66 #include <sys/eventfd.h> 67 #endif 68 #ifdef CONFIG_EPOLL 69 
#include <sys/epoll.h> 70 #endif 71 #ifdef CONFIG_ATTR 72 #include "qemu/xattr.h" 73 #endif 74 #ifdef CONFIG_SENDFILE 75 #include <sys/sendfile.h> 76 #endif 77 78 #define termios host_termios 79 #define winsize host_winsize 80 #define termio host_termio 81 #define sgttyb host_sgttyb /* same as target */ 82 #define tchars host_tchars /* same as target */ 83 #define ltchars host_ltchars /* same as target */ 84 85 #include <linux/termios.h> 86 #include <linux/unistd.h> 87 #include <linux/cdrom.h> 88 #include <linux/hdreg.h> 89 #include <linux/soundcard.h> 90 #include <linux/kd.h> 91 #include <linux/mtio.h> 92 #include <linux/fs.h> 93 #if defined(CONFIG_FIEMAP) 94 #include <linux/fiemap.h> 95 #endif 96 #include <linux/fb.h> 97 #include <linux/vt.h> 98 #include <linux/dm-ioctl.h> 99 #include <linux/reboot.h> 100 #include <linux/route.h> 101 #include <linux/filter.h> 102 #include <linux/blkpg.h> 103 #include <netpacket/packet.h> 104 #include <linux/netlink.h> 105 #ifdef CONFIG_RTNETLINK 106 #include <linux/rtnetlink.h> 107 #include <linux/if_bridge.h> 108 #endif 109 #include <linux/audit.h> 110 #include "linux_loop.h" 111 #include "uname.h" 112 113 #include "qemu.h" 114 115 #ifndef CLONE_IO 116 #define CLONE_IO 0x80000000 /* Clone io context */ 117 #endif 118 119 /* We can't directly call the host clone syscall, because this will 120 * badly confuse libc (breaking mutexes, for example). So we must 121 * divide clone flags into: 122 * * flag combinations that look like pthread_create() 123 * * flag combinations that look like fork() 124 * * flags we can implement within QEMU itself 125 * * flags we can't support and will return an error for 126 */ 127 /* For thread creation, all these flags must be present; for 128 * fork, none must be present. 
129 */ 130 #define CLONE_THREAD_FLAGS \ 131 (CLONE_VM | CLONE_FS | CLONE_FILES | \ 132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM) 133 134 /* These flags are ignored: 135 * CLONE_DETACHED is now ignored by the kernel; 136 * CLONE_IO is just an optimisation hint to the I/O scheduler 137 */ 138 #define CLONE_IGNORED_FLAGS \ 139 (CLONE_DETACHED | CLONE_IO) 140 141 /* Flags for fork which we can implement within QEMU itself */ 142 #define CLONE_OPTIONAL_FORK_FLAGS \ 143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) 145 146 /* Flags for thread creation which we can implement within QEMU itself */ 147 #define CLONE_OPTIONAL_THREAD_FLAGS \ 148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT) 150 151 #define CLONE_INVALID_FORK_FLAGS \ 152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS)) 153 154 #define CLONE_INVALID_THREAD_FLAGS \ 155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \ 156 CLONE_IGNORED_FLAGS)) 157 158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits 159 * have almost all been allocated. We cannot support any of 160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC, 161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED. 162 * The checks against the invalid thread masks above will catch these. 163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.) 164 */ 165 166 //#define DEBUG 167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted 168 * once. This exercises the codepaths for restart. 
169 */ 170 //#define DEBUG_ERESTARTSYS 171 172 //#include <linux/msdos_fs.h> 173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 175 176 #undef _syscall0 177 #undef _syscall1 178 #undef _syscall2 179 #undef _syscall3 180 #undef _syscall4 181 #undef _syscall5 182 #undef _syscall6 183 184 #define _syscall0(type,name) \ 185 static type name (void) \ 186 { \ 187 return syscall(__NR_##name); \ 188 } 189 190 #define _syscall1(type,name,type1,arg1) \ 191 static type name (type1 arg1) \ 192 { \ 193 return syscall(__NR_##name, arg1); \ 194 } 195 196 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 197 static type name (type1 arg1,type2 arg2) \ 198 { \ 199 return syscall(__NR_##name, arg1, arg2); \ 200 } 201 202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 203 static type name (type1 arg1,type2 arg2,type3 arg3) \ 204 { \ 205 return syscall(__NR_##name, arg1, arg2, arg3); \ 206 } 207 208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 210 { \ 211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 212 } 213 214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 215 type5,arg5) \ 216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 217 { \ 218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 219 } 220 221 222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 223 type5,arg5,type6,arg6) \ 224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 225 type6 arg6) \ 226 { \ 227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 228 } 229 230 231 #define __NR_sys_uname __NR_uname 232 #define __NR_sys_getcwd1 __NR_getcwd 233 #define __NR_sys_getdents __NR_getdents 234 #define __NR_sys_getdents64 __NR_getdents64 235 #define __NR_sys_getpriority 
__NR_getpriority 236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo 238 #define __NR_sys_syslog __NR_syslog 239 #define __NR_sys_futex __NR_futex 240 #define __NR_sys_inotify_init __NR_inotify_init 241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 243 244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__) 245 #define __NR__llseek __NR_lseek 246 #endif 247 248 /* Newer kernel ports have llseek() instead of _llseek() */ 249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 250 #define TARGET_NR__llseek TARGET_NR_llseek 251 #endif 252 253 #ifdef __NR_gettid 254 _syscall0(int, gettid) 255 #else 256 /* This is a replacement for the host gettid() and must return a host 257 errno. */ 258 static int gettid(void) { 259 return -ENOSYS; 260 } 261 #endif 262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents) 263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 264 #endif 265 #if !defined(__NR_getdents) || \ 266 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 267 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 268 #endif 269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 270 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 271 loff_t *, res, uint, wh); 272 #endif 273 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo) 274 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig, 275 siginfo_t *, uinfo) 276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 277 #ifdef __NR_exit_group 278 _syscall1(int,exit_group,int,error_code) 279 #endif 280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 281 _syscall1(int,set_tid_address,int *,tidptr) 282 #endif 283 #if defined(TARGET_NR_futex) && defined(__NR_futex) 284 _syscall6(int,sys_futex,int 
*,uaddr,int,op,int,val, 285 const struct timespec *,timeout,int *,uaddr2,int,val3) 286 #endif 287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 289 unsigned long *, user_mask_ptr); 290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 292 unsigned long *, user_mask_ptr); 293 #define __NR_sys_getcpu __NR_getcpu 294 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache); 295 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 296 void *, arg); 297 _syscall2(int, capget, struct __user_cap_header_struct *, header, 298 struct __user_cap_data_struct *, data); 299 _syscall2(int, capset, struct __user_cap_header_struct *, header, 300 struct __user_cap_data_struct *, data); 301 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 302 _syscall2(int, ioprio_get, int, which, int, who) 303 #endif 304 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 305 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio) 306 #endif 307 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 308 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags) 309 #endif 310 311 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 312 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type, 313 unsigned long, idx1, unsigned long, idx2) 314 #endif 315 316 static bitmask_transtbl fcntl_flags_tbl[] = { 317 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 318 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 319 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 320 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 321 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 322 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 323 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 324 { TARGET_O_NONBLOCK, 
TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 325 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 326 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 327 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 328 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 329 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 330 #if defined(O_DIRECT) 331 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 332 #endif 333 #if defined(O_NOATIME) 334 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 335 #endif 336 #if defined(O_CLOEXEC) 337 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 338 #endif 339 #if defined(O_PATH) 340 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 341 #endif 342 #if defined(O_TMPFILE) 343 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE }, 344 #endif 345 /* Don't terminate the list prematurely on 64-bit host+guest. */ 346 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 347 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 348 #endif 349 { 0, 0, 0, 0 } 350 }; 351 352 enum { 353 QEMU_IFLA_BR_UNSPEC, 354 QEMU_IFLA_BR_FORWARD_DELAY, 355 QEMU_IFLA_BR_HELLO_TIME, 356 QEMU_IFLA_BR_MAX_AGE, 357 QEMU_IFLA_BR_AGEING_TIME, 358 QEMU_IFLA_BR_STP_STATE, 359 QEMU_IFLA_BR_PRIORITY, 360 QEMU_IFLA_BR_VLAN_FILTERING, 361 QEMU_IFLA_BR_VLAN_PROTOCOL, 362 QEMU_IFLA_BR_GROUP_FWD_MASK, 363 QEMU_IFLA_BR_ROOT_ID, 364 QEMU_IFLA_BR_BRIDGE_ID, 365 QEMU_IFLA_BR_ROOT_PORT, 366 QEMU_IFLA_BR_ROOT_PATH_COST, 367 QEMU_IFLA_BR_TOPOLOGY_CHANGE, 368 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED, 369 QEMU_IFLA_BR_HELLO_TIMER, 370 QEMU_IFLA_BR_TCN_TIMER, 371 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER, 372 QEMU_IFLA_BR_GC_TIMER, 373 QEMU_IFLA_BR_GROUP_ADDR, 374 QEMU_IFLA_BR_FDB_FLUSH, 375 QEMU_IFLA_BR_MCAST_ROUTER, 376 QEMU_IFLA_BR_MCAST_SNOOPING, 377 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR, 378 QEMU_IFLA_BR_MCAST_QUERIER, 379 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY, 380 QEMU_IFLA_BR_MCAST_HASH_MAX, 381 
QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT, 382 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT, 383 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL, 384 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL, 385 QEMU_IFLA_BR_MCAST_QUERIER_INTVL, 386 QEMU_IFLA_BR_MCAST_QUERY_INTVL, 387 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, 388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL, 389 QEMU_IFLA_BR_NF_CALL_IPTABLES, 390 QEMU_IFLA_BR_NF_CALL_IP6TABLES, 391 QEMU_IFLA_BR_NF_CALL_ARPTABLES, 392 QEMU_IFLA_BR_VLAN_DEFAULT_PVID, 393 QEMU_IFLA_BR_PAD, 394 QEMU_IFLA_BR_VLAN_STATS_ENABLED, 395 QEMU_IFLA_BR_MCAST_STATS_ENABLED, 396 QEMU___IFLA_BR_MAX, 397 }; 398 399 enum { 400 QEMU_IFLA_UNSPEC, 401 QEMU_IFLA_ADDRESS, 402 QEMU_IFLA_BROADCAST, 403 QEMU_IFLA_IFNAME, 404 QEMU_IFLA_MTU, 405 QEMU_IFLA_LINK, 406 QEMU_IFLA_QDISC, 407 QEMU_IFLA_STATS, 408 QEMU_IFLA_COST, 409 QEMU_IFLA_PRIORITY, 410 QEMU_IFLA_MASTER, 411 QEMU_IFLA_WIRELESS, 412 QEMU_IFLA_PROTINFO, 413 QEMU_IFLA_TXQLEN, 414 QEMU_IFLA_MAP, 415 QEMU_IFLA_WEIGHT, 416 QEMU_IFLA_OPERSTATE, 417 QEMU_IFLA_LINKMODE, 418 QEMU_IFLA_LINKINFO, 419 QEMU_IFLA_NET_NS_PID, 420 QEMU_IFLA_IFALIAS, 421 QEMU_IFLA_NUM_VF, 422 QEMU_IFLA_VFINFO_LIST, 423 QEMU_IFLA_STATS64, 424 QEMU_IFLA_VF_PORTS, 425 QEMU_IFLA_PORT_SELF, 426 QEMU_IFLA_AF_SPEC, 427 QEMU_IFLA_GROUP, 428 QEMU_IFLA_NET_NS_FD, 429 QEMU_IFLA_EXT_MASK, 430 QEMU_IFLA_PROMISCUITY, 431 QEMU_IFLA_NUM_TX_QUEUES, 432 QEMU_IFLA_NUM_RX_QUEUES, 433 QEMU_IFLA_CARRIER, 434 QEMU_IFLA_PHYS_PORT_ID, 435 QEMU_IFLA_CARRIER_CHANGES, 436 QEMU_IFLA_PHYS_SWITCH_ID, 437 QEMU_IFLA_LINK_NETNSID, 438 QEMU_IFLA_PHYS_PORT_NAME, 439 QEMU_IFLA_PROTO_DOWN, 440 QEMU_IFLA_GSO_MAX_SEGS, 441 QEMU_IFLA_GSO_MAX_SIZE, 442 QEMU_IFLA_PAD, 443 QEMU_IFLA_XDP, 444 QEMU___IFLA_MAX 445 }; 446 447 enum { 448 QEMU_IFLA_BRPORT_UNSPEC, 449 QEMU_IFLA_BRPORT_STATE, 450 QEMU_IFLA_BRPORT_PRIORITY, 451 QEMU_IFLA_BRPORT_COST, 452 QEMU_IFLA_BRPORT_MODE, 453 QEMU_IFLA_BRPORT_GUARD, 454 QEMU_IFLA_BRPORT_PROTECT, 455 QEMU_IFLA_BRPORT_FAST_LEAVE, 456 QEMU_IFLA_BRPORT_LEARNING, 457 
QEMU_IFLA_BRPORT_UNICAST_FLOOD, 458 QEMU_IFLA_BRPORT_PROXYARP, 459 QEMU_IFLA_BRPORT_LEARNING_SYNC, 460 QEMU_IFLA_BRPORT_PROXYARP_WIFI, 461 QEMU_IFLA_BRPORT_ROOT_ID, 462 QEMU_IFLA_BRPORT_BRIDGE_ID, 463 QEMU_IFLA_BRPORT_DESIGNATED_PORT, 464 QEMU_IFLA_BRPORT_DESIGNATED_COST, 465 QEMU_IFLA_BRPORT_ID, 466 QEMU_IFLA_BRPORT_NO, 467 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, 468 QEMU_IFLA_BRPORT_CONFIG_PENDING, 469 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER, 470 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER, 471 QEMU_IFLA_BRPORT_HOLD_TIMER, 472 QEMU_IFLA_BRPORT_FLUSH, 473 QEMU_IFLA_BRPORT_MULTICAST_ROUTER, 474 QEMU_IFLA_BRPORT_PAD, 475 QEMU___IFLA_BRPORT_MAX 476 }; 477 478 enum { 479 QEMU_IFLA_INFO_UNSPEC, 480 QEMU_IFLA_INFO_KIND, 481 QEMU_IFLA_INFO_DATA, 482 QEMU_IFLA_INFO_XSTATS, 483 QEMU_IFLA_INFO_SLAVE_KIND, 484 QEMU_IFLA_INFO_SLAVE_DATA, 485 QEMU___IFLA_INFO_MAX, 486 }; 487 488 enum { 489 QEMU_IFLA_INET_UNSPEC, 490 QEMU_IFLA_INET_CONF, 491 QEMU___IFLA_INET_MAX, 492 }; 493 494 enum { 495 QEMU_IFLA_INET6_UNSPEC, 496 QEMU_IFLA_INET6_FLAGS, 497 QEMU_IFLA_INET6_CONF, 498 QEMU_IFLA_INET6_STATS, 499 QEMU_IFLA_INET6_MCAST, 500 QEMU_IFLA_INET6_CACHEINFO, 501 QEMU_IFLA_INET6_ICMP6STATS, 502 QEMU_IFLA_INET6_TOKEN, 503 QEMU_IFLA_INET6_ADDR_GEN_MODE, 504 QEMU___IFLA_INET6_MAX 505 }; 506 507 typedef abi_long (*TargetFdDataFunc)(void *, size_t); 508 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t); 509 typedef struct TargetFdTrans { 510 TargetFdDataFunc host_to_target_data; 511 TargetFdDataFunc target_to_host_data; 512 TargetFdAddrFunc target_to_host_addr; 513 } TargetFdTrans; 514 515 static TargetFdTrans **target_fd_trans; 516 517 static unsigned int target_fd_max; 518 519 static TargetFdDataFunc fd_trans_target_to_host_data(int fd) 520 { 521 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 522 return target_fd_trans[fd]->target_to_host_data; 523 } 524 return NULL; 525 } 526 527 static TargetFdDataFunc fd_trans_host_to_target_data(int fd) 528 { 529 if (fd >= 0 && fd < 
target_fd_max && target_fd_trans[fd]) { 530 return target_fd_trans[fd]->host_to_target_data; 531 } 532 return NULL; 533 } 534 535 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd) 536 { 537 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 538 return target_fd_trans[fd]->target_to_host_addr; 539 } 540 return NULL; 541 } 542 543 static void fd_trans_register(int fd, TargetFdTrans *trans) 544 { 545 unsigned int oldmax; 546 547 if (fd >= target_fd_max) { 548 oldmax = target_fd_max; 549 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */ 550 target_fd_trans = g_renew(TargetFdTrans *, 551 target_fd_trans, target_fd_max); 552 memset((void *)(target_fd_trans + oldmax), 0, 553 (target_fd_max - oldmax) * sizeof(TargetFdTrans *)); 554 } 555 target_fd_trans[fd] = trans; 556 } 557 558 static void fd_trans_unregister(int fd) 559 { 560 if (fd >= 0 && fd < target_fd_max) { 561 target_fd_trans[fd] = NULL; 562 } 563 } 564 565 static void fd_trans_dup(int oldfd, int newfd) 566 { 567 fd_trans_unregister(newfd); 568 if (oldfd < target_fd_max && target_fd_trans[oldfd]) { 569 fd_trans_register(newfd, target_fd_trans[oldfd]); 570 } 571 } 572 573 static int sys_getcwd1(char *buf, size_t size) 574 { 575 if (getcwd(buf, size) == NULL) { 576 /* getcwd() sets errno */ 577 return (-1); 578 } 579 return strlen(buf)+1; 580 } 581 582 #ifdef TARGET_NR_utimensat 583 #if defined(__NR_utimensat) 584 #define __NR_sys_utimensat __NR_utimensat 585 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 586 const struct timespec *,tsp,int,flags) 587 #else 588 static int sys_utimensat(int dirfd, const char *pathname, 589 const struct timespec times[2], int flags) 590 { 591 errno = ENOSYS; 592 return -1; 593 } 594 #endif 595 #endif /* TARGET_NR_utimensat */ 596 597 #ifdef TARGET_NR_renameat2 598 #if defined(__NR_renameat2) 599 #define __NR_sys_renameat2 __NR_renameat2 600 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd, 601 const char *, new, 
unsigned int, flags) 602 #else 603 static int sys_renameat2(int oldfd, const char *old, 604 int newfd, const char *new, int flags) 605 { 606 if (flags == 0) { 607 return renameat(oldfd, old, newfd, new); 608 } 609 errno = ENOSYS; 610 return -1; 611 } 612 #endif 613 #endif /* TARGET_NR_renameat2 */ 614 615 #ifdef CONFIG_INOTIFY 616 #include <sys/inotify.h> 617 618 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 619 static int sys_inotify_init(void) 620 { 621 return (inotify_init()); 622 } 623 #endif 624 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 625 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 626 { 627 return (inotify_add_watch(fd, pathname, mask)); 628 } 629 #endif 630 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 631 static int sys_inotify_rm_watch(int fd, int32_t wd) 632 { 633 return (inotify_rm_watch(fd, wd)); 634 } 635 #endif 636 #ifdef CONFIG_INOTIFY1 637 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 638 static int sys_inotify_init1(int flags) 639 { 640 return (inotify_init1(flags)); 641 } 642 #endif 643 #endif 644 #else 645 /* Userspace can usually survive runtime without inotify */ 646 #undef TARGET_NR_inotify_init 647 #undef TARGET_NR_inotify_init1 648 #undef TARGET_NR_inotify_add_watch 649 #undef TARGET_NR_inotify_rm_watch 650 #endif /* CONFIG_INOTIFY */ 651 652 #if defined(TARGET_NR_prlimit64) 653 #ifndef __NR_prlimit64 654 # define __NR_prlimit64 -1 655 #endif 656 #define __NR_sys_prlimit64 __NR_prlimit64 657 /* The glibc rlimit structure may not be that used by the underlying syscall */ 658 struct host_rlimit64 { 659 uint64_t rlim_cur; 660 uint64_t rlim_max; 661 }; 662 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 663 const struct host_rlimit64 *, new_limit, 664 struct host_rlimit64 *, old_limit) 665 #endif 666 667 668 #if defined(TARGET_NR_timer_create) 669 /* Maxiumum of 32 active POSIX timers allowed at any 
one time. */
/* Slot k is non-zero iff host timer k is allocated; next_free_host_timer()
 * reserves a slot by storing the placeholder (timer_t)1, and the real
 * timer id presumably replaces it later — TODO confirm against the
 * timer_create handler (outside this chunk). */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers and return its index,
 * or -1 if all 32 slots are in use. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* mark the slot taken before returning it */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* regpairs_aligned(cpu_env, num) returns non-zero when the target ABI
 * passes a 64-bit syscall argument in an aligned (even/odd) register
 * pair, so the syscall dispatcher must skip a register before reading
 * the low/high halves.  `num` is the target syscall number, used only
 * by ABIs where alignment depends on the specific syscall (SH4). */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    /* Only the EABI flavour of ARM aligns register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif

/* Size of both errno translation tables; must exceed every errno value
 * used as an index below. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
726 */ 727 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 728 [EAGAIN] = TARGET_EAGAIN, 729 [EIDRM] = TARGET_EIDRM, 730 [ECHRNG] = TARGET_ECHRNG, 731 [EL2NSYNC] = TARGET_EL2NSYNC, 732 [EL3HLT] = TARGET_EL3HLT, 733 [EL3RST] = TARGET_EL3RST, 734 [ELNRNG] = TARGET_ELNRNG, 735 [EUNATCH] = TARGET_EUNATCH, 736 [ENOCSI] = TARGET_ENOCSI, 737 [EL2HLT] = TARGET_EL2HLT, 738 [EDEADLK] = TARGET_EDEADLK, 739 [ENOLCK] = TARGET_ENOLCK, 740 [EBADE] = TARGET_EBADE, 741 [EBADR] = TARGET_EBADR, 742 [EXFULL] = TARGET_EXFULL, 743 [ENOANO] = TARGET_ENOANO, 744 [EBADRQC] = TARGET_EBADRQC, 745 [EBADSLT] = TARGET_EBADSLT, 746 [EBFONT] = TARGET_EBFONT, 747 [ENOSTR] = TARGET_ENOSTR, 748 [ENODATA] = TARGET_ENODATA, 749 [ETIME] = TARGET_ETIME, 750 [ENOSR] = TARGET_ENOSR, 751 [ENONET] = TARGET_ENONET, 752 [ENOPKG] = TARGET_ENOPKG, 753 [EREMOTE] = TARGET_EREMOTE, 754 [ENOLINK] = TARGET_ENOLINK, 755 [EADV] = TARGET_EADV, 756 [ESRMNT] = TARGET_ESRMNT, 757 [ECOMM] = TARGET_ECOMM, 758 [EPROTO] = TARGET_EPROTO, 759 [EDOTDOT] = TARGET_EDOTDOT, 760 [EMULTIHOP] = TARGET_EMULTIHOP, 761 [EBADMSG] = TARGET_EBADMSG, 762 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 763 [EOVERFLOW] = TARGET_EOVERFLOW, 764 [ENOTUNIQ] = TARGET_ENOTUNIQ, 765 [EBADFD] = TARGET_EBADFD, 766 [EREMCHG] = TARGET_EREMCHG, 767 [ELIBACC] = TARGET_ELIBACC, 768 [ELIBBAD] = TARGET_ELIBBAD, 769 [ELIBSCN] = TARGET_ELIBSCN, 770 [ELIBMAX] = TARGET_ELIBMAX, 771 [ELIBEXEC] = TARGET_ELIBEXEC, 772 [EILSEQ] = TARGET_EILSEQ, 773 [ENOSYS] = TARGET_ENOSYS, 774 [ELOOP] = TARGET_ELOOP, 775 [ERESTART] = TARGET_ERESTART, 776 [ESTRPIPE] = TARGET_ESTRPIPE, 777 [ENOTEMPTY] = TARGET_ENOTEMPTY, 778 [EUSERS] = TARGET_EUSERS, 779 [ENOTSOCK] = TARGET_ENOTSOCK, 780 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 781 [EMSGSIZE] = TARGET_EMSGSIZE, 782 [EPROTOTYPE] = TARGET_EPROTOTYPE, 783 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 784 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 785 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 786 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 787 
[EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 788 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 789 [EADDRINUSE] = TARGET_EADDRINUSE, 790 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 791 [ENETDOWN] = TARGET_ENETDOWN, 792 [ENETUNREACH] = TARGET_ENETUNREACH, 793 [ENETRESET] = TARGET_ENETRESET, 794 [ECONNABORTED] = TARGET_ECONNABORTED, 795 [ECONNRESET] = TARGET_ECONNRESET, 796 [ENOBUFS] = TARGET_ENOBUFS, 797 [EISCONN] = TARGET_EISCONN, 798 [ENOTCONN] = TARGET_ENOTCONN, 799 [EUCLEAN] = TARGET_EUCLEAN, 800 [ENOTNAM] = TARGET_ENOTNAM, 801 [ENAVAIL] = TARGET_ENAVAIL, 802 [EISNAM] = TARGET_EISNAM, 803 [EREMOTEIO] = TARGET_EREMOTEIO, 804 [EDQUOT] = TARGET_EDQUOT, 805 [ESHUTDOWN] = TARGET_ESHUTDOWN, 806 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 807 [ETIMEDOUT] = TARGET_ETIMEDOUT, 808 [ECONNREFUSED] = TARGET_ECONNREFUSED, 809 [EHOSTDOWN] = TARGET_EHOSTDOWN, 810 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 811 [EALREADY] = TARGET_EALREADY, 812 [EINPROGRESS] = TARGET_EINPROGRESS, 813 [ESTALE] = TARGET_ESTALE, 814 [ECANCELED] = TARGET_ECANCELED, 815 [ENOMEDIUM] = TARGET_ENOMEDIUM, 816 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 817 #ifdef ENOKEY 818 [ENOKEY] = TARGET_ENOKEY, 819 #endif 820 #ifdef EKEYEXPIRED 821 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 822 #endif 823 #ifdef EKEYREVOKED 824 [EKEYREVOKED] = TARGET_EKEYREVOKED, 825 #endif 826 #ifdef EKEYREJECTED 827 [EKEYREJECTED] = TARGET_EKEYREJECTED, 828 #endif 829 #ifdef EOWNERDEAD 830 [EOWNERDEAD] = TARGET_EOWNERDEAD, 831 #endif 832 #ifdef ENOTRECOVERABLE 833 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 834 #endif 835 #ifdef ENOMSG 836 [ENOMSG] = TARGET_ENOMSG, 837 #endif 838 #ifdef ERKFILL 839 [ERFKILL] = TARGET_ERFKILL, 840 #endif 841 #ifdef EHWPOISON 842 [EHWPOISON] = TARGET_EHWPOISON, 843 #endif 844 }; 845 846 static inline int host_to_target_errno(int err) 847 { 848 if (err >= 0 && err < ERRNO_TABLE_SIZE && 849 host_to_target_errno_table[err]) { 850 return host_to_target_errno_table[err]; 851 } 852 return err; 853 } 854 855 static inline int target_to_host_errno(int 
err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

/* Convert a host libc return value into a target syscall result:
 * -1 becomes the negated target errno (kernel convention), anything
 * else is passed through unchanged. */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

/* Kernel-style error test: results in the top 4096 values of the
 * address space encode negated errnos. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

/* strerror() for target errno values, including the two QEMU-internal
 * pseudo-errnos; returns NULL for out-of-range input. */
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

/* safe_syscallN generate safe_<name>() wrappers that invoke the raw
 * syscall via safe_syscall().  NOTE(review): safe_syscall itself is
 * defined outside this chunk — presumably the signal-race-free entry
 * point used so blocking syscalls can be restarted; confirm there. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, \
                        arg1, arg2, arg3, arg4, arg5); \
}

/* Generate a signal-safe wrapper safe_<name>() for a 6-argument syscall;
 * safe_syscall() guarantees that a guest signal arriving just before the
 * host syscall is not lost (see the block comment earlier in this file).
 */
#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

/* Signal-safe wrappers for every blocking syscall the emulation uses. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
/* ipc() multiplexer call number: version in the high 16 bits, op below. */
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
/* msgsnd via the single ipc() entry point. */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv via ipc(); version 1 passes msgtype directly as 'fifth'. */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
/* semtimedop via ipc(); the timeout rides in the 'fifth' slot as a long. */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...)
safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

/* Translate a host socket type (low nibble is the base type, plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK flag bits where the host defines them)
 * into the corresponding target-ABI value.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        /* Other base types share numbering between host and target. */
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

/* Guest heap state used by the brk emulation below. */
static abi_ulong target_brk;           /* current guest program break */
static abi_ulong target_original_brk;  /* initial break: lower bound for brk */
static abi_ulong brk_page;             /* first page beyond the mapped heap */

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break (matches kernel behaviour:
     * an invalid request leaves the break unchanged). */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}

/* Read a guest fd_set (an array of abi_ulong words, one bit per fd)
 * into a host fd_set.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/* As copy_from_user_fdset(), but a NULL (zero) guest address yields a
 * NULL fd_set pointer, as select() allows.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/* Write a host fd_set back out as a guest fd_set bit array. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

/* Host clock tick rate for clock_t conversion. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits so the intermediate product cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Copy a host struct rusage into guest memory, byte-swapping each
 * field as needed.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

/* Convert a guest rlimit value to a host rlim_t, mapping the target's
 * RLIM_INFINITY (and any value that would not survive the narrowing to
 * rlim_t) to the host RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* If the value was truncated by the assignment above, saturate. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

/* Convert a host rlim_t to a guest rlimit value; host infinity, and any
 * value too big for the target word, becomes TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

/* Map a target RLIMIT_* resource code to the host code; unknown codes
 * are passed through unchanged for the host kernel to reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/* Read a guest struct timeval into a host struct timeval. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/* Write a host struct timeval out to guest memory. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct
timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

/* Read a guest struct timezone into a host struct timezone. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into a host struct mq_attr. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr out to guest memory. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 * Implemented on top of safe_pselect6(), converting the guest timeval
 * to a timespec and copying the (possibly updated) timeout back out.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Mirror Linux select() semantics: write back the remaining time. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select(): a single pointer to a block holding all five
 * arguments, unpacked here and forwarded to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct
*sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

/* pipe2() wrapper; returns -ENOSYS on hosts without pipe2 support. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

/* Create a pipe for the guest.  is_pipe2 distinguishes the pipe2
 * syscall from plain pipe, which on some targets returns the second
 * fd in a register instead of writing both fds to guest memory.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/* Convert a guest ip_mreq/ip_mreqn to the host struct; the ifindex
 * field is only present when the guest passed the longer mreqn form.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    /* IP addresses are kept in network byte order: no swap needed. */
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

/* Copy a guest sockaddr into a host sockaddr, fixing up the family
 * field and any family-specific host-endian members.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types (e.g. netlink) install their own address converter. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/* Copy a host sockaddr out to guest memory, byte-swapping the family
 * field and the host-endian members of netlink/packet/IPv6 addresses.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only touch sa_family if the buffer is big enough to contain it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* Convert the control-message (cmsg) chain of a guest msghdr into the
 * already-allocated host msghdr, converting payloads (SCM_RIGHTS fds,
 * SCM_CREDENTIALS) where supported.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small.
It can't happen for the payload types 1718 * we currently support; if it becomes an issue in future 1719 * we would need to improve our allocation strategy to 1720 * something more intelligent than "twice the size of the 1721 * target buffer we're reading from". 1722 */ 1723 gemu_log("Host cmsg overflow\n"); 1724 break; 1725 } 1726 1727 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1728 cmsg->cmsg_level = SOL_SOCKET; 1729 } else { 1730 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1731 } 1732 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1733 cmsg->cmsg_len = CMSG_LEN(len); 1734 1735 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { 1736 int *fd = (int *)data; 1737 int *target_fd = (int *)target_data; 1738 int i, numfds = len / sizeof(int); 1739 1740 for (i = 0; i < numfds; i++) { 1741 __get_user(fd[i], target_fd + i); 1742 } 1743 } else if (cmsg->cmsg_level == SOL_SOCKET 1744 && cmsg->cmsg_type == SCM_CREDENTIALS) { 1745 struct ucred *cred = (struct ucred *)data; 1746 struct target_ucred *target_cred = 1747 (struct target_ucred *)target_data; 1748 1749 __get_user(cred->pid, &target_cred->pid); 1750 __get_user(cred->uid, &target_cred->uid); 1751 __get_user(cred->gid, &target_cred->gid); 1752 } else { 1753 gemu_log("Unsupported ancillary data: %d/%d\n", 1754 cmsg->cmsg_level, cmsg->cmsg_type); 1755 memcpy(data, target_data, len); 1756 } 1757 1758 cmsg = CMSG_NXTHDR(msgh, cmsg); 1759 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 1760 target_cmsg_start); 1761 } 1762 unlock_user(target_cmsg, target_cmsg_addr, 0); 1763 the_end: 1764 msgh->msg_controllen = space; 1765 return 0; 1766 } 1767 1768 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1769 struct msghdr *msgh) 1770 { 1771 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1772 abi_long msg_controllen; 1773 abi_ulong target_cmsg_addr; 1774 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1775 socklen_t space = 0; 1776 1777 
msg_controllen = tswapal(target_msgh->msg_controllen); 1778 if (msg_controllen < sizeof (struct target_cmsghdr)) 1779 goto the_end; 1780 target_cmsg_addr = tswapal(target_msgh->msg_control); 1781 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1782 target_cmsg_start = target_cmsg; 1783 if (!target_cmsg) 1784 return -TARGET_EFAULT; 1785 1786 while (cmsg && target_cmsg) { 1787 void *data = CMSG_DATA(cmsg); 1788 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1789 1790 int len = cmsg->cmsg_len - sizeof(struct cmsghdr); 1791 int tgt_len, tgt_space; 1792 1793 /* We never copy a half-header but may copy half-data; 1794 * this is Linux's behaviour in put_cmsg(). Note that 1795 * truncation here is a guest problem (which we report 1796 * to the guest via the CTRUNC bit), unlike truncation 1797 * in target_to_host_cmsg, which is a QEMU bug. 1798 */ 1799 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1800 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1801 break; 1802 } 1803 1804 if (cmsg->cmsg_level == SOL_SOCKET) { 1805 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1806 } else { 1807 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1808 } 1809 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1810 1811 /* Payload types which need a different size of payload on 1812 * the target must adjust tgt_len here. 1813 */ 1814 switch (cmsg->cmsg_level) { 1815 case SOL_SOCKET: 1816 switch (cmsg->cmsg_type) { 1817 case SO_TIMESTAMP: 1818 tgt_len = sizeof(struct target_timeval); 1819 break; 1820 default: 1821 break; 1822 } 1823 default: 1824 tgt_len = len; 1825 break; 1826 } 1827 1828 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 1829 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1830 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 1831 } 1832 1833 /* We must now copy-and-convert len bytes of payload 1834 * into tgt_len bytes of destination space. 
Bear in mind 1835 * that in both source and destination we may be dealing 1836 * with a truncated value! 1837 */ 1838 switch (cmsg->cmsg_level) { 1839 case SOL_SOCKET: 1840 switch (cmsg->cmsg_type) { 1841 case SCM_RIGHTS: 1842 { 1843 int *fd = (int *)data; 1844 int *target_fd = (int *)target_data; 1845 int i, numfds = tgt_len / sizeof(int); 1846 1847 for (i = 0; i < numfds; i++) { 1848 __put_user(fd[i], target_fd + i); 1849 } 1850 break; 1851 } 1852 case SO_TIMESTAMP: 1853 { 1854 struct timeval *tv = (struct timeval *)data; 1855 struct target_timeval *target_tv = 1856 (struct target_timeval *)target_data; 1857 1858 if (len != sizeof(struct timeval) || 1859 tgt_len != sizeof(struct target_timeval)) { 1860 goto unimplemented; 1861 } 1862 1863 /* copy struct timeval to target */ 1864 __put_user(tv->tv_sec, &target_tv->tv_sec); 1865 __put_user(tv->tv_usec, &target_tv->tv_usec); 1866 break; 1867 } 1868 case SCM_CREDENTIALS: 1869 { 1870 struct ucred *cred = (struct ucred *)data; 1871 struct target_ucred *target_cred = 1872 (struct target_ucred *)target_data; 1873 1874 __put_user(cred->pid, &target_cred->pid); 1875 __put_user(cred->uid, &target_cred->uid); 1876 __put_user(cred->gid, &target_cred->gid); 1877 break; 1878 } 1879 default: 1880 goto unimplemented; 1881 } 1882 break; 1883 1884 case SOL_IP: 1885 switch (cmsg->cmsg_type) { 1886 case IP_TTL: 1887 { 1888 uint32_t *v = (uint32_t *)data; 1889 uint32_t *t_int = (uint32_t *)target_data; 1890 1891 if (len != sizeof(uint32_t) || 1892 tgt_len != sizeof(uint32_t)) { 1893 goto unimplemented; 1894 } 1895 __put_user(*v, t_int); 1896 break; 1897 } 1898 case IP_RECVERR: 1899 { 1900 struct errhdr_t { 1901 struct sock_extended_err ee; 1902 struct sockaddr_in offender; 1903 }; 1904 struct errhdr_t *errh = (struct errhdr_t *)data; 1905 struct errhdr_t *target_errh = 1906 (struct errhdr_t *)target_data; 1907 1908 if (len != sizeof(struct errhdr_t) || 1909 tgt_len != sizeof(struct errhdr_t)) { 1910 goto unimplemented; 1911 } 1912 
__put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1913 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1914 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1915 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1916 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1917 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1918 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1919 host_to_target_sockaddr((unsigned long) &target_errh->offender, 1920 (void *) &errh->offender, sizeof(errh->offender)); 1921 break; 1922 } 1923 default: 1924 goto unimplemented; 1925 } 1926 break; 1927 1928 case SOL_IPV6: 1929 switch (cmsg->cmsg_type) { 1930 case IPV6_HOPLIMIT: 1931 { 1932 uint32_t *v = (uint32_t *)data; 1933 uint32_t *t_int = (uint32_t *)target_data; 1934 1935 if (len != sizeof(uint32_t) || 1936 tgt_len != sizeof(uint32_t)) { 1937 goto unimplemented; 1938 } 1939 __put_user(*v, t_int); 1940 break; 1941 } 1942 case IPV6_RECVERR: 1943 { 1944 struct errhdr6_t { 1945 struct sock_extended_err ee; 1946 struct sockaddr_in6 offender; 1947 }; 1948 struct errhdr6_t *errh = (struct errhdr6_t *)data; 1949 struct errhdr6_t *target_errh = 1950 (struct errhdr6_t *)target_data; 1951 1952 if (len != sizeof(struct errhdr6_t) || 1953 tgt_len != sizeof(struct errhdr6_t)) { 1954 goto unimplemented; 1955 } 1956 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1957 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1958 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1959 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1960 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1961 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1962 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1963 host_to_target_sockaddr((unsigned long) &target_errh->offender, 1964 (void *) &errh->offender, sizeof(errh->offender)); 1965 break; 1966 } 1967 default: 1968 goto unimplemented; 1969 } 1970 break; 1971 1972 default: 
1973 unimplemented: 1974 gemu_log("Unsupported ancillary data: %d/%d\n", 1975 cmsg->cmsg_level, cmsg->cmsg_type); 1976 memcpy(target_data, data, MIN(len, tgt_len)); 1977 if (tgt_len > len) { 1978 memset(target_data + len, 0, tgt_len - len); 1979 } 1980 } 1981 1982 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len)); 1983 tgt_space = TARGET_CMSG_SPACE(tgt_len); 1984 if (msg_controllen < tgt_space) { 1985 tgt_space = msg_controllen; 1986 } 1987 msg_controllen -= tgt_space; 1988 space += tgt_space; 1989 cmsg = CMSG_NXTHDR(msgh, cmsg); 1990 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 1991 target_cmsg_start); 1992 } 1993 unlock_user(target_cmsg, target_cmsg_addr, space); 1994 the_end: 1995 target_msgh->msg_controllen = tswapal(space); 1996 return 0; 1997 } 1998 1999 static void tswap_nlmsghdr(struct nlmsghdr *nlh) 2000 { 2001 nlh->nlmsg_len = tswap32(nlh->nlmsg_len); 2002 nlh->nlmsg_type = tswap16(nlh->nlmsg_type); 2003 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags); 2004 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq); 2005 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid); 2006 } 2007 2008 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh, 2009 size_t len, 2010 abi_long (*host_to_target_nlmsg) 2011 (struct nlmsghdr *)) 2012 { 2013 uint32_t nlmsg_len; 2014 abi_long ret; 2015 2016 while (len > sizeof(struct nlmsghdr)) { 2017 2018 nlmsg_len = nlh->nlmsg_len; 2019 if (nlmsg_len < sizeof(struct nlmsghdr) || 2020 nlmsg_len > len) { 2021 break; 2022 } 2023 2024 switch (nlh->nlmsg_type) { 2025 case NLMSG_DONE: 2026 tswap_nlmsghdr(nlh); 2027 return 0; 2028 case NLMSG_NOOP: 2029 break; 2030 case NLMSG_ERROR: 2031 { 2032 struct nlmsgerr *e = NLMSG_DATA(nlh); 2033 e->error = tswap32(e->error); 2034 tswap_nlmsghdr(&e->msg); 2035 tswap_nlmsghdr(nlh); 2036 return 0; 2037 } 2038 default: 2039 ret = host_to_target_nlmsg(nlh); 2040 if (ret < 0) { 2041 tswap_nlmsghdr(nlh); 2042 return ret; 2043 } 2044 break; 2045 } 2046 tswap_nlmsghdr(nlh); 2047 len -= 
NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}

/* Mirror of host_to_target_for_each_nlmsg() for the sending direction:
 * convert a buffer of target-endian netlink messages to host order so
 * they can be handed to the host kernel.  The header is swapped first
 * (before the payload callback), the opposite order from the receive
 * path.  Returns 0 or a negative target errno from the callback. */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Header fields are still target-endian; validate the length
         * through tswap32() before trusting it. */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* nlmsg_len is host-endian now — safe to use directly. */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}

#ifdef CONFIG_RTNETLINK
/* Walk a run of host-endian netlink attributes (struct nlattr),
 * letting the callback convert each payload, then swapping the
 * attribute header itself.  'context' is passed through untouched for
 * callbacks that need to accumulate state (see linkinfo_context). */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                                        (struct nlattr *,
                                                         void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Save the host-endian length before the header is swapped. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        /* Payload first (the callback reads nla_type/nla_len in host
         * order), header swap second. */
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}

/* Same walk as above but for struct rtattr attribute runs (rtnetlink
 * payloads); converts each attribute payload via the callback and then
 * swaps the rtattr header in place. */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Save the host-endian length before the header is swapped. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}

/* Pointer to the payload of a netlink attribute (skips the header). */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)

/* Convert the payload of one IFLA_BR_* (bridge device) attribute to
 * target endianness, based on the kernel's declared type for each
 * attribute.  Single-byte and opaque-binary attributes need no swap. */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Convert one IFLA_BRPORT_* (bridge port) attribute payload to target
 * endianness; same per-type dispatch pattern as the bridge converter. */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* State threaded through the IFLA_LINKINFO attribute walk: the KIND /
 * SLAVE_KIND strings seen earlier select how the nested DATA /
 * SLAVE_DATA attributes are converted. */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};

/* Convert one nested IFLA_INFO_* attribute.  Relies on the kernel
 * emitting INFO_KIND before INFO_DATA (and SLAVE_KIND before
 * SLAVE_DATA) so the context is populated when the nested data
 * arrives.  Only the "bridge" kind is understood. */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case QEMU_IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case QEMU_IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case QEMU_IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case QEMU_IFLA_INFO_DATA:
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            /* NOTE(review): nla_len includes the attribute header, so
             * the nested walk gets a slightly over-long 'len'; the
             * walker's bounds check tolerates it — confirm intended. */
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case QEMU_IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}

/* Convert one nested IFLA_INET_* attribute (per-interface IPv4
 * configuration): CONF is an array of uint32_t sysctl values. */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    case QEMU_IFLA_INET_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}

/* Convert one nested IFLA_INET6_* attribute (per-interface IPv6 state):
 * scalar flags, sysctl arrays, cacheinfo struct and 64-bit stats. */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}

static abi_long
host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    /* IFLA_AF_SPEC nests one attribute per address family; dispatch on
     * the family carried in nla_type. */
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                            host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Convert the payload of one IFLA_* attribute of an RTM_*LINK message
 * to target endianness, per the kernel's declared type for each
 * attribute (scalars, stats structs, ifmap, or nested attribute sets). */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uin8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        /* Fresh context per LINKINFO blob: KIND/SLAVE_KIND fill it in,
         * then DATA/SLAVE_DATA consult it. */
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Convert one IFA_* attribute of an RTM_*ADDR message to target
 * endianness. */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered =
tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Convert one RTA_* attribute of an RTM_*ROUTE message to target
 * endianness; addresses are copied verbatim, scalars are swapped. */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Thin wrappers binding the generic rtattr walker to each converter. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}

static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}

static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}

/* Convert the payload of one rtnetlink message (link, address or route
 * family) from host to target endianness: fixed header fields first,
 * then the trailing attribute list.  Unknown types yield EINVAL. */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* Header is still host-endian at this point (the walker swaps it
     * after this callback returns). */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags =
tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}

/* Entry point: convert a received buffer of NETLINK_ROUTE messages to
 * target endianness. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}

/* Walk a run of target-endian rtattr attributes being sent to the
 * host kernel: swap each header to host order, then let the callback
 * convert the payload. */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* rta_len is still target-endian: validate via tswap16(). */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                                   RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}

/* No outbound IFLA_* attribute currently needs payload conversion;
 * unknown types are just logged. */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Outbound IFA_* attribute conversion: addresses pass through as raw
 * bytes; anything else is logged as unknown. */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Outbound RTA_* attribute conversion for route messages: swap the
 * scalar attributes, pass addresses through untouched. */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Thin wrappers binding the outbound rtattr walker to each converter.
 * Return values are discarded: conversion is best-effort here. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}

static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}

static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}

/* Convert the payload of one outbound rtnetlink message to host
 * endianness.  GETLINK/GETADDR/GETROUTE dumps carry no payload needing
 * conversion; unknown types yield EOPNOTSUPP. */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        /* nlmsg_len is already host-endian: the nlmsg walker swapped
         * the header before invoking this callback. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}

/* Entry point: convert a buffer of outbound NETLINK_ROUTE messages to
 * host endianness before handing it to sendmsg(). */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
#endif /* CONFIG_RTNETLINK */

/* No inbound NETLINK_AUDIT payload conversion is implemented yet:
 * every message type is rejected with EINVAL after logging. */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }
    return 0;
}

/* Entry point: convert a received buffer of NETLINK_AUDIT messages. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}

/* Outbound NETLINK_AUDIT payloads: user messages are free-form text
 * and need no byte-swapping; everything else is rejected. */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ...
AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}

/* Entry point: convert a buffer of outbound NETLINK_AUDIT messages. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These accept either an int or a single byte from the
             * guest, mirroring the kernel's own leniency. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept both the short ip_mreq and the full ip_mreqn
             * layouts from the guest. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* Layout matches the host struct, so pass it straight
             * through from guest memory. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index needs byte-swapping; the
             * address is an opaque 16-byte field. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            /* Silently truncate an oversized filter to the host size. */
            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is 8 x 32-bit bitmap words. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

set_timeout:
                /* Shared tail for RCVTIMEO/SNDTIMEO: translate the
                 * target timeval layout to the host one. */
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* Rebuild the BPF program with host-endian fields;
                 * jt/jf are single bytes and copied as-is. */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* Clamp to the host's maximum interface-name length
                 * and ensure NUL-termination of the copied name. */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
        default:
            goto unimplemented;
        }
        /* Common tail for all the plain-int SOL_SOCKET options mapped
         * above. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos.
 */
/* Translate a guest getsockopt() to the host: convert TARGET_SO_*/level
 * constants to host values, perform the host call, and copy the result
 * back to guest memory in guest byte order.  Returns target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            /* Fetch the host ucred, then copy out at most the number of
             * bytes the guest asked for. */
            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) is used where sizeof(val) is meant;
         * both socklen_t and int are 4 bytes on Linux hosts so the
         * behavior is the same — confirm before changing. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* The kernel copies out a single byte when the guest buffer is
         * smaller than an int; mirror that here. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* IP-level options accept a byte-sized result when the value
             * fits and the guest supplied a short buffer. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* Lock a guest iovec array into host memory, returning a host iovec
 * array (caller frees via unlock_iovec()).  On failure returns NULL
 * with errno set; errno == 0 distinguishes the legal count == 0 case.
 * 'type' is VERIFY_READ/VERIFY_WRITE; 'copy' requests copy-in of the
 * buffer contents. */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the total so it never exceeds max_len, matching the
             * kernel's overall iovec size limit. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind only the entries successfully locked so far. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}

/* Release an iovec array obtained from lock_iovec(); 'copy' requests
 * copy-out of buffer contents back to the guest. */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            /* lock_iovec() stopped locking at the first negative length,
             * so stop unlocking there too. */
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}

/* Convert a guest socket type (base type plus SOCK_CLOEXEC/SOCK_NONBLOCK
 * flag bits) into the host's encoding, in place.  Returns 0 on success
 * or -TARGET_EINVAL if a requested flag cannot be represented on this
 * host at socket-creation time. */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Without SOCK_NONBLOCK the flag is emulated later via fcntl();
         * if even O_NONBLOCK is missing we cannot support it at all. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            /* The socket is unusable as requested; close it and fail. */
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* Copy a guest sockaddr for an obsolete SOCK_PACKET socket to the host,
 * byte-swapping only the family field. */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}

/* fd translation hooks for SOCK_PACKET sockets. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};

#ifdef CONFIG_RTNETLINK
/* Byte-swap an outgoing NETLINK_ROUTE message buffer in place;
 * returns the (unchanged) length or a negative target errno. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* Byte-swap an incoming NETLINK_ROUTE message buffer in place;
 * returns the (unchanged) length or a negative target errno. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* fd translation hooks for NETLINK_ROUTE sockets. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */

/* Byte-swap an outgoing NETLINK_AUDIT message buffer in place. */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* Byte-swap an incoming NETLINK_AUDIT message buffer in place. */
static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* fd translation hooks for NETLINK_AUDIT sockets. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};

/* do_socket() Must return target values and target errnos.
*/ 3632 static abi_long do_socket(int domain, int type, int protocol) 3633 { 3634 int target_type = type; 3635 int ret; 3636 3637 ret = target_to_host_sock_type(&type); 3638 if (ret) { 3639 return ret; 3640 } 3641 3642 if (domain == PF_NETLINK && !( 3643 #ifdef CONFIG_RTNETLINK 3644 protocol == NETLINK_ROUTE || 3645 #endif 3646 protocol == NETLINK_KOBJECT_UEVENT || 3647 protocol == NETLINK_AUDIT)) { 3648 return -EPFNOSUPPORT; 3649 } 3650 3651 if (domain == AF_PACKET || 3652 (domain == AF_INET && type == SOCK_PACKET)) { 3653 protocol = tswap16(protocol); 3654 } 3655 3656 ret = get_errno(socket(domain, type, protocol)); 3657 if (ret >= 0) { 3658 ret = sock_flags_fixup(ret, target_type); 3659 if (type == SOCK_PACKET) { 3660 /* Manage an obsolete case : 3661 * if socket type is SOCK_PACKET, bind by name 3662 */ 3663 fd_trans_register(ret, &target_packet_trans); 3664 } else if (domain == PF_NETLINK) { 3665 switch (protocol) { 3666 #ifdef CONFIG_RTNETLINK 3667 case NETLINK_ROUTE: 3668 fd_trans_register(ret, &target_netlink_route_trans); 3669 break; 3670 #endif 3671 case NETLINK_KOBJECT_UEVENT: 3672 /* nothing to do: messages are strings */ 3673 break; 3674 case NETLINK_AUDIT: 3675 fd_trans_register(ret, &target_netlink_audit_trans); 3676 break; 3677 default: 3678 g_assert_not_reached(); 3679 } 3680 } 3681 } 3682 return ret; 3683 } 3684 3685 /* do_bind() Must return target values and target errnos. */ 3686 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3687 socklen_t addrlen) 3688 { 3689 void *addr; 3690 abi_long ret; 3691 3692 if ((int)addrlen < 0) { 3693 return -TARGET_EINVAL; 3694 } 3695 3696 addr = alloca(addrlen+1); 3697 3698 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3699 if (ret) 3700 return ret; 3701 3702 return get_errno(bind(sockfd, addr, addrlen)); 3703 } 3704 3705 /* do_connect() Must return target values and target errnos. 
 */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* One spare byte for target_to_host_sockaddr() — see its definition. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Common body for sendmsg/recvmsg once the target_msghdr is locked.
 * 'send' selects direction; returns bytes transferred or a target errno. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the control buffer: host cmsgs may be larger than the
     * target's (presumably due to alignment/layout differences) —
     * TODO confirm against target_to_host_cmsg(). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate a copy of the (first) buffer so the guest's
             * original data is left untouched. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success return the byte count from recvmsg, not the
                 * translation helpers' return value. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

/* sendmsg/recvmsg entry point: lock the guest msghdr, delegate to
 * do_sendrecvmsg_locked(), copy back on receive. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* sendmmsg/recvmmsg: loop do_sendrecvmsg_locked() over a guest
 * mmsghdr vector, stopping at the first error. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}

/* do_accept4() Must return target values and target errnos.
*/ 3894 static abi_long do_accept4(int fd, abi_ulong target_addr, 3895 abi_ulong target_addrlen_addr, int flags) 3896 { 3897 socklen_t addrlen; 3898 void *addr; 3899 abi_long ret; 3900 int host_flags; 3901 3902 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3903 3904 if (target_addr == 0) { 3905 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3906 } 3907 3908 /* linux returns EINVAL if addrlen pointer is invalid */ 3909 if (get_user_u32(addrlen, target_addrlen_addr)) 3910 return -TARGET_EINVAL; 3911 3912 if ((int)addrlen < 0) { 3913 return -TARGET_EINVAL; 3914 } 3915 3916 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3917 return -TARGET_EINVAL; 3918 3919 addr = alloca(addrlen); 3920 3921 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags)); 3922 if (!is_error(ret)) { 3923 host_to_target_sockaddr(target_addr, addr, addrlen); 3924 if (put_user_u32(addrlen, target_addrlen_addr)) 3925 ret = -TARGET_EFAULT; 3926 } 3927 return ret; 3928 } 3929 3930 /* do_getpeername() Must return target values and target errnos. */ 3931 static abi_long do_getpeername(int fd, abi_ulong target_addr, 3932 abi_ulong target_addrlen_addr) 3933 { 3934 socklen_t addrlen; 3935 void *addr; 3936 abi_long ret; 3937 3938 if (get_user_u32(addrlen, target_addrlen_addr)) 3939 return -TARGET_EFAULT; 3940 3941 if ((int)addrlen < 0) { 3942 return -TARGET_EINVAL; 3943 } 3944 3945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3946 return -TARGET_EFAULT; 3947 3948 addr = alloca(addrlen); 3949 3950 ret = get_errno(getpeername(fd, addr, &addrlen)); 3951 if (!is_error(ret)) { 3952 host_to_target_sockaddr(target_addr, addr, addrlen); 3953 if (put_user_u32(addrlen, target_addrlen_addr)) 3954 ret = -TARGET_EFAULT; 3955 } 3956 return ret; 3957 } 3958 3959 /* do_getsockname() Must return target values and target errnos. 
*/ 3960 static abi_long do_getsockname(int fd, abi_ulong target_addr, 3961 abi_ulong target_addrlen_addr) 3962 { 3963 socklen_t addrlen; 3964 void *addr; 3965 abi_long ret; 3966 3967 if (get_user_u32(addrlen, target_addrlen_addr)) 3968 return -TARGET_EFAULT; 3969 3970 if ((int)addrlen < 0) { 3971 return -TARGET_EINVAL; 3972 } 3973 3974 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3975 return -TARGET_EFAULT; 3976 3977 addr = alloca(addrlen); 3978 3979 ret = get_errno(getsockname(fd, addr, &addrlen)); 3980 if (!is_error(ret)) { 3981 host_to_target_sockaddr(target_addr, addr, addrlen); 3982 if (put_user_u32(addrlen, target_addrlen_addr)) 3983 ret = -TARGET_EFAULT; 3984 } 3985 return ret; 3986 } 3987 3988 /* do_socketpair() Must return target values and target errnos. */ 3989 static abi_long do_socketpair(int domain, int type, int protocol, 3990 abi_ulong target_tab_addr) 3991 { 3992 int tab[2]; 3993 abi_long ret; 3994 3995 target_to_host_sock_type(&type); 3996 3997 ret = get_errno(socketpair(domain, type, protocol, tab)); 3998 if (!is_error(ret)) { 3999 if (put_user_s32(tab[0], target_tab_addr) 4000 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 4001 ret = -TARGET_EFAULT; 4002 } 4003 return ret; 4004 } 4005 4006 /* do_sendto() Must return target values and target errnos. 
*/ 4007 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 4008 abi_ulong target_addr, socklen_t addrlen) 4009 { 4010 void *addr; 4011 void *host_msg; 4012 void *copy_msg = NULL; 4013 abi_long ret; 4014 4015 if ((int)addrlen < 0) { 4016 return -TARGET_EINVAL; 4017 } 4018 4019 host_msg = lock_user(VERIFY_READ, msg, len, 1); 4020 if (!host_msg) 4021 return -TARGET_EFAULT; 4022 if (fd_trans_target_to_host_data(fd)) { 4023 copy_msg = host_msg; 4024 host_msg = g_malloc(len); 4025 memcpy(host_msg, copy_msg, len); 4026 ret = fd_trans_target_to_host_data(fd)(host_msg, len); 4027 if (ret < 0) { 4028 goto fail; 4029 } 4030 } 4031 if (target_addr) { 4032 addr = alloca(addrlen+1); 4033 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 4034 if (ret) { 4035 goto fail; 4036 } 4037 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen)); 4038 } else { 4039 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0)); 4040 } 4041 fail: 4042 if (copy_msg) { 4043 g_free(host_msg); 4044 host_msg = copy_msg; 4045 } 4046 unlock_user(host_msg, msg, 0); 4047 return ret; 4048 } 4049 4050 /* do_recvfrom() Must return target values and target errnos. */ 4051 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 4052 abi_ulong target_addr, 4053 abi_ulong target_addrlen) 4054 { 4055 socklen_t addrlen; 4056 void *addr; 4057 void *host_msg; 4058 abi_long ret; 4059 4060 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 4061 if (!host_msg) 4062 return -TARGET_EFAULT; 4063 if (target_addr) { 4064 if (get_user_u32(addrlen, target_addrlen)) { 4065 ret = -TARGET_EFAULT; 4066 goto fail; 4067 } 4068 if ((int)addrlen < 0) { 4069 ret = -TARGET_EINVAL; 4070 goto fail; 4071 } 4072 addr = alloca(addrlen); 4073 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, 4074 addr, &addrlen)); 4075 } else { 4076 addr = NULL; /* To keep compiler quiet. 
*/ 4077 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0)); 4078 } 4079 if (!is_error(ret)) { 4080 if (fd_trans_host_to_target_data(fd)) { 4081 ret = fd_trans_host_to_target_data(fd)(host_msg, ret); 4082 } 4083 if (target_addr) { 4084 host_to_target_sockaddr(target_addr, addr, addrlen); 4085 if (put_user_u32(addrlen, target_addrlen)) { 4086 ret = -TARGET_EFAULT; 4087 goto fail; 4088 } 4089 } 4090 unlock_user(host_msg, msg, len); 4091 } else { 4092 fail: 4093 unlock_user(host_msg, msg, 0); 4094 } 4095 return ret; 4096 } 4097 4098 #ifdef TARGET_NR_socketcall 4099 /* do_socketcall() must return target values and target errnos. */ 4100 static abi_long do_socketcall(int num, abi_ulong vptr) 4101 { 4102 static const unsigned nargs[] = { /* number of arguments per operation */ 4103 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */ 4104 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */ 4105 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */ 4106 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */ 4107 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */ 4108 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */ 4109 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */ 4110 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */ 4111 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */ 4112 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */ 4113 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */ 4114 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */ 4115 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */ 4116 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 4117 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 4118 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */ 4119 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */ 4120 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */ 4121 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */ 4122 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */ 4123 }; 
4124 abi_long a[6]; /* max 6 args */ 4125 unsigned i; 4126 4127 /* check the range of the first argument num */ 4128 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */ 4129 if (num < 1 || num > TARGET_SYS_SENDMMSG) { 4130 return -TARGET_EINVAL; 4131 } 4132 /* ensure we have space for args */ 4133 if (nargs[num] > ARRAY_SIZE(a)) { 4134 return -TARGET_EINVAL; 4135 } 4136 /* collect the arguments in a[] according to nargs[] */ 4137 for (i = 0; i < nargs[num]; ++i) { 4138 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 4139 return -TARGET_EFAULT; 4140 } 4141 } 4142 /* now when we have the args, invoke the appropriate underlying function */ 4143 switch (num) { 4144 case TARGET_SYS_SOCKET: /* domain, type, protocol */ 4145 return do_socket(a[0], a[1], a[2]); 4146 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */ 4147 return do_bind(a[0], a[1], a[2]); 4148 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */ 4149 return do_connect(a[0], a[1], a[2]); 4150 case TARGET_SYS_LISTEN: /* sockfd, backlog */ 4151 return get_errno(listen(a[0], a[1])); 4152 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */ 4153 return do_accept4(a[0], a[1], a[2], 0); 4154 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */ 4155 return do_getsockname(a[0], a[1], a[2]); 4156 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */ 4157 return do_getpeername(a[0], a[1], a[2]); 4158 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */ 4159 return do_socketpair(a[0], a[1], a[2], a[3]); 4160 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */ 4161 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 4162 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */ 4163 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 4164 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */ 4165 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 4166 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */ 4167 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], 
a[5]); 4168 case TARGET_SYS_SHUTDOWN: /* sockfd, how */ 4169 return get_errno(shutdown(a[0], a[1])); 4170 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 4171 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 4172 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 4173 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 4174 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */ 4175 return do_sendrecvmsg(a[0], a[1], a[2], 1); 4176 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */ 4177 return do_sendrecvmsg(a[0], a[1], a[2], 0); 4178 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */ 4179 return do_accept4(a[0], a[1], a[2], a[3]); 4180 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */ 4181 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0); 4182 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */ 4183 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1); 4184 default: 4185 gemu_log("Unsupported socketcall: %d\n", num); 4186 return -TARGET_EINVAL; 4187 } 4188 } 4189 #endif 4190 4191 #define N_SHM_REGIONS 32 4192 4193 static struct shm_region { 4194 abi_ulong start; 4195 abi_ulong size; 4196 bool in_use; 4197 } shm_regions[N_SHM_REGIONS]; 4198 4199 #ifndef TARGET_SEMID64_DS 4200 /* asm-generic version of this struct */ 4201 struct target_semid64_ds 4202 { 4203 struct target_ipc_perm sem_perm; 4204 abi_ulong sem_otime; 4205 #if TARGET_ABI_BITS == 32 4206 abi_ulong __unused1; 4207 #endif 4208 abi_ulong sem_ctime; 4209 #if TARGET_ABI_BITS == 32 4210 abi_ulong __unused2; 4211 #endif 4212 abi_ulong sem_nsems; 4213 abi_ulong __unused3; 4214 abi_ulong __unused4; 4215 }; 4216 #endif 4217 4218 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 4219 abi_ulong target_addr) 4220 { 4221 struct target_ipc_perm *target_ip; 4222 struct target_semid64_ds *target_sd; 4223 4224 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4225 return -TARGET_EFAULT; 4226 target_ip = 
&(target_sd->sem_perm); 4227 host_ip->__key = tswap32(target_ip->__key); 4228 host_ip->uid = tswap32(target_ip->uid); 4229 host_ip->gid = tswap32(target_ip->gid); 4230 host_ip->cuid = tswap32(target_ip->cuid); 4231 host_ip->cgid = tswap32(target_ip->cgid); 4232 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 4233 host_ip->mode = tswap32(target_ip->mode); 4234 #else 4235 host_ip->mode = tswap16(target_ip->mode); 4236 #endif 4237 #if defined(TARGET_PPC) 4238 host_ip->__seq = tswap32(target_ip->__seq); 4239 #else 4240 host_ip->__seq = tswap16(target_ip->__seq); 4241 #endif 4242 unlock_user_struct(target_sd, target_addr, 0); 4243 return 0; 4244 } 4245 4246 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 4247 struct ipc_perm *host_ip) 4248 { 4249 struct target_ipc_perm *target_ip; 4250 struct target_semid64_ds *target_sd; 4251 4252 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4253 return -TARGET_EFAULT; 4254 target_ip = &(target_sd->sem_perm); 4255 target_ip->__key = tswap32(host_ip->__key); 4256 target_ip->uid = tswap32(host_ip->uid); 4257 target_ip->gid = tswap32(host_ip->gid); 4258 target_ip->cuid = tswap32(host_ip->cuid); 4259 target_ip->cgid = tswap32(host_ip->cgid); 4260 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 4261 target_ip->mode = tswap32(host_ip->mode); 4262 #else 4263 target_ip->mode = tswap16(host_ip->mode); 4264 #endif 4265 #if defined(TARGET_PPC) 4266 target_ip->__seq = tswap32(host_ip->__seq); 4267 #else 4268 target_ip->__seq = tswap16(host_ip->__seq); 4269 #endif 4270 unlock_user_struct(target_sd, target_addr, 1); 4271 return 0; 4272 } 4273 4274 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 4275 abi_ulong target_addr) 4276 { 4277 struct target_semid64_ds *target_sd; 4278 4279 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4280 return -TARGET_EFAULT; 4281 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 
4282 return -TARGET_EFAULT; 4283 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 4284 host_sd->sem_otime = tswapal(target_sd->sem_otime); 4285 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 4286 unlock_user_struct(target_sd, target_addr, 0); 4287 return 0; 4288 } 4289 4290 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 4291 struct semid_ds *host_sd) 4292 { 4293 struct target_semid64_ds *target_sd; 4294 4295 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4296 return -TARGET_EFAULT; 4297 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 4298 return -TARGET_EFAULT; 4299 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 4300 target_sd->sem_otime = tswapal(host_sd->sem_otime); 4301 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 4302 unlock_user_struct(target_sd, target_addr, 1); 4303 return 0; 4304 } 4305 4306 struct target_seminfo { 4307 int semmap; 4308 int semmni; 4309 int semmns; 4310 int semmnu; 4311 int semmsl; 4312 int semopm; 4313 int semume; 4314 int semusz; 4315 int semvmx; 4316 int semaem; 4317 }; 4318 4319 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 4320 struct seminfo *host_seminfo) 4321 { 4322 struct target_seminfo *target_seminfo; 4323 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 4324 return -TARGET_EFAULT; 4325 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 4326 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 4327 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 4328 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 4329 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 4330 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 4331 __put_user(host_seminfo->semume, &target_seminfo->semume); 4332 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 4333 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 4334 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 4335 
unlock_user_struct(target_seminfo, target_addr, 1); 4336 return 0; 4337 } 4338 4339 union semun { 4340 int val; 4341 struct semid_ds *buf; 4342 unsigned short *array; 4343 struct seminfo *__buf; 4344 }; 4345 4346 union target_semun { 4347 int val; 4348 abi_ulong buf; 4349 abi_ulong array; 4350 abi_ulong __buf; 4351 }; 4352 4353 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 4354 abi_ulong target_addr) 4355 { 4356 int nsems; 4357 unsigned short *array; 4358 union semun semun; 4359 struct semid_ds semid_ds; 4360 int i, ret; 4361 4362 semun.buf = &semid_ds; 4363 4364 ret = semctl(semid, 0, IPC_STAT, semun); 4365 if (ret == -1) 4366 return get_errno(ret); 4367 4368 nsems = semid_ds.sem_nsems; 4369 4370 *host_array = g_try_new(unsigned short, nsems); 4371 if (!*host_array) { 4372 return -TARGET_ENOMEM; 4373 } 4374 array = lock_user(VERIFY_READ, target_addr, 4375 nsems*sizeof(unsigned short), 1); 4376 if (!array) { 4377 g_free(*host_array); 4378 return -TARGET_EFAULT; 4379 } 4380 4381 for(i=0; i<nsems; i++) { 4382 __get_user((*host_array)[i], &array[i]); 4383 } 4384 unlock_user(array, target_addr, 0); 4385 4386 return 0; 4387 } 4388 4389 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 4390 unsigned short **host_array) 4391 { 4392 int nsems; 4393 unsigned short *array; 4394 union semun semun; 4395 struct semid_ds semid_ds; 4396 int i, ret; 4397 4398 semun.buf = &semid_ds; 4399 4400 ret = semctl(semid, 0, IPC_STAT, semun); 4401 if (ret == -1) 4402 return get_errno(ret); 4403 4404 nsems = semid_ds.sem_nsems; 4405 4406 array = lock_user(VERIFY_WRITE, target_addr, 4407 nsems*sizeof(unsigned short), 0); 4408 if (!array) 4409 return -TARGET_EFAULT; 4410 4411 for(i=0; i<nsems; i++) { 4412 __put_user((*host_array)[i], &array[i]); 4413 } 4414 g_free(*host_array); 4415 unlock_user(array, target_addr, 1); 4416 4417 return 0; 4418 } 4419 4420 static inline abi_long do_semctl(int semid, int semnum, int cmd, 4421 
abi_ulong target_arg) 4422 { 4423 union target_semun target_su = { .buf = target_arg }; 4424 union semun arg; 4425 struct semid_ds dsarg; 4426 unsigned short *array = NULL; 4427 struct seminfo seminfo; 4428 abi_long ret = -TARGET_EINVAL; 4429 abi_long err; 4430 cmd &= 0xff; 4431 4432 switch( cmd ) { 4433 case GETVAL: 4434 case SETVAL: 4435 /* In 64 bit cross-endian situations, we will erroneously pick up 4436 * the wrong half of the union for the "val" element. To rectify 4437 * this, the entire 8-byte structure is byteswapped, followed by 4438 * a swap of the 4 byte val field. In other cases, the data is 4439 * already in proper host byte order. */ 4440 if (sizeof(target_su.val) != (sizeof(target_su.buf))) { 4441 target_su.buf = tswapal(target_su.buf); 4442 arg.val = tswap32(target_su.val); 4443 } else { 4444 arg.val = target_su.val; 4445 } 4446 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4447 break; 4448 case GETALL: 4449 case SETALL: 4450 err = target_to_host_semarray(semid, &array, target_su.array); 4451 if (err) 4452 return err; 4453 arg.array = array; 4454 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4455 err = host_to_target_semarray(semid, target_su.array, &array); 4456 if (err) 4457 return err; 4458 break; 4459 case IPC_STAT: 4460 case IPC_SET: 4461 case SEM_STAT: 4462 err = target_to_host_semid_ds(&dsarg, target_su.buf); 4463 if (err) 4464 return err; 4465 arg.buf = &dsarg; 4466 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4467 err = host_to_target_semid_ds(target_su.buf, &dsarg); 4468 if (err) 4469 return err; 4470 break; 4471 case IPC_INFO: 4472 case SEM_INFO: 4473 arg.__buf = &seminfo; 4474 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4475 err = host_to_target_seminfo(target_su.__buf, &seminfo); 4476 if (err) 4477 return err; 4478 break; 4479 case IPC_RMID: 4480 case GETPID: 4481 case GETNCNT: 4482 case GETZCNT: 4483 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 4484 break; 4485 } 4486 4487 return ret; 4488 } 4489 4490 struct 
target_sembuf { 4491 unsigned short sem_num; 4492 short sem_op; 4493 short sem_flg; 4494 }; 4495 4496 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 4497 abi_ulong target_addr, 4498 unsigned nsops) 4499 { 4500 struct target_sembuf *target_sembuf; 4501 int i; 4502 4503 target_sembuf = lock_user(VERIFY_READ, target_addr, 4504 nsops*sizeof(struct target_sembuf), 1); 4505 if (!target_sembuf) 4506 return -TARGET_EFAULT; 4507 4508 for(i=0; i<nsops; i++) { 4509 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 4510 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 4511 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 4512 } 4513 4514 unlock_user(target_sembuf, target_addr, 0); 4515 4516 return 0; 4517 } 4518 4519 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 4520 { 4521 struct sembuf sops[nsops]; 4522 4523 if (target_to_host_sembuf(sops, ptr, nsops)) 4524 return -TARGET_EFAULT; 4525 4526 return get_errno(safe_semtimedop(semid, sops, nsops, NULL)); 4527 } 4528 4529 struct target_msqid_ds 4530 { 4531 struct target_ipc_perm msg_perm; 4532 abi_ulong msg_stime; 4533 #if TARGET_ABI_BITS == 32 4534 abi_ulong __unused1; 4535 #endif 4536 abi_ulong msg_rtime; 4537 #if TARGET_ABI_BITS == 32 4538 abi_ulong __unused2; 4539 #endif 4540 abi_ulong msg_ctime; 4541 #if TARGET_ABI_BITS == 32 4542 abi_ulong __unused3; 4543 #endif 4544 abi_ulong __msg_cbytes; 4545 abi_ulong msg_qnum; 4546 abi_ulong msg_qbytes; 4547 abi_ulong msg_lspid; 4548 abi_ulong msg_lrpid; 4549 abi_ulong __unused4; 4550 abi_ulong __unused5; 4551 }; 4552 4553 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 4554 abi_ulong target_addr) 4555 { 4556 struct target_msqid_ds *target_md; 4557 4558 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 4559 return -TARGET_EFAULT; 4560 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 4561 return -TARGET_EFAULT; 4562 host_md->msg_stime = 
tswapal(target_md->msg_stime); 4563 host_md->msg_rtime = tswapal(target_md->msg_rtime); 4564 host_md->msg_ctime = tswapal(target_md->msg_ctime); 4565 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 4566 host_md->msg_qnum = tswapal(target_md->msg_qnum); 4567 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 4568 host_md->msg_lspid = tswapal(target_md->msg_lspid); 4569 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 4570 unlock_user_struct(target_md, target_addr, 0); 4571 return 0; 4572 } 4573 4574 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 4575 struct msqid_ds *host_md) 4576 { 4577 struct target_msqid_ds *target_md; 4578 4579 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 4580 return -TARGET_EFAULT; 4581 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 4582 return -TARGET_EFAULT; 4583 target_md->msg_stime = tswapal(host_md->msg_stime); 4584 target_md->msg_rtime = tswapal(host_md->msg_rtime); 4585 target_md->msg_ctime = tswapal(host_md->msg_ctime); 4586 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 4587 target_md->msg_qnum = tswapal(host_md->msg_qnum); 4588 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 4589 target_md->msg_lspid = tswapal(host_md->msg_lspid); 4590 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 4591 unlock_user_struct(target_md, target_addr, 1); 4592 return 0; 4593 } 4594 4595 struct target_msginfo { 4596 int msgpool; 4597 int msgmap; 4598 int msgmax; 4599 int msgmnb; 4600 int msgmni; 4601 int msgssz; 4602 int msgtql; 4603 unsigned short int msgseg; 4604 }; 4605 4606 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 4607 struct msginfo *host_msginfo) 4608 { 4609 struct target_msginfo *target_msginfo; 4610 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 4611 return -TARGET_EFAULT; 4612 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 4613 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 4614 
__put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 4615 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 4616 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 4617 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 4618 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 4619 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 4620 unlock_user_struct(target_msginfo, target_addr, 1); 4621 return 0; 4622 } 4623 4624 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 4625 { 4626 struct msqid_ds dsarg; 4627 struct msginfo msginfo; 4628 abi_long ret = -TARGET_EINVAL; 4629 4630 cmd &= 0xff; 4631 4632 switch (cmd) { 4633 case IPC_STAT: 4634 case IPC_SET: 4635 case MSG_STAT: 4636 if (target_to_host_msqid_ds(&dsarg,ptr)) 4637 return -TARGET_EFAULT; 4638 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 4639 if (host_to_target_msqid_ds(ptr,&dsarg)) 4640 return -TARGET_EFAULT; 4641 break; 4642 case IPC_RMID: 4643 ret = get_errno(msgctl(msgid, cmd, NULL)); 4644 break; 4645 case IPC_INFO: 4646 case MSG_INFO: 4647 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 4648 if (host_to_target_msginfo(ptr, &msginfo)) 4649 return -TARGET_EFAULT; 4650 break; 4651 } 4652 4653 return ret; 4654 } 4655 4656 struct target_msgbuf { 4657 abi_long mtype; 4658 char mtext[1]; 4659 }; 4660 4661 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 4662 ssize_t msgsz, int msgflg) 4663 { 4664 struct target_msgbuf *target_mb; 4665 struct msgbuf *host_mb; 4666 abi_long ret = 0; 4667 4668 if (msgsz < 0) { 4669 return -TARGET_EINVAL; 4670 } 4671 4672 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 4673 return -TARGET_EFAULT; 4674 host_mb = g_try_malloc(msgsz + sizeof(long)); 4675 if (!host_mb) { 4676 unlock_user_struct(target_mb, msgp, 0); 4677 return -TARGET_ENOMEM; 4678 } 4679 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 4680 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 4681 ret = 
get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg)); 4682 g_free(host_mb); 4683 unlock_user_struct(target_mb, msgp, 0); 4684 4685 return ret; 4686 } 4687 4688 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 4689 ssize_t msgsz, abi_long msgtyp, 4690 int msgflg) 4691 { 4692 struct target_msgbuf *target_mb; 4693 char *target_mtext; 4694 struct msgbuf *host_mb; 4695 abi_long ret = 0; 4696 4697 if (msgsz < 0) { 4698 return -TARGET_EINVAL; 4699 } 4700 4701 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 4702 return -TARGET_EFAULT; 4703 4704 host_mb = g_try_malloc(msgsz + sizeof(long)); 4705 if (!host_mb) { 4706 ret = -TARGET_ENOMEM; 4707 goto end; 4708 } 4709 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 4710 4711 if (ret > 0) { 4712 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 4713 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 4714 if (!target_mtext) { 4715 ret = -TARGET_EFAULT; 4716 goto end; 4717 } 4718 memcpy(target_mb->mtext, host_mb->mtext, ret); 4719 unlock_user(target_mtext, target_mtext_addr, ret); 4720 } 4721 4722 target_mb->mtype = tswapal(host_mb->mtype); 4723 4724 end: 4725 if (target_mb) 4726 unlock_user_struct(target_mb, msgp, 1); 4727 g_free(host_mb); 4728 return ret; 4729 } 4730 4731 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 4732 abi_ulong target_addr) 4733 { 4734 struct target_shmid_ds *target_sd; 4735 4736 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4737 return -TARGET_EFAULT; 4738 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 4739 return -TARGET_EFAULT; 4740 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4741 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 4742 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4743 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4744 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4745 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4746 
__get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4747 unlock_user_struct(target_sd, target_addr, 0); 4748 return 0; 4749 } 4750 4751 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 4752 struct shmid_ds *host_sd) 4753 { 4754 struct target_shmid_ds *target_sd; 4755 4756 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4757 return -TARGET_EFAULT; 4758 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 4759 return -TARGET_EFAULT; 4760 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4761 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 4762 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4763 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4764 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4765 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4766 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4767 unlock_user_struct(target_sd, target_addr, 1); 4768 return 0; 4769 } 4770 4771 struct target_shminfo { 4772 abi_ulong shmmax; 4773 abi_ulong shmmin; 4774 abi_ulong shmmni; 4775 abi_ulong shmseg; 4776 abi_ulong shmall; 4777 }; 4778 4779 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 4780 struct shminfo *host_shminfo) 4781 { 4782 struct target_shminfo *target_shminfo; 4783 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 4784 return -TARGET_EFAULT; 4785 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 4786 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 4787 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 4788 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 4789 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 4790 unlock_user_struct(target_shminfo, target_addr, 1); 4791 return 0; 4792 } 4793 4794 struct target_shm_info { 4795 int used_ids; 4796 abi_ulong shm_tot; 4797 abi_ulong shm_rss; 4798 abi_ulong shm_swp; 4799 abi_ulong swap_attempts; 4800 abi_ulong swap_successes; 4801 }; 
4802 4803 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 4804 struct shm_info *host_shm_info) 4805 { 4806 struct target_shm_info *target_shm_info; 4807 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 4808 return -TARGET_EFAULT; 4809 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 4810 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 4811 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 4812 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 4813 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 4814 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 4815 unlock_user_struct(target_shm_info, target_addr, 1); 4816 return 0; 4817 } 4818 4819 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 4820 { 4821 struct shmid_ds dsarg; 4822 struct shminfo shminfo; 4823 struct shm_info shm_info; 4824 abi_long ret = -TARGET_EINVAL; 4825 4826 cmd &= 0xff; 4827 4828 switch(cmd) { 4829 case IPC_STAT: 4830 case IPC_SET: 4831 case SHM_STAT: 4832 if (target_to_host_shmid_ds(&dsarg, buf)) 4833 return -TARGET_EFAULT; 4834 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 4835 if (host_to_target_shmid_ds(buf, &dsarg)) 4836 return -TARGET_EFAULT; 4837 break; 4838 case IPC_INFO: 4839 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 4840 if (host_to_target_shminfo(buf, &shminfo)) 4841 return -TARGET_EFAULT; 4842 break; 4843 case SHM_INFO: 4844 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 4845 if (host_to_target_shm_info(buf, &shm_info)) 4846 return -TARGET_EFAULT; 4847 break; 4848 case IPC_RMID: 4849 case SHM_LOCK: 4850 case SHM_UNLOCK: 4851 ret = get_errno(shmctl(shmid, cmd, NULL)); 4852 break; 4853 } 4854 4855 return ret; 4856 } 4857 4858 #ifndef TARGET_FORCE_SHMLBA 4859 /* For most architectures, SHMLBA is the same as the page size; 4860 * some architectures have larger values, in which 
case they should 4861 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function. 4862 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA 4863 * and defining its own value for SHMLBA. 4864 * 4865 * The kernel also permits SHMLBA to be set by the architecture to a 4866 * value larger than the page size without setting __ARCH_FORCE_SHMLBA; 4867 * this means that addresses are rounded to the large size if 4868 * SHM_RND is set but addresses not aligned to that size are not rejected 4869 * as long as they are at least page-aligned. Since the only architecture 4870 * which uses this is ia64 this code doesn't provide for that oddity. 4871 */ 4872 static inline abi_ulong target_shmlba(CPUArchState *cpu_env) 4873 { 4874 return TARGET_PAGE_SIZE; 4875 } 4876 #endif 4877 4878 static inline abi_ulong do_shmat(CPUArchState *cpu_env, 4879 int shmid, abi_ulong shmaddr, int shmflg) 4880 { 4881 abi_long raddr; 4882 void *host_raddr; 4883 struct shmid_ds shm_info; 4884 int i,ret; 4885 abi_ulong shmlba; 4886 4887 /* find out the length of the shared memory segment */ 4888 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 4889 if (is_error(ret)) { 4890 /* can't get length, bail out */ 4891 return ret; 4892 } 4893 4894 shmlba = target_shmlba(cpu_env); 4895 4896 if (shmaddr & (shmlba - 1)) { 4897 if (shmflg & SHM_RND) { 4898 shmaddr &= ~(shmlba - 1); 4899 } else { 4900 return -TARGET_EINVAL; 4901 } 4902 } 4903 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) { 4904 return -TARGET_EINVAL; 4905 } 4906 4907 mmap_lock(); 4908 4909 if (shmaddr) 4910 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 4911 else { 4912 abi_ulong mmap_start; 4913 4914 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 4915 4916 if (mmap_start == -1) { 4917 errno = ENOMEM; 4918 host_raddr = (void *)-1; 4919 } else 4920 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 4921 } 4922 4923 if (host_raddr == (void *)-1) { 4924 mmap_unlock(); 4925 return 
get_errno((long)host_raddr); 4926 } 4927 raddr=h2g((unsigned long)host_raddr); 4928 4929 page_set_flags(raddr, raddr + shm_info.shm_segsz, 4930 PAGE_VALID | PAGE_READ | 4931 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 4932 4933 for (i = 0; i < N_SHM_REGIONS; i++) { 4934 if (!shm_regions[i].in_use) { 4935 shm_regions[i].in_use = true; 4936 shm_regions[i].start = raddr; 4937 shm_regions[i].size = shm_info.shm_segsz; 4938 break; 4939 } 4940 } 4941 4942 mmap_unlock(); 4943 return raddr; 4944 4945 } 4946 4947 static inline abi_long do_shmdt(abi_ulong shmaddr) 4948 { 4949 int i; 4950 4951 for (i = 0; i < N_SHM_REGIONS; ++i) { 4952 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { 4953 shm_regions[i].in_use = false; 4954 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 4955 break; 4956 } 4957 } 4958 4959 return get_errno(shmdt(g2h(shmaddr))); 4960 } 4961 4962 #ifdef TARGET_NR_ipc 4963 /* ??? This only works with linear mappings. */ 4964 /* do_ipc() must return target values and target errnos. */ 4965 static abi_long do_ipc(CPUArchState *cpu_env, 4966 unsigned int call, abi_long first, 4967 abi_long second, abi_long third, 4968 abi_long ptr, abi_long fifth) 4969 { 4970 int version; 4971 abi_long ret = 0; 4972 4973 version = call >> 16; 4974 call &= 0xffff; 4975 4976 switch (call) { 4977 case IPCOP_semop: 4978 ret = do_semop(first, ptr, second); 4979 break; 4980 4981 case IPCOP_semget: 4982 ret = get_errno(semget(first, second, third)); 4983 break; 4984 4985 case IPCOP_semctl: { 4986 /* The semun argument to semctl is passed by value, so dereference the 4987 * ptr argument. 
*/ 4988 abi_ulong atptr; 4989 get_user_ual(atptr, ptr); 4990 ret = do_semctl(first, second, third, atptr); 4991 break; 4992 } 4993 4994 case IPCOP_msgget: 4995 ret = get_errno(msgget(first, second)); 4996 break; 4997 4998 case IPCOP_msgsnd: 4999 ret = do_msgsnd(first, ptr, second, third); 5000 break; 5001 5002 case IPCOP_msgctl: 5003 ret = do_msgctl(first, second, ptr); 5004 break; 5005 5006 case IPCOP_msgrcv: 5007 switch (version) { 5008 case 0: 5009 { 5010 struct target_ipc_kludge { 5011 abi_long msgp; 5012 abi_long msgtyp; 5013 } *tmp; 5014 5015 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 5016 ret = -TARGET_EFAULT; 5017 break; 5018 } 5019 5020 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 5021 5022 unlock_user_struct(tmp, ptr, 0); 5023 break; 5024 } 5025 default: 5026 ret = do_msgrcv(first, ptr, second, fifth, third); 5027 } 5028 break; 5029 5030 case IPCOP_shmat: 5031 switch (version) { 5032 default: 5033 { 5034 abi_ulong raddr; 5035 raddr = do_shmat(cpu_env, first, ptr, second); 5036 if (is_error(raddr)) 5037 return get_errno(raddr); 5038 if (put_user_ual(raddr, third)) 5039 return -TARGET_EFAULT; 5040 break; 5041 } 5042 case 1: 5043 ret = -TARGET_EINVAL; 5044 break; 5045 } 5046 break; 5047 case IPCOP_shmdt: 5048 ret = do_shmdt(ptr); 5049 break; 5050 5051 case IPCOP_shmget: 5052 /* IPC_* flag values are the same on all linux platforms */ 5053 ret = get_errno(shmget(first, second, third)); 5054 break; 5055 5056 /* IPC_* and SHM_* command values are the same on all linux platforms */ 5057 case IPCOP_shmctl: 5058 ret = do_shmctl(first, second, ptr); 5059 break; 5060 default: 5061 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 5062 ret = -TARGET_ENOSYS; 5063 break; 5064 } 5065 return ret; 5066 } 5067 #endif 5068 5069 /* kernel structure types definitions */ 5070 5071 #define STRUCT(name, ...) 
STRUCT_ ## name, 5072 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 5073 enum { 5074 #include "syscall_types.h" 5075 STRUCT_MAX 5076 }; 5077 #undef STRUCT 5078 #undef STRUCT_SPECIAL 5079 5080 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 5081 #define STRUCT_SPECIAL(name) 5082 #include "syscall_types.h" 5083 #undef STRUCT 5084 #undef STRUCT_SPECIAL 5085 5086 typedef struct IOCTLEntry IOCTLEntry; 5087 5088 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 5089 int fd, int cmd, abi_long arg); 5090 5091 struct IOCTLEntry { 5092 int target_cmd; 5093 unsigned int host_cmd; 5094 const char *name; 5095 int access; 5096 do_ioctl_fn *do_ioctl; 5097 const argtype arg_type[5]; 5098 }; 5099 5100 #define IOC_R 0x0001 5101 #define IOC_W 0x0002 5102 #define IOC_RW (IOC_R | IOC_W) 5103 5104 #define MAX_STRUCT_SIZE 4096 5105 5106 #ifdef CONFIG_FIEMAP 5107 /* So fiemap access checks don't overflow on 32 bit systems. 5108 * This is very slightly smaller than the limit imposed by 5109 * the underlying kernel. 5110 */ 5111 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 5112 / sizeof(struct fiemap_extent)) 5113 5114 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 5115 int fd, int cmd, abi_long arg) 5116 { 5117 /* The parameter for this ioctl is a struct fiemap followed 5118 * by an array of struct fiemap_extent whose size is set 5119 * in fiemap->fm_extent_count. The array is filled in by the 5120 * ioctl. 
5121 */ 5122 int target_size_in, target_size_out; 5123 struct fiemap *fm; 5124 const argtype *arg_type = ie->arg_type; 5125 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 5126 void *argptr, *p; 5127 abi_long ret; 5128 int i, extent_size = thunk_type_size(extent_arg_type, 0); 5129 uint32_t outbufsz; 5130 int free_fm = 0; 5131 5132 assert(arg_type[0] == TYPE_PTR); 5133 assert(ie->access == IOC_RW); 5134 arg_type++; 5135 target_size_in = thunk_type_size(arg_type, 0); 5136 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 5137 if (!argptr) { 5138 return -TARGET_EFAULT; 5139 } 5140 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5141 unlock_user(argptr, arg, 0); 5142 fm = (struct fiemap *)buf_temp; 5143 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 5144 return -TARGET_EINVAL; 5145 } 5146 5147 outbufsz = sizeof (*fm) + 5148 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 5149 5150 if (outbufsz > MAX_STRUCT_SIZE) { 5151 /* We can't fit all the extents into the fixed size buffer. 5152 * Allocate one that is large enough and use it instead. 
5153 */ 5154 fm = g_try_malloc(outbufsz); 5155 if (!fm) { 5156 return -TARGET_ENOMEM; 5157 } 5158 memcpy(fm, buf_temp, sizeof(struct fiemap)); 5159 free_fm = 1; 5160 } 5161 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm)); 5162 if (!is_error(ret)) { 5163 target_size_out = target_size_in; 5164 /* An extent_count of 0 means we were only counting the extents 5165 * so there are no structs to copy 5166 */ 5167 if (fm->fm_extent_count != 0) { 5168 target_size_out += fm->fm_mapped_extents * extent_size; 5169 } 5170 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 5171 if (!argptr) { 5172 ret = -TARGET_EFAULT; 5173 } else { 5174 /* Convert the struct fiemap */ 5175 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 5176 if (fm->fm_extent_count != 0) { 5177 p = argptr + target_size_in; 5178 /* ...and then all the struct fiemap_extents */ 5179 for (i = 0; i < fm->fm_mapped_extents; i++) { 5180 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 5181 THUNK_TARGET); 5182 p += extent_size; 5183 } 5184 } 5185 unlock_user(argptr, arg, target_size_out); 5186 } 5187 } 5188 if (free_fm) { 5189 g_free(fm); 5190 } 5191 return ret; 5192 } 5193 #endif 5194 5195 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 5196 int fd, int cmd, abi_long arg) 5197 { 5198 const argtype *arg_type = ie->arg_type; 5199 int target_size; 5200 void *argptr; 5201 int ret; 5202 struct ifconf *host_ifconf; 5203 uint32_t outbufsz; 5204 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 5205 int target_ifreq_size; 5206 int nb_ifreq; 5207 int free_buf = 0; 5208 int i; 5209 int target_ifc_len; 5210 abi_long target_ifc_buf; 5211 int host_ifc_len; 5212 char *host_ifc_buf; 5213 5214 assert(arg_type[0] == TYPE_PTR); 5215 assert(ie->access == IOC_RW); 5216 5217 arg_type++; 5218 target_size = thunk_type_size(arg_type, 0); 5219 5220 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5221 if (!argptr) 5222 return -TARGET_EFAULT; 5223 thunk_convert(buf_temp, 
argptr, arg_type, THUNK_HOST); 5224 unlock_user(argptr, arg, 0); 5225 5226 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 5227 target_ifc_len = host_ifconf->ifc_len; 5228 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 5229 5230 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 5231 nb_ifreq = target_ifc_len / target_ifreq_size; 5232 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 5233 5234 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 5235 if (outbufsz > MAX_STRUCT_SIZE) { 5236 /* We can't fit all the extents into the fixed size buffer. 5237 * Allocate one that is large enough and use it instead. 5238 */ 5239 host_ifconf = malloc(outbufsz); 5240 if (!host_ifconf) { 5241 return -TARGET_ENOMEM; 5242 } 5243 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 5244 free_buf = 1; 5245 } 5246 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 5247 5248 host_ifconf->ifc_len = host_ifc_len; 5249 host_ifconf->ifc_buf = host_ifc_buf; 5250 5251 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf)); 5252 if (!is_error(ret)) { 5253 /* convert host ifc_len to target ifc_len */ 5254 5255 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 5256 target_ifc_len = nb_ifreq * target_ifreq_size; 5257 host_ifconf->ifc_len = target_ifc_len; 5258 5259 /* restore target ifc_buf */ 5260 5261 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 5262 5263 /* copy struct ifconf to target user */ 5264 5265 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5266 if (!argptr) 5267 return -TARGET_EFAULT; 5268 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 5269 unlock_user(argptr, arg, target_size); 5270 5271 /* copy ifreq[] to target user */ 5272 5273 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 5274 for (i = 0; i < nb_ifreq ; i++) { 5275 thunk_convert(argptr + i * target_ifreq_size, 5276 host_ifc_buf + i * sizeof(struct ifreq), 5277 ifreq_arg_type, THUNK_TARGET); 5278 } 5279 unlock_user(argptr, 
target_ifc_buf, target_ifc_len); 5280 } 5281 5282 if (free_buf) { 5283 free(host_ifconf); 5284 } 5285 5286 return ret; 5287 } 5288 5289 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5290 int cmd, abi_long arg) 5291 { 5292 void *argptr; 5293 struct dm_ioctl *host_dm; 5294 abi_long guest_data; 5295 uint32_t guest_data_size; 5296 int target_size; 5297 const argtype *arg_type = ie->arg_type; 5298 abi_long ret; 5299 void *big_buf = NULL; 5300 char *host_data; 5301 5302 arg_type++; 5303 target_size = thunk_type_size(arg_type, 0); 5304 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5305 if (!argptr) { 5306 ret = -TARGET_EFAULT; 5307 goto out; 5308 } 5309 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5310 unlock_user(argptr, arg, 0); 5311 5312 /* buf_temp is too small, so fetch things into a bigger buffer */ 5313 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 5314 memcpy(big_buf, buf_temp, target_size); 5315 buf_temp = big_buf; 5316 host_dm = big_buf; 5317 5318 guest_data = arg + host_dm->data_start; 5319 if ((guest_data - arg) < 0) { 5320 ret = -TARGET_EINVAL; 5321 goto out; 5322 } 5323 guest_data_size = host_dm->data_size - host_dm->data_start; 5324 host_data = (char*)host_dm + host_dm->data_start; 5325 5326 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 5327 if (!argptr) { 5328 ret = -TARGET_EFAULT; 5329 goto out; 5330 } 5331 5332 switch (ie->host_cmd) { 5333 case DM_REMOVE_ALL: 5334 case DM_LIST_DEVICES: 5335 case DM_DEV_CREATE: 5336 case DM_DEV_REMOVE: 5337 case DM_DEV_SUSPEND: 5338 case DM_DEV_STATUS: 5339 case DM_DEV_WAIT: 5340 case DM_TABLE_STATUS: 5341 case DM_TABLE_CLEAR: 5342 case DM_TABLE_DEPS: 5343 case DM_LIST_VERSIONS: 5344 /* no input data */ 5345 break; 5346 case DM_DEV_RENAME: 5347 case DM_DEV_SET_GEOMETRY: 5348 /* data contains only strings */ 5349 memcpy(host_data, argptr, guest_data_size); 5350 break; 5351 case DM_TARGET_MSG: 5352 memcpy(host_data, argptr, 
guest_data_size); 5353 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 5354 break; 5355 case DM_TABLE_LOAD: 5356 { 5357 void *gspec = argptr; 5358 void *cur_data = host_data; 5359 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5360 int spec_size = thunk_type_size(arg_type, 0); 5361 int i; 5362 5363 for (i = 0; i < host_dm->target_count; i++) { 5364 struct dm_target_spec *spec = cur_data; 5365 uint32_t next; 5366 int slen; 5367 5368 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 5369 slen = strlen((char*)gspec + spec_size) + 1; 5370 next = spec->next; 5371 spec->next = sizeof(*spec) + slen; 5372 strcpy((char*)&spec[1], gspec + spec_size); 5373 gspec += next; 5374 cur_data += spec->next; 5375 } 5376 break; 5377 } 5378 default: 5379 ret = -TARGET_EINVAL; 5380 unlock_user(argptr, guest_data, 0); 5381 goto out; 5382 } 5383 unlock_user(argptr, guest_data, 0); 5384 5385 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5386 if (!is_error(ret)) { 5387 guest_data = arg + host_dm->data_start; 5388 guest_data_size = host_dm->data_size - host_dm->data_start; 5389 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 5390 switch (ie->host_cmd) { 5391 case DM_REMOVE_ALL: 5392 case DM_DEV_CREATE: 5393 case DM_DEV_REMOVE: 5394 case DM_DEV_RENAME: 5395 case DM_DEV_SUSPEND: 5396 case DM_DEV_STATUS: 5397 case DM_TABLE_LOAD: 5398 case DM_TABLE_CLEAR: 5399 case DM_TARGET_MSG: 5400 case DM_DEV_SET_GEOMETRY: 5401 /* no return data */ 5402 break; 5403 case DM_LIST_DEVICES: 5404 { 5405 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 5406 uint32_t remaining_data = guest_data_size; 5407 void *cur_data = argptr; 5408 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 5409 int nl_size = 12; /* can't use thunk_size due to alignment */ 5410 5411 while (1) { 5412 uint32_t next = nl->next; 5413 if (next) { 5414 nl->next = nl_size + (strlen(nl->name) + 1); 5415 } 5416 if (remaining_data < nl->next) { 5417 host_dm->flags |= 
DM_BUFFER_FULL_FLAG; 5418 break; 5419 } 5420 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 5421 strcpy(cur_data + nl_size, nl->name); 5422 cur_data += nl->next; 5423 remaining_data -= nl->next; 5424 if (!next) { 5425 break; 5426 } 5427 nl = (void*)nl + next; 5428 } 5429 break; 5430 } 5431 case DM_DEV_WAIT: 5432 case DM_TABLE_STATUS: 5433 { 5434 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 5435 void *cur_data = argptr; 5436 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5437 int spec_size = thunk_type_size(arg_type, 0); 5438 int i; 5439 5440 for (i = 0; i < host_dm->target_count; i++) { 5441 uint32_t next = spec->next; 5442 int slen = strlen((char*)&spec[1]) + 1; 5443 spec->next = (cur_data - argptr) + spec_size + slen; 5444 if (guest_data_size < spec->next) { 5445 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5446 break; 5447 } 5448 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 5449 strcpy(cur_data + spec_size, (char*)&spec[1]); 5450 cur_data = argptr + spec->next; 5451 spec = (void*)host_dm + host_dm->data_start + next; 5452 } 5453 break; 5454 } 5455 case DM_TABLE_DEPS: 5456 { 5457 void *hdata = (void*)host_dm + host_dm->data_start; 5458 int count = *(uint32_t*)hdata; 5459 uint64_t *hdev = hdata + 8; 5460 uint64_t *gdev = argptr + 8; 5461 int i; 5462 5463 *(uint32_t*)argptr = tswap32(count); 5464 for (i = 0; i < count; i++) { 5465 *gdev = tswap64(*hdev); 5466 gdev++; 5467 hdev++; 5468 } 5469 break; 5470 } 5471 case DM_LIST_VERSIONS: 5472 { 5473 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 5474 uint32_t remaining_data = guest_data_size; 5475 void *cur_data = argptr; 5476 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 5477 int vers_size = thunk_type_size(arg_type, 0); 5478 5479 while (1) { 5480 uint32_t next = vers->next; 5481 if (next) { 5482 vers->next = vers_size + (strlen(vers->name) + 1); 5483 } 5484 if (remaining_data < vers->next) { 5485 host_dm->flags |= 
DM_BUFFER_FULL_FLAG; 5486 break; 5487 } 5488 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 5489 strcpy(cur_data + vers_size, vers->name); 5490 cur_data += vers->next; 5491 remaining_data -= vers->next; 5492 if (!next) { 5493 break; 5494 } 5495 vers = (void*)vers + next; 5496 } 5497 break; 5498 } 5499 default: 5500 unlock_user(argptr, guest_data, 0); 5501 ret = -TARGET_EINVAL; 5502 goto out; 5503 } 5504 unlock_user(argptr, guest_data, guest_data_size); 5505 5506 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5507 if (!argptr) { 5508 ret = -TARGET_EFAULT; 5509 goto out; 5510 } 5511 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5512 unlock_user(argptr, arg, target_size); 5513 } 5514 out: 5515 g_free(big_buf); 5516 return ret; 5517 } 5518 5519 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5520 int cmd, abi_long arg) 5521 { 5522 void *argptr; 5523 int target_size; 5524 const argtype *arg_type = ie->arg_type; 5525 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 5526 abi_long ret; 5527 5528 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 5529 struct blkpg_partition host_part; 5530 5531 /* Read and convert blkpg */ 5532 arg_type++; 5533 target_size = thunk_type_size(arg_type, 0); 5534 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5535 if (!argptr) { 5536 ret = -TARGET_EFAULT; 5537 goto out; 5538 } 5539 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5540 unlock_user(argptr, arg, 0); 5541 5542 switch (host_blkpg->op) { 5543 case BLKPG_ADD_PARTITION: 5544 case BLKPG_DEL_PARTITION: 5545 /* payload is struct blkpg_partition */ 5546 break; 5547 default: 5548 /* Unknown opcode */ 5549 ret = -TARGET_EINVAL; 5550 goto out; 5551 } 5552 5553 /* Read and convert blkpg->data */ 5554 arg = (abi_long)(uintptr_t)host_blkpg->data; 5555 target_size = thunk_type_size(part_arg_type, 0); 5556 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5557 if (!argptr) { 5558 ret = 
-TARGET_EFAULT; 5559 goto out; 5560 } 5561 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5562 unlock_user(argptr, arg, 0); 5563 5564 /* Swizzle the data pointer to our local copy and call! */ 5565 host_blkpg->data = &host_part; 5566 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5567 5568 out: 5569 return ret; 5570 } 5571 5572 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5573 int fd, int cmd, abi_long arg) 5574 { 5575 const argtype *arg_type = ie->arg_type; 5576 const StructEntry *se; 5577 const argtype *field_types; 5578 const int *dst_offsets, *src_offsets; 5579 int target_size; 5580 void *argptr; 5581 abi_ulong *target_rt_dev_ptr; 5582 unsigned long *host_rt_dev_ptr; 5583 abi_long ret; 5584 int i; 5585 5586 assert(ie->access == IOC_W); 5587 assert(*arg_type == TYPE_PTR); 5588 arg_type++; 5589 assert(*arg_type == TYPE_STRUCT); 5590 target_size = thunk_type_size(arg_type, 0); 5591 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5592 if (!argptr) { 5593 return -TARGET_EFAULT; 5594 } 5595 arg_type++; 5596 assert(*arg_type == (int)STRUCT_rtentry); 5597 se = struct_entries + *arg_type++; 5598 assert(se->convert[0] == NULL); 5599 /* convert struct here to be able to catch rt_dev string */ 5600 field_types = se->field_types; 5601 dst_offsets = se->field_offsets[THUNK_HOST]; 5602 src_offsets = se->field_offsets[THUNK_TARGET]; 5603 for (i = 0; i < se->nb_fields; i++) { 5604 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 5605 assert(*field_types == TYPE_PTRVOID); 5606 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 5607 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 5608 if (*target_rt_dev_ptr != 0) { 5609 *host_rt_dev_ptr = (unsigned long)lock_user_string( 5610 tswapal(*target_rt_dev_ptr)); 5611 if (!*host_rt_dev_ptr) { 5612 unlock_user(argptr, arg, 0); 5613 return -TARGET_EFAULT; 5614 } 5615 } else { 5616 *host_rt_dev_ptr = 0; 5617 } 5618 field_types++; 5619 continue; 5620 } 
        /* Ordinary field: let the thunk machinery convert it.  */
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    /* Release the rt_dev string locked in the loop above, if any.  */
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}

/* KDSIGACCEPT takes a signal number; translate it from the target's
 * numbering to the host's before issuing the ioctl.  */
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}

#ifdef TIOCGPTPEER
/* TIOCGPTPEER takes open(2)-style flags; translate the target's flag
 * bits to host values before issuing the ioctl.  */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif

/* Table of all supported ioctls, generated from ioctls.h.  Each entry
 * maps a target command number to a host command, an access mode, an
 * optional special-case handler (IOCTL_SPECIAL) and the argument type
 * description used by the thunk converter.  Terminated by target_cmd 0.  */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};

/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the ioctl table for the target command number.  */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Entry has a special-case handler (IOCTL_SPECIAL); use it.  */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    /* Generic path: convert the argument according to its declared type
     * and access direction, then issue the host ioctl.  */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* pass the value through unconverted */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* read-only result: convert host struct back to the guest */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* write-only argument: convert guest struct to the host */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, issue, convert back out on success */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

/* termios c_iflag bit translation, target <-> host.  Each row is
 * { target_mask, target_bits, host_mask, host_bits }.  */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};

/* termios c_oflag bit translation, target <-> host.  Multi-bit delay
 * fields (NLDLY, CRDLY, ...) need one row per field value.  */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    {
      TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

/* termios c_cflag bit translation, target <-> host.  One row per baud
 * rate (CBAUD is a multi-bit field) plus the character-size and modem
 * control flags.  */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

/* termios c_lflag bit translation, target <-> host.  */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};

/* Convert a guest struct target_termios into a host struct termios:
 * translate the four flag words via the tables above and remap the
 * control-character array index by index.  */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* zero first: target and host c_cc arrays may differ in layout */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] =
                          target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

/* Inverse of target_to_host_termios: convert a host struct termios
 * into the guest's struct target_termios layout.  */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* zero first: target and host c_cc arrays may differ in layout */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

/* StructEntry wiring the two termios converters into the thunk system;
 * index 0 converts to target, index 1 converts to host.  */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};

/* mmap() flag translation, target <-> host.  */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.
     */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};

#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* modify_ldt(func=0): copy up to bytecount bytes of the emulated LDT
 * back to the guest.  Returns the number of bytes copied (0 when no
 * LDT has been allocated yet) or -TARGET_EFAULT.  */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
/* modify_ldt(func=1/0x11): validate a user_desc from the guest, pack it
 * into the 8-byte x86 descriptor format and store it in the emulated
 * LDT.  'oldmode' selects the legacy modify_ldt(1) semantics.  */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the flags word into the individual descriptor fields */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack base/limit/flags into the two descriptor words */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
/* modify_ldt() dispatcher: func 0 = read, 1 = legacy write,
 * 0x11 = new-format write.  */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

#if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(): install a TLS descriptor in the emulated GDT.
 * An entry_number of -1 asks us to pick a free TLS slot and report it
 * back to the guest.  */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* find a free TLS slot and tell the guest which one we used */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* unpack the flags word into the individual descriptor fields */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack base/limit/flags into the two descriptor words */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* get_thread_area(): read a TLS descriptor from the emulated GDT,
 * unpack the two descriptor words back into user_desc fields and copy
 * the result to the guest.  */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* inverse of the packing done in do_set_thread_area() */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */

#ifndef TARGET_ABI32
/* arch_prctl(): get/set the FS/GS segment base for 64-bit guests.  */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */

#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation so parent TLS setup appears atomic to the
 * child (see clone_func below).  */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Per-clone handoff data shared between parent and the new thread.  */
typedef struct {
    CPUArchState *env;          /* CPU state for the child thread */
    pthread_mutex_t mutex;      /* protects the ready handshake */
    pthread_cond_t cond;        /* signalled when the child is ready */
    pthread_t thread;
    uint32_t tid;               /* child's host tid, filled in by child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;

/* Entry point of a CLONE_VM child thread: register with RCU/TCG, store
 * the tid where requested, restore signals, signal readiness to the
 * parent, then wait for the parent to drop clone_lock before entering
 * the CPU loop.  */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* pthread_create()-like clone: new guest thread in this process */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}

/* warning : doesn't handle linux specific flags...
*/ 6450 static int target_to_host_fcntl_cmd(int cmd) 6451 { 6452 switch(cmd) { 6453 case TARGET_F_DUPFD: 6454 case TARGET_F_GETFD: 6455 case TARGET_F_SETFD: 6456 case TARGET_F_GETFL: 6457 case TARGET_F_SETFL: 6458 return cmd; 6459 case TARGET_F_GETLK: 6460 return F_GETLK64; 6461 case TARGET_F_SETLK: 6462 return F_SETLK64; 6463 case TARGET_F_SETLKW: 6464 return F_SETLKW64; 6465 case TARGET_F_GETOWN: 6466 return F_GETOWN; 6467 case TARGET_F_SETOWN: 6468 return F_SETOWN; 6469 case TARGET_F_GETSIG: 6470 return F_GETSIG; 6471 case TARGET_F_SETSIG: 6472 return F_SETSIG; 6473 #if TARGET_ABI_BITS == 32 6474 case TARGET_F_GETLK64: 6475 return F_GETLK64; 6476 case TARGET_F_SETLK64: 6477 return F_SETLK64; 6478 case TARGET_F_SETLKW64: 6479 return F_SETLKW64; 6480 #endif 6481 case TARGET_F_SETLEASE: 6482 return F_SETLEASE; 6483 case TARGET_F_GETLEASE: 6484 return F_GETLEASE; 6485 #ifdef F_DUPFD_CLOEXEC 6486 case TARGET_F_DUPFD_CLOEXEC: 6487 return F_DUPFD_CLOEXEC; 6488 #endif 6489 case TARGET_F_NOTIFY: 6490 return F_NOTIFY; 6491 #ifdef F_GETOWN_EX 6492 case TARGET_F_GETOWN_EX: 6493 return F_GETOWN_EX; 6494 #endif 6495 #ifdef F_SETOWN_EX 6496 case TARGET_F_SETOWN_EX: 6497 return F_SETOWN_EX; 6498 #endif 6499 #ifdef F_SETPIPE_SZ 6500 case TARGET_F_SETPIPE_SZ: 6501 return F_SETPIPE_SZ; 6502 case TARGET_F_GETPIPE_SZ: 6503 return F_GETPIPE_SZ; 6504 #endif 6505 default: 6506 return -TARGET_EINVAL; 6507 } 6508 return -TARGET_EINVAL; 6509 } 6510 6511 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 6512 static const bitmask_transtbl flock_tbl[] = { 6513 TRANSTBL_CONVERT(F_RDLCK), 6514 TRANSTBL_CONVERT(F_WRLCK), 6515 TRANSTBL_CONVERT(F_UNLCK), 6516 TRANSTBL_CONVERT(F_EXLCK), 6517 TRANSTBL_CONVERT(F_SHLCK), 6518 { 0, 0, 0, 0 } 6519 }; 6520 6521 static inline abi_long copy_from_user_flock(struct flock64 *fl, 6522 abi_ulong target_flock_addr) 6523 { 6524 struct target_flock *target_fl; 6525 short l_type; 6526 6527 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Copy a host flock64 back to a target 'struct flock' in guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

/* Function-pointer types so do_fcntl-style code can pick the right
 * converter (plain / eabi / 64-bit) at run time. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI lays out flock64 with different alignment/padding, so it needs
 * its own pair of converters. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif

/* Copy a target 'struct flock64' from guest memory into a host flock64. */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Copy a host flock64 back to a target 'struct flock64' in guest memory. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

/* Emulate fcntl(2): translate the command and its argument (flock structs,
 * flag bitmasks, f_owner_ex) between target and host representations.
 * Returns host result or negative target errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Returned flag bits must be presented in target encoding. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Commands whose argument is a plain integer: pass through unchanged. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit range; out-of-range ids become the
 * traditional overflow id 65534 ("nobody"). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid; 0xffff means "unchanged" (-1) and must stay -1. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
/* Byte-swap an id at the width the target ABI uses (16 bits here). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit uid ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)

/* One-time setup: register thunk struct layouts, build the target->host
 * errno table, and patch ioctl size fields that depend on target layout. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit offset from the two registers a 32-bit ABI splits it
 * across; register order depends on target endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
/* truncate64: some ABIs require the 64-bit length pair to start on an
 * even register, in which case the argument registers shift up by one. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1,
target_offset64(arg2, arg3))); 6933 } 6934 #endif 6935 6936 #ifdef TARGET_NR_ftruncate64 6937 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 6938 abi_long arg2, 6939 abi_long arg3, 6940 abi_long arg4) 6941 { 6942 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) { 6943 arg2 = arg3; 6944 arg3 = arg4; 6945 } 6946 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 6947 } 6948 #endif 6949 6950 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 6951 abi_ulong target_addr) 6952 { 6953 struct target_timespec *target_ts; 6954 6955 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 6956 return -TARGET_EFAULT; 6957 __get_user(host_ts->tv_sec, &target_ts->tv_sec); 6958 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec); 6959 unlock_user_struct(target_ts, target_addr, 0); 6960 return 0; 6961 } 6962 6963 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 6964 struct timespec *host_ts) 6965 { 6966 struct target_timespec *target_ts; 6967 6968 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 6969 return -TARGET_EFAULT; 6970 __put_user(host_ts->tv_sec, &target_ts->tv_sec); 6971 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec); 6972 unlock_user_struct(target_ts, target_addr, 1); 6973 return 0; 6974 } 6975 6976 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 6977 abi_ulong target_addr) 6978 { 6979 struct target_itimerspec *target_itspec; 6980 6981 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 6982 return -TARGET_EFAULT; 6983 } 6984 6985 host_itspec->it_interval.tv_sec = 6986 tswapal(target_itspec->it_interval.tv_sec); 6987 host_itspec->it_interval.tv_nsec = 6988 tswapal(target_itspec->it_interval.tv_nsec); 6989 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 6990 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 6991 6992 unlock_user_struct(target_itspec, target_addr, 1); 
6993 return 0; 6994 } 6995 6996 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 6997 struct itimerspec *host_its) 6998 { 6999 struct target_itimerspec *target_itspec; 7000 7001 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 7002 return -TARGET_EFAULT; 7003 } 7004 7005 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 7006 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 7007 7008 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 7009 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 7010 7011 unlock_user_struct(target_itspec, target_addr, 0); 7012 return 0; 7013 } 7014 7015 static inline abi_long target_to_host_timex(struct timex *host_tx, 7016 abi_long target_addr) 7017 { 7018 struct target_timex *target_tx; 7019 7020 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7021 return -TARGET_EFAULT; 7022 } 7023 7024 __get_user(host_tx->modes, &target_tx->modes); 7025 __get_user(host_tx->offset, &target_tx->offset); 7026 __get_user(host_tx->freq, &target_tx->freq); 7027 __get_user(host_tx->maxerror, &target_tx->maxerror); 7028 __get_user(host_tx->esterror, &target_tx->esterror); 7029 __get_user(host_tx->status, &target_tx->status); 7030 __get_user(host_tx->constant, &target_tx->constant); 7031 __get_user(host_tx->precision, &target_tx->precision); 7032 __get_user(host_tx->tolerance, &target_tx->tolerance); 7033 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7034 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7035 __get_user(host_tx->tick, &target_tx->tick); 7036 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7037 __get_user(host_tx->jitter, &target_tx->jitter); 7038 __get_user(host_tx->shift, &target_tx->shift); 7039 __get_user(host_tx->stabil, &target_tx->stabil); 7040 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7041 __get_user(host_tx->calcnt, &target_tx->calcnt); 7042 
__get_user(host_tx->errcnt, &target_tx->errcnt); 7043 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7044 __get_user(host_tx->tai, &target_tx->tai); 7045 7046 unlock_user_struct(target_tx, target_addr, 0); 7047 return 0; 7048 } 7049 7050 static inline abi_long host_to_target_timex(abi_long target_addr, 7051 struct timex *host_tx) 7052 { 7053 struct target_timex *target_tx; 7054 7055 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7056 return -TARGET_EFAULT; 7057 } 7058 7059 __put_user(host_tx->modes, &target_tx->modes); 7060 __put_user(host_tx->offset, &target_tx->offset); 7061 __put_user(host_tx->freq, &target_tx->freq); 7062 __put_user(host_tx->maxerror, &target_tx->maxerror); 7063 __put_user(host_tx->esterror, &target_tx->esterror); 7064 __put_user(host_tx->status, &target_tx->status); 7065 __put_user(host_tx->constant, &target_tx->constant); 7066 __put_user(host_tx->precision, &target_tx->precision); 7067 __put_user(host_tx->tolerance, &target_tx->tolerance); 7068 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7069 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7070 __put_user(host_tx->tick, &target_tx->tick); 7071 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7072 __put_user(host_tx->jitter, &target_tx->jitter); 7073 __put_user(host_tx->shift, &target_tx->shift); 7074 __put_user(host_tx->stabil, &target_tx->stabil); 7075 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7076 __put_user(host_tx->calcnt, &target_tx->calcnt); 7077 __put_user(host_tx->errcnt, &target_tx->errcnt); 7078 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7079 __put_user(host_tx->tai, &target_tx->tai); 7080 7081 unlock_user_struct(target_tx, target_addr, 1); 7082 return 0; 7083 } 7084 7085 7086 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, 7087 abi_ulong target_addr) 7088 { 7089 struct target_sigevent *target_sevp; 7090 7091 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { 7092 return 
-TARGET_EFAULT; 7093 } 7094 7095 /* This union is awkward on 64 bit systems because it has a 32 bit 7096 * integer and a pointer in it; we follow the conversion approach 7097 * used for handling sigval types in signal.c so the guest should get 7098 * the correct value back even if we did a 64 bit byteswap and it's 7099 * using the 32 bit integer. 7100 */ 7101 host_sevp->sigev_value.sival_ptr = 7102 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr); 7103 host_sevp->sigev_signo = 7104 target_to_host_signal(tswap32(target_sevp->sigev_signo)); 7105 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); 7106 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid); 7107 7108 unlock_user_struct(target_sevp, target_addr, 1); 7109 return 0; 7110 } 7111 7112 #if defined(TARGET_NR_mlockall) 7113 static inline int target_to_host_mlockall_arg(int arg) 7114 { 7115 int result = 0; 7116 7117 if (arg & TARGET_MLOCKALL_MCL_CURRENT) { 7118 result |= MCL_CURRENT; 7119 } 7120 if (arg & TARGET_MLOCKALL_MCL_FUTURE) { 7121 result |= MCL_FUTURE; 7122 } 7123 return result; 7124 } 7125 #endif 7126 7127 static inline abi_long host_to_target_stat64(void *cpu_env, 7128 abi_ulong target_addr, 7129 struct stat *host_st) 7130 { 7131 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 7132 if (((CPUARMState *)cpu_env)->eabi) { 7133 struct target_eabi_stat64 *target_st; 7134 7135 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7136 return -TARGET_EFAULT; 7137 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 7138 __put_user(host_st->st_dev, &target_st->st_dev); 7139 __put_user(host_st->st_ino, &target_st->st_ino); 7140 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7141 __put_user(host_st->st_ino, &target_st->__st_ino); 7142 #endif 7143 __put_user(host_st->st_mode, &target_st->st_mode); 7144 __put_user(host_st->st_nlink, &target_st->st_nlink); 7145 __put_user(host_st->st_uid, &target_st->st_uid); 7146 __put_user(host_st->st_gid, &target_st->st_gid); 7147 
__put_user(host_st->st_rdev, &target_st->st_rdev); 7148 __put_user(host_st->st_size, &target_st->st_size); 7149 __put_user(host_st->st_blksize, &target_st->st_blksize); 7150 __put_user(host_st->st_blocks, &target_st->st_blocks); 7151 __put_user(host_st->st_atime, &target_st->target_st_atime); 7152 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7153 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7154 unlock_user_struct(target_st, target_addr, 1); 7155 } else 7156 #endif 7157 { 7158 #if defined(TARGET_HAS_STRUCT_STAT64) 7159 struct target_stat64 *target_st; 7160 #else 7161 struct target_stat *target_st; 7162 #endif 7163 7164 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7165 return -TARGET_EFAULT; 7166 memset(target_st, 0, sizeof(*target_st)); 7167 __put_user(host_st->st_dev, &target_st->st_dev); 7168 __put_user(host_st->st_ino, &target_st->st_ino); 7169 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7170 __put_user(host_st->st_ino, &target_st->__st_ino); 7171 #endif 7172 __put_user(host_st->st_mode, &target_st->st_mode); 7173 __put_user(host_st->st_nlink, &target_st->st_nlink); 7174 __put_user(host_st->st_uid, &target_st->st_uid); 7175 __put_user(host_st->st_gid, &target_st->st_gid); 7176 __put_user(host_st->st_rdev, &target_st->st_rdev); 7177 /* XXX: better use of kernel struct */ 7178 __put_user(host_st->st_size, &target_st->st_size); 7179 __put_user(host_st->st_blksize, &target_st->st_blksize); 7180 __put_user(host_st->st_blocks, &target_st->st_blocks); 7181 __put_user(host_st->st_atime, &target_st->target_st_atime); 7182 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7183 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7184 unlock_user_struct(target_st, target_addr, 1); 7185 } 7186 7187 return 0; 7188 } 7189 7190 /* ??? Using host futex calls even when target atomic operations 7191 are not really atomic probably breaks things. 
However implementing 7192 futexes locally would make futexes shared between multiple processes 7193 tricky. However they're probably useless because guest atomic 7194 operations won't work either. */ 7195 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 7196 target_ulong uaddr2, int val3) 7197 { 7198 struct timespec ts, *pts; 7199 int base_op; 7200 7201 /* ??? We assume FUTEX_* constants are the same on both host 7202 and target. */ 7203 #ifdef FUTEX_CMD_MASK 7204 base_op = op & FUTEX_CMD_MASK; 7205 #else 7206 base_op = op; 7207 #endif 7208 switch (base_op) { 7209 case FUTEX_WAIT: 7210 case FUTEX_WAIT_BITSET: 7211 if (timeout) { 7212 pts = &ts; 7213 target_to_host_timespec(pts, timeout); 7214 } else { 7215 pts = NULL; 7216 } 7217 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val), 7218 pts, NULL, val3)); 7219 case FUTEX_WAKE: 7220 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 7221 case FUTEX_FD: 7222 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 7223 case FUTEX_REQUEUE: 7224 case FUTEX_CMP_REQUEUE: 7225 case FUTEX_WAKE_OP: 7226 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7227 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7228 But the prototype takes a `struct timespec *'; insert casts 7229 to satisfy the compiler. We do not need to tswap TIMEOUT 7230 since it's not compared to guest memory. */ 7231 pts = (struct timespec *)(uintptr_t) timeout; 7232 return get_errno(safe_futex(g2h(uaddr), op, val, pts, 7233 g2h(uaddr2), 7234 (base_op == FUTEX_CMP_REQUEUE 7235 ? 
tswap32(val3) 7236 : val3))); 7237 default: 7238 return -TARGET_ENOSYS; 7239 } 7240 } 7241 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7242 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname, 7243 abi_long handle, abi_long mount_id, 7244 abi_long flags) 7245 { 7246 struct file_handle *target_fh; 7247 struct file_handle *fh; 7248 int mid = 0; 7249 abi_long ret; 7250 char *name; 7251 unsigned int size, total_size; 7252 7253 if (get_user_s32(size, handle)) { 7254 return -TARGET_EFAULT; 7255 } 7256 7257 name = lock_user_string(pathname); 7258 if (!name) { 7259 return -TARGET_EFAULT; 7260 } 7261 7262 total_size = sizeof(struct file_handle) + size; 7263 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0); 7264 if (!target_fh) { 7265 unlock_user(name, pathname, 0); 7266 return -TARGET_EFAULT; 7267 } 7268 7269 fh = g_malloc0(total_size); 7270 fh->handle_bytes = size; 7271 7272 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags)); 7273 unlock_user(name, pathname, 0); 7274 7275 /* man name_to_handle_at(2): 7276 * Other than the use of the handle_bytes field, the caller should treat 7277 * the file_handle structure as an opaque data type 7278 */ 7279 7280 memcpy(target_fh, fh, total_size); 7281 target_fh->handle_bytes = tswap32(fh->handle_bytes); 7282 target_fh->handle_type = tswap32(fh->handle_type); 7283 g_free(fh); 7284 unlock_user(target_fh, handle, total_size); 7285 7286 if (put_user_s32(mid, mount_id)) { 7287 return -TARGET_EFAULT; 7288 } 7289 7290 return ret; 7291 7292 } 7293 #endif 7294 7295 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7296 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle, 7297 abi_long flags) 7298 { 7299 struct file_handle *target_fh; 7300 struct file_handle *fh; 7301 unsigned int size, total_size; 7302 abi_long ret; 7303 7304 if (get_user_s32(size, handle)) { 7305 return -TARGET_EFAULT; 7306 } 7307 7308 total_size = 
sizeof(struct file_handle) + size; 7309 target_fh = lock_user(VERIFY_READ, handle, total_size, 1); 7310 if (!target_fh) { 7311 return -TARGET_EFAULT; 7312 } 7313 7314 fh = g_memdup(target_fh, total_size); 7315 fh->handle_bytes = size; 7316 fh->handle_type = tswap32(target_fh->handle_type); 7317 7318 ret = get_errno(open_by_handle_at(mount_fd, fh, 7319 target_to_host_bitmask(flags, fcntl_flags_tbl))); 7320 7321 g_free(fh); 7322 7323 unlock_user(target_fh, handle, total_size); 7324 7325 return ret; 7326 } 7327 #endif 7328 7329 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4) 7330 7331 /* signalfd siginfo conversion */ 7332 7333 static void 7334 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo, 7335 const struct signalfd_siginfo *info) 7336 { 7337 int sig = host_to_target_signal(info->ssi_signo); 7338 7339 /* linux/signalfd.h defines a ssi_addr_lsb 7340 * not defined in sys/signalfd.h but used by some kernels 7341 */ 7342 7343 #ifdef BUS_MCEERR_AO 7344 if (tinfo->ssi_signo == SIGBUS && 7345 (tinfo->ssi_code == BUS_MCEERR_AR || 7346 tinfo->ssi_code == BUS_MCEERR_AO)) { 7347 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1); 7348 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1); 7349 *tssi_addr_lsb = tswap16(*ssi_addr_lsb); 7350 } 7351 #endif 7352 7353 tinfo->ssi_signo = tswap32(sig); 7354 tinfo->ssi_errno = tswap32(tinfo->ssi_errno); 7355 tinfo->ssi_code = tswap32(info->ssi_code); 7356 tinfo->ssi_pid = tswap32(info->ssi_pid); 7357 tinfo->ssi_uid = tswap32(info->ssi_uid); 7358 tinfo->ssi_fd = tswap32(info->ssi_fd); 7359 tinfo->ssi_tid = tswap32(info->ssi_tid); 7360 tinfo->ssi_band = tswap32(info->ssi_band); 7361 tinfo->ssi_overrun = tswap32(info->ssi_overrun); 7362 tinfo->ssi_trapno = tswap32(info->ssi_trapno); 7363 tinfo->ssi_status = tswap32(info->ssi_status); 7364 tinfo->ssi_int = tswap32(info->ssi_int); 7365 tinfo->ssi_ptr = tswap64(info->ssi_ptr); 7366 tinfo->ssi_utime = tswap64(info->ssi_utime); 7367 
tinfo->ssi_stime = tswap64(info->ssi_stime); 7368 tinfo->ssi_addr = tswap64(info->ssi_addr); 7369 } 7370 7371 static abi_long host_to_target_data_signalfd(void *buf, size_t len) 7372 { 7373 int i; 7374 7375 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) { 7376 host_to_target_signalfd_siginfo(buf + i, buf + i); 7377 } 7378 7379 return len; 7380 } 7381 7382 static TargetFdTrans target_signalfd_trans = { 7383 .host_to_target_data = host_to_target_data_signalfd, 7384 }; 7385 7386 static abi_long do_signalfd4(int fd, abi_long mask, int flags) 7387 { 7388 int host_flags; 7389 target_sigset_t *target_mask; 7390 sigset_t host_mask; 7391 abi_long ret; 7392 7393 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) { 7394 return -TARGET_EINVAL; 7395 } 7396 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) { 7397 return -TARGET_EFAULT; 7398 } 7399 7400 target_to_host_sigset(&host_mask, target_mask); 7401 7402 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 7403 7404 ret = get_errno(signalfd(fd, &host_mask, host_flags)); 7405 if (ret >= 0) { 7406 fd_trans_register(ret, &target_signalfd_trans); 7407 } 7408 7409 unlock_user_struct(target_mask, mask, 0); 7410 7411 return ret; 7412 } 7413 #endif 7414 7415 /* Map host to target signal numbers for the wait family of syscalls. 7416 Assume all other status bits are the same. 
*/ 7417 int host_to_target_waitstatus(int status) 7418 { 7419 if (WIFSIGNALED(status)) { 7420 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 7421 } 7422 if (WIFSTOPPED(status)) { 7423 return (host_to_target_signal(WSTOPSIG(status)) << 8) 7424 | (status & 0xff); 7425 } 7426 return status; 7427 } 7428 7429 static int open_self_cmdline(void *cpu_env, int fd) 7430 { 7431 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7432 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 7433 int i; 7434 7435 for (i = 0; i < bprm->argc; i++) { 7436 size_t len = strlen(bprm->argv[i]) + 1; 7437 7438 if (write(fd, bprm->argv[i], len) != len) { 7439 return -1; 7440 } 7441 } 7442 7443 return 0; 7444 } 7445 7446 static int open_self_maps(void *cpu_env, int fd) 7447 { 7448 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7449 TaskState *ts = cpu->opaque; 7450 FILE *fp; 7451 char *line = NULL; 7452 size_t len = 0; 7453 ssize_t read; 7454 7455 fp = fopen("/proc/self/maps", "r"); 7456 if (fp == NULL) { 7457 return -1; 7458 } 7459 7460 while ((read = getline(&line, &len, fp)) != -1) { 7461 int fields, dev_maj, dev_min, inode; 7462 uint64_t min, max, offset; 7463 char flag_r, flag_w, flag_x, flag_p; 7464 char path[512] = ""; 7465 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 7466 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 7467 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 7468 7469 if ((fields < 10) || (fields > 11)) { 7470 continue; 7471 } 7472 if (h2g_valid(min)) { 7473 int flags = page_get_flags(h2g(min)); 7474 max = h2g_valid(max - 1) ? 
max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1; 7475 if (page_check_range(h2g(min), max - min, flags) == -1) { 7476 continue; 7477 } 7478 if (h2g(min) == ts->info->stack_limit) { 7479 pstrcpy(path, sizeof(path), " [stack]"); 7480 } 7481 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 7482 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 7483 h2g(min), h2g(max - 1) + 1, flag_r, flag_w, 7484 flag_x, flag_p, offset, dev_maj, dev_min, inode, 7485 path[0] ? " " : "", path); 7486 } 7487 } 7488 7489 free(line); 7490 fclose(fp); 7491 7492 return 0; 7493 } 7494 7495 static int open_self_stat(void *cpu_env, int fd) 7496 { 7497 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7498 TaskState *ts = cpu->opaque; 7499 abi_ulong start_stack = ts->info->start_stack; 7500 int i; 7501 7502 for (i = 0; i < 44; i++) { 7503 char buf[128]; 7504 int len; 7505 uint64_t val = 0; 7506 7507 if (i == 0) { 7508 /* pid */ 7509 val = getpid(); 7510 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 7511 } else if (i == 1) { 7512 /* app name */ 7513 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 7514 } else if (i == 27) { 7515 /* stack bottom */ 7516 val = start_stack; 7517 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 7518 } else { 7519 /* for the rest, there is MasterCard */ 7520 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 7521 } 7522 7523 len = strlen(buf); 7524 if (write(fd, buf, len) != len) { 7525 return -1; 7526 } 7527 } 7528 7529 return 0; 7530 } 7531 7532 static int open_self_auxv(void *cpu_env, int fd) 7533 { 7534 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7535 TaskState *ts = cpu->opaque; 7536 abi_ulong auxv = ts->info->saved_auxv; 7537 abi_ulong len = ts->info->auxv_len; 7538 char *ptr; 7539 7540 /* 7541 * Auxiliary vector is stored in target process stack. 
7542 * read in whole auxv vector and copy it to file 7543 */ 7544 ptr = lock_user(VERIFY_READ, auxv, len, 0); 7545 if (ptr != NULL) { 7546 while (len > 0) { 7547 ssize_t r; 7548 r = write(fd, ptr, len); 7549 if (r <= 0) { 7550 break; 7551 } 7552 len -= r; 7553 ptr += r; 7554 } 7555 lseek(fd, 0, SEEK_SET); 7556 unlock_user(ptr, auxv, len); 7557 } 7558 7559 return 0; 7560 } 7561 7562 static int is_proc_myself(const char *filename, const char *entry) 7563 { 7564 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 7565 filename += strlen("/proc/"); 7566 if (!strncmp(filename, "self/", strlen("self/"))) { 7567 filename += strlen("self/"); 7568 } else if (*filename >= '1' && *filename <= '9') { 7569 char myself[80]; 7570 snprintf(myself, sizeof(myself), "%d/", getpid()); 7571 if (!strncmp(filename, myself, strlen(myself))) { 7572 filename += strlen(myself); 7573 } else { 7574 return 0; 7575 } 7576 } else { 7577 return 0; 7578 } 7579 if (!strcmp(filename, entry)) { 7580 return 1; 7581 } 7582 } 7583 return 0; 7584 } 7585 7586 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 7587 static int is_proc(const char *filename, const char *entry) 7588 { 7589 return strcmp(filename, entry) == 0; 7590 } 7591 7592 static int open_net_route(void *cpu_env, int fd) 7593 { 7594 FILE *fp; 7595 char *line = NULL; 7596 size_t len = 0; 7597 ssize_t read; 7598 7599 fp = fopen("/proc/net/route", "r"); 7600 if (fp == NULL) { 7601 return -1; 7602 } 7603 7604 /* read header */ 7605 7606 read = getline(&line, &len, fp); 7607 dprintf(fd, "%s", line); 7608 7609 /* read routes */ 7610 7611 while ((read = getline(&line, &len, fp)) != -1) { 7612 char iface[16]; 7613 uint32_t dest, gw, mask; 7614 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 7615 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7616 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 7617 &mask, &mtu, &window, &irtt); 7618 dprintf(fd, 
"%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7619 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 7620 metric, tswap32(mask), mtu, window, irtt); 7621 } 7622 7623 free(line); 7624 fclose(fp); 7625 7626 return 0; 7627 } 7628 #endif 7629 7630 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) 7631 { 7632 struct fake_open { 7633 const char *filename; 7634 int (*fill)(void *cpu_env, int fd); 7635 int (*cmp)(const char *s1, const char *s2); 7636 }; 7637 const struct fake_open *fake_open; 7638 static const struct fake_open fakes[] = { 7639 { "maps", open_self_maps, is_proc_myself }, 7640 { "stat", open_self_stat, is_proc_myself }, 7641 { "auxv", open_self_auxv, is_proc_myself }, 7642 { "cmdline", open_self_cmdline, is_proc_myself }, 7643 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 7644 { "/proc/net/route", open_net_route, is_proc }, 7645 #endif 7646 { NULL, NULL, NULL } 7647 }; 7648 7649 if (is_proc_myself(pathname, "exe")) { 7650 int execfd = qemu_getauxval(AT_EXECFD); 7651 return execfd ? 
execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    /* Does this path match one of the emulated /proc entries? */
    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, and the guest
         * only ever sees the fd, never the pathname. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill callback's errno across close() */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the generated content from the start */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}

#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs that were not produced by us (wrong magic) */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}

/* Byteswap the 8-byte eventfd counter(s) in buf.  Used for both
 * directions, since tswap64 is its own inverse.
 */
static abi_long swap_data_eventfd(void *buf, size_t len)
{
    uint64_t *counter = buf;
    int i;

    if (len < sizeof(uint64_t)) {
        return -EINVAL;
    }

    for (i = 0; i < len; i += sizeof(uint64_t)) {
        *counter = tswap64(*counter);
        counter++;
    }

    return len;
}

static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};

#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
     defined(__NR_inotify_init1))
/* Byteswap a buffer of variable-length inotify_event records for the
 * guest.  The trailing name bytes after each header are left untouched;
 * name_len is read before ev->len is swapped so the loop stride stays
 * in host byte order.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    int i;
    uint32_t name_len;

    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + i);
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    return len;
}

static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
#endif

/* Expand a guest CPU-affinity bitmask (abi_ulong words, guest byte
 * order) into a host unsigned-long bitmask, bit by bit.  Returns 0 or
 * -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_cpu_mask: pack a host bitmask into guest
 * abi_ulong words at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}

/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_long ret;
    struct stat st;
    struct statfs stfs;
    void *p;

#if defined(DEBUG_ERESTARTSYS)
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static int flag;

        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

#ifdef DEBUG
    gemu_log("syscall %d", num);
#endif
    trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
    if(do_strace)
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applictions it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more then one thread.
*/ 7869 7870 if (block_signals()) { 7871 ret = -TARGET_ERESTARTSYS; 7872 break; 7873 } 7874 7875 cpu_list_lock(); 7876 7877 if (CPU_NEXT(first_cpu)) { 7878 TaskState *ts; 7879 7880 /* Remove the CPU from the list. */ 7881 QTAILQ_REMOVE(&cpus, cpu, node); 7882 7883 cpu_list_unlock(); 7884 7885 ts = cpu->opaque; 7886 if (ts->child_tidptr) { 7887 put_user_u32(0, ts->child_tidptr); 7888 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 7889 NULL, NULL, 0); 7890 } 7891 thread_cpu = NULL; 7892 object_unref(OBJECT(cpu)); 7893 g_free(ts); 7894 rcu_unregister_thread(); 7895 pthread_exit(NULL); 7896 } 7897 7898 cpu_list_unlock(); 7899 #ifdef TARGET_GPROF 7900 _mcleanup(); 7901 #endif 7902 gdb_exit(cpu_env, arg1); 7903 _exit(arg1); 7904 ret = 0; /* avoid warning */ 7905 break; 7906 case TARGET_NR_read: 7907 if (arg3 == 0) 7908 ret = 0; 7909 else { 7910 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7911 goto efault; 7912 ret = get_errno(safe_read(arg1, p, arg3)); 7913 if (ret >= 0 && 7914 fd_trans_host_to_target_data(arg1)) { 7915 ret = fd_trans_host_to_target_data(arg1)(p, ret); 7916 } 7917 unlock_user(p, arg2, ret); 7918 } 7919 break; 7920 case TARGET_NR_write: 7921 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7922 goto efault; 7923 if (fd_trans_target_to_host_data(arg1)) { 7924 void *copy = g_malloc(arg3); 7925 memcpy(copy, p, arg3); 7926 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 7927 if (ret >= 0) { 7928 ret = get_errno(safe_write(arg1, copy, ret)); 7929 } 7930 g_free(copy); 7931 } else { 7932 ret = get_errno(safe_write(arg1, p, arg3)); 7933 } 7934 unlock_user(p, arg2, 0); 7935 break; 7936 #ifdef TARGET_NR_open 7937 case TARGET_NR_open: 7938 if (!(p = lock_user_string(arg1))) 7939 goto efault; 7940 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 7941 target_to_host_bitmask(arg2, fcntl_flags_tbl), 7942 arg3)); 7943 fd_trans_unregister(ret); 7944 unlock_user(p, arg1, 0); 7945 break; 7946 #endif 7947 case TARGET_NR_openat: 7948 if (!(p = 
lock_user_string(arg2))) 7949 goto efault; 7950 ret = get_errno(do_openat(cpu_env, arg1, p, 7951 target_to_host_bitmask(arg3, fcntl_flags_tbl), 7952 arg4)); 7953 fd_trans_unregister(ret); 7954 unlock_user(p, arg2, 0); 7955 break; 7956 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7957 case TARGET_NR_name_to_handle_at: 7958 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 7959 break; 7960 #endif 7961 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7962 case TARGET_NR_open_by_handle_at: 7963 ret = do_open_by_handle_at(arg1, arg2, arg3); 7964 fd_trans_unregister(ret); 7965 break; 7966 #endif 7967 case TARGET_NR_close: 7968 fd_trans_unregister(arg1); 7969 ret = get_errno(close(arg1)); 7970 break; 7971 case TARGET_NR_brk: 7972 ret = do_brk(arg1); 7973 break; 7974 #ifdef TARGET_NR_fork 7975 case TARGET_NR_fork: 7976 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 7977 break; 7978 #endif 7979 #ifdef TARGET_NR_waitpid 7980 case TARGET_NR_waitpid: 7981 { 7982 int status; 7983 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 7984 if (!is_error(ret) && arg2 && ret 7985 && put_user_s32(host_to_target_waitstatus(status), arg2)) 7986 goto efault; 7987 } 7988 break; 7989 #endif 7990 #ifdef TARGET_NR_waitid 7991 case TARGET_NR_waitid: 7992 { 7993 siginfo_t info; 7994 info.si_pid = 0; 7995 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 7996 if (!is_error(ret) && arg3 && info.si_pid != 0) { 7997 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 7998 goto efault; 7999 host_to_target_siginfo(p, &info); 8000 unlock_user(p, arg3, sizeof(target_siginfo_t)); 8001 } 8002 } 8003 break; 8004 #endif 8005 #ifdef TARGET_NR_creat /* not on alpha */ 8006 case TARGET_NR_creat: 8007 if (!(p = lock_user_string(arg1))) 8008 goto efault; 8009 ret = get_errno(creat(p, arg2)); 8010 fd_trans_unregister(ret); 8011 unlock_user(p, arg1, 0); 8012 break; 8013 #endif 8014 #ifdef TARGET_NR_link 
8015 case TARGET_NR_link: 8016 { 8017 void * p2; 8018 p = lock_user_string(arg1); 8019 p2 = lock_user_string(arg2); 8020 if (!p || !p2) 8021 ret = -TARGET_EFAULT; 8022 else 8023 ret = get_errno(link(p, p2)); 8024 unlock_user(p2, arg2, 0); 8025 unlock_user(p, arg1, 0); 8026 } 8027 break; 8028 #endif 8029 #if defined(TARGET_NR_linkat) 8030 case TARGET_NR_linkat: 8031 { 8032 void * p2 = NULL; 8033 if (!arg2 || !arg4) 8034 goto efault; 8035 p = lock_user_string(arg2); 8036 p2 = lock_user_string(arg4); 8037 if (!p || !p2) 8038 ret = -TARGET_EFAULT; 8039 else 8040 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8041 unlock_user(p, arg2, 0); 8042 unlock_user(p2, arg4, 0); 8043 } 8044 break; 8045 #endif 8046 #ifdef TARGET_NR_unlink 8047 case TARGET_NR_unlink: 8048 if (!(p = lock_user_string(arg1))) 8049 goto efault; 8050 ret = get_errno(unlink(p)); 8051 unlock_user(p, arg1, 0); 8052 break; 8053 #endif 8054 #if defined(TARGET_NR_unlinkat) 8055 case TARGET_NR_unlinkat: 8056 if (!(p = lock_user_string(arg2))) 8057 goto efault; 8058 ret = get_errno(unlinkat(arg1, p, arg3)); 8059 unlock_user(p, arg2, 0); 8060 break; 8061 #endif 8062 case TARGET_NR_execve: 8063 { 8064 char **argp, **envp; 8065 int argc, envc; 8066 abi_ulong gp; 8067 abi_ulong guest_argp; 8068 abi_ulong guest_envp; 8069 abi_ulong addr; 8070 char **q; 8071 int total_size = 0; 8072 8073 argc = 0; 8074 guest_argp = arg2; 8075 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 8076 if (get_user_ual(addr, gp)) 8077 goto efault; 8078 if (!addr) 8079 break; 8080 argc++; 8081 } 8082 envc = 0; 8083 guest_envp = arg3; 8084 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 8085 if (get_user_ual(addr, gp)) 8086 goto efault; 8087 if (!addr) 8088 break; 8089 envc++; 8090 } 8091 8092 argp = g_new0(char *, argc + 1); 8093 envp = g_new0(char *, envc + 1); 8094 8095 for (gp = guest_argp, q = argp; gp; 8096 gp += sizeof(abi_ulong), q++) { 8097 if (get_user_ual(addr, gp)) 8098 goto execve_efault; 8099 if (!addr) 8100 break; 
8101 if (!(*q = lock_user_string(addr))) 8102 goto execve_efault; 8103 total_size += strlen(*q) + 1; 8104 } 8105 *q = NULL; 8106 8107 for (gp = guest_envp, q = envp; gp; 8108 gp += sizeof(abi_ulong), q++) { 8109 if (get_user_ual(addr, gp)) 8110 goto execve_efault; 8111 if (!addr) 8112 break; 8113 if (!(*q = lock_user_string(addr))) 8114 goto execve_efault; 8115 total_size += strlen(*q) + 1; 8116 } 8117 *q = NULL; 8118 8119 if (!(p = lock_user_string(arg1))) 8120 goto execve_efault; 8121 /* Although execve() is not an interruptible syscall it is 8122 * a special case where we must use the safe_syscall wrapper: 8123 * if we allow a signal to happen before we make the host 8124 * syscall then we will 'lose' it, because at the point of 8125 * execve the process leaves QEMU's control. So we use the 8126 * safe syscall wrapper to ensure that we either take the 8127 * signal as a guest signal, or else it does not happen 8128 * before the execve completes and makes it the other 8129 * program's problem. 
8130 */ 8131 ret = get_errno(safe_execve(p, argp, envp)); 8132 unlock_user(p, arg1, 0); 8133 8134 goto execve_end; 8135 8136 execve_efault: 8137 ret = -TARGET_EFAULT; 8138 8139 execve_end: 8140 for (gp = guest_argp, q = argp; *q; 8141 gp += sizeof(abi_ulong), q++) { 8142 if (get_user_ual(addr, gp) 8143 || !addr) 8144 break; 8145 unlock_user(*q, addr, 0); 8146 } 8147 for (gp = guest_envp, q = envp; *q; 8148 gp += sizeof(abi_ulong), q++) { 8149 if (get_user_ual(addr, gp) 8150 || !addr) 8151 break; 8152 unlock_user(*q, addr, 0); 8153 } 8154 8155 g_free(argp); 8156 g_free(envp); 8157 } 8158 break; 8159 case TARGET_NR_chdir: 8160 if (!(p = lock_user_string(arg1))) 8161 goto efault; 8162 ret = get_errno(chdir(p)); 8163 unlock_user(p, arg1, 0); 8164 break; 8165 #ifdef TARGET_NR_time 8166 case TARGET_NR_time: 8167 { 8168 time_t host_time; 8169 ret = get_errno(time(&host_time)); 8170 if (!is_error(ret) 8171 && arg1 8172 && put_user_sal(host_time, arg1)) 8173 goto efault; 8174 } 8175 break; 8176 #endif 8177 #ifdef TARGET_NR_mknod 8178 case TARGET_NR_mknod: 8179 if (!(p = lock_user_string(arg1))) 8180 goto efault; 8181 ret = get_errno(mknod(p, arg2, arg3)); 8182 unlock_user(p, arg1, 0); 8183 break; 8184 #endif 8185 #if defined(TARGET_NR_mknodat) 8186 case TARGET_NR_mknodat: 8187 if (!(p = lock_user_string(arg2))) 8188 goto efault; 8189 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8190 unlock_user(p, arg2, 0); 8191 break; 8192 #endif 8193 #ifdef TARGET_NR_chmod 8194 case TARGET_NR_chmod: 8195 if (!(p = lock_user_string(arg1))) 8196 goto efault; 8197 ret = get_errno(chmod(p, arg2)); 8198 unlock_user(p, arg1, 0); 8199 break; 8200 #endif 8201 #ifdef TARGET_NR_break 8202 case TARGET_NR_break: 8203 goto unimplemented; 8204 #endif 8205 #ifdef TARGET_NR_oldstat 8206 case TARGET_NR_oldstat: 8207 goto unimplemented; 8208 #endif 8209 case TARGET_NR_lseek: 8210 ret = get_errno(lseek(arg1, arg2, arg3)); 8211 break; 8212 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8213 /* 
Alpha specific */ 8214 case TARGET_NR_getxpid: 8215 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 8216 ret = get_errno(getpid()); 8217 break; 8218 #endif 8219 #ifdef TARGET_NR_getpid 8220 case TARGET_NR_getpid: 8221 ret = get_errno(getpid()); 8222 break; 8223 #endif 8224 case TARGET_NR_mount: 8225 { 8226 /* need to look at the data field */ 8227 void *p2, *p3; 8228 8229 if (arg1) { 8230 p = lock_user_string(arg1); 8231 if (!p) { 8232 goto efault; 8233 } 8234 } else { 8235 p = NULL; 8236 } 8237 8238 p2 = lock_user_string(arg2); 8239 if (!p2) { 8240 if (arg1) { 8241 unlock_user(p, arg1, 0); 8242 } 8243 goto efault; 8244 } 8245 8246 if (arg3) { 8247 p3 = lock_user_string(arg3); 8248 if (!p3) { 8249 if (arg1) { 8250 unlock_user(p, arg1, 0); 8251 } 8252 unlock_user(p2, arg2, 0); 8253 goto efault; 8254 } 8255 } else { 8256 p3 = NULL; 8257 } 8258 8259 /* FIXME - arg5 should be locked, but it isn't clear how to 8260 * do that since it's not guaranteed to be a NULL-terminated 8261 * string. 
8262 */ 8263 if (!arg5) { 8264 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 8265 } else { 8266 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 8267 } 8268 ret = get_errno(ret); 8269 8270 if (arg1) { 8271 unlock_user(p, arg1, 0); 8272 } 8273 unlock_user(p2, arg2, 0); 8274 if (arg3) { 8275 unlock_user(p3, arg3, 0); 8276 } 8277 } 8278 break; 8279 #ifdef TARGET_NR_umount 8280 case TARGET_NR_umount: 8281 if (!(p = lock_user_string(arg1))) 8282 goto efault; 8283 ret = get_errno(umount(p)); 8284 unlock_user(p, arg1, 0); 8285 break; 8286 #endif 8287 #ifdef TARGET_NR_stime /* not on alpha */ 8288 case TARGET_NR_stime: 8289 { 8290 time_t host_time; 8291 if (get_user_sal(host_time, arg1)) 8292 goto efault; 8293 ret = get_errno(stime(&host_time)); 8294 } 8295 break; 8296 #endif 8297 case TARGET_NR_ptrace: 8298 goto unimplemented; 8299 #ifdef TARGET_NR_alarm /* not on alpha */ 8300 case TARGET_NR_alarm: 8301 ret = alarm(arg1); 8302 break; 8303 #endif 8304 #ifdef TARGET_NR_oldfstat 8305 case TARGET_NR_oldfstat: 8306 goto unimplemented; 8307 #endif 8308 #ifdef TARGET_NR_pause /* not on alpha */ 8309 case TARGET_NR_pause: 8310 if (!block_signals()) { 8311 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 8312 } 8313 ret = -TARGET_EINTR; 8314 break; 8315 #endif 8316 #ifdef TARGET_NR_utime 8317 case TARGET_NR_utime: 8318 { 8319 struct utimbuf tbuf, *host_tbuf; 8320 struct target_utimbuf *target_tbuf; 8321 if (arg2) { 8322 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 8323 goto efault; 8324 tbuf.actime = tswapal(target_tbuf->actime); 8325 tbuf.modtime = tswapal(target_tbuf->modtime); 8326 unlock_user_struct(target_tbuf, arg2, 0); 8327 host_tbuf = &tbuf; 8328 } else { 8329 host_tbuf = NULL; 8330 } 8331 if (!(p = lock_user_string(arg1))) 8332 goto efault; 8333 ret = get_errno(utime(p, host_tbuf)); 8334 unlock_user(p, arg1, 0); 8335 } 8336 break; 8337 #endif 8338 #ifdef TARGET_NR_utimes 8339 case TARGET_NR_utimes: 8340 { 8341 struct timeval *tvp, tv[2]; 
8342 if (arg2) { 8343 if (copy_from_user_timeval(&tv[0], arg2) 8344 || copy_from_user_timeval(&tv[1], 8345 arg2 + sizeof(struct target_timeval))) 8346 goto efault; 8347 tvp = tv; 8348 } else { 8349 tvp = NULL; 8350 } 8351 if (!(p = lock_user_string(arg1))) 8352 goto efault; 8353 ret = get_errno(utimes(p, tvp)); 8354 unlock_user(p, arg1, 0); 8355 } 8356 break; 8357 #endif 8358 #if defined(TARGET_NR_futimesat) 8359 case TARGET_NR_futimesat: 8360 { 8361 struct timeval *tvp, tv[2]; 8362 if (arg3) { 8363 if (copy_from_user_timeval(&tv[0], arg3) 8364 || copy_from_user_timeval(&tv[1], 8365 arg3 + sizeof(struct target_timeval))) 8366 goto efault; 8367 tvp = tv; 8368 } else { 8369 tvp = NULL; 8370 } 8371 if (!(p = lock_user_string(arg2))) 8372 goto efault; 8373 ret = get_errno(futimesat(arg1, path(p), tvp)); 8374 unlock_user(p, arg2, 0); 8375 } 8376 break; 8377 #endif 8378 #ifdef TARGET_NR_stty 8379 case TARGET_NR_stty: 8380 goto unimplemented; 8381 #endif 8382 #ifdef TARGET_NR_gtty 8383 case TARGET_NR_gtty: 8384 goto unimplemented; 8385 #endif 8386 #ifdef TARGET_NR_access 8387 case TARGET_NR_access: 8388 if (!(p = lock_user_string(arg1))) 8389 goto efault; 8390 ret = get_errno(access(path(p), arg2)); 8391 unlock_user(p, arg1, 0); 8392 break; 8393 #endif 8394 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 8395 case TARGET_NR_faccessat: 8396 if (!(p = lock_user_string(arg2))) 8397 goto efault; 8398 ret = get_errno(faccessat(arg1, p, arg3, 0)); 8399 unlock_user(p, arg2, 0); 8400 break; 8401 #endif 8402 #ifdef TARGET_NR_nice /* not on alpha */ 8403 case TARGET_NR_nice: 8404 ret = get_errno(nice(arg1)); 8405 break; 8406 #endif 8407 #ifdef TARGET_NR_ftime 8408 case TARGET_NR_ftime: 8409 goto unimplemented; 8410 #endif 8411 case TARGET_NR_sync: 8412 sync(); 8413 ret = 0; 8414 break; 8415 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 8416 case TARGET_NR_syncfs: 8417 ret = get_errno(syncfs(arg1)); 8418 break; 8419 #endif 8420 case TARGET_NR_kill: 8421 ret = 
get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 8422 break; 8423 #ifdef TARGET_NR_rename 8424 case TARGET_NR_rename: 8425 { 8426 void *p2; 8427 p = lock_user_string(arg1); 8428 p2 = lock_user_string(arg2); 8429 if (!p || !p2) 8430 ret = -TARGET_EFAULT; 8431 else 8432 ret = get_errno(rename(p, p2)); 8433 unlock_user(p2, arg2, 0); 8434 unlock_user(p, arg1, 0); 8435 } 8436 break; 8437 #endif 8438 #if defined(TARGET_NR_renameat) 8439 case TARGET_NR_renameat: 8440 { 8441 void *p2; 8442 p = lock_user_string(arg2); 8443 p2 = lock_user_string(arg4); 8444 if (!p || !p2) 8445 ret = -TARGET_EFAULT; 8446 else 8447 ret = get_errno(renameat(arg1, p, arg3, p2)); 8448 unlock_user(p2, arg4, 0); 8449 unlock_user(p, arg2, 0); 8450 } 8451 break; 8452 #endif 8453 #if defined(TARGET_NR_renameat2) 8454 case TARGET_NR_renameat2: 8455 { 8456 void *p2; 8457 p = lock_user_string(arg2); 8458 p2 = lock_user_string(arg4); 8459 if (!p || !p2) { 8460 ret = -TARGET_EFAULT; 8461 } else { 8462 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 8463 } 8464 unlock_user(p2, arg4, 0); 8465 unlock_user(p, arg2, 0); 8466 } 8467 break; 8468 #endif 8469 #ifdef TARGET_NR_mkdir 8470 case TARGET_NR_mkdir: 8471 if (!(p = lock_user_string(arg1))) 8472 goto efault; 8473 ret = get_errno(mkdir(p, arg2)); 8474 unlock_user(p, arg1, 0); 8475 break; 8476 #endif 8477 #if defined(TARGET_NR_mkdirat) 8478 case TARGET_NR_mkdirat: 8479 if (!(p = lock_user_string(arg2))) 8480 goto efault; 8481 ret = get_errno(mkdirat(arg1, p, arg3)); 8482 unlock_user(p, arg2, 0); 8483 break; 8484 #endif 8485 #ifdef TARGET_NR_rmdir 8486 case TARGET_NR_rmdir: 8487 if (!(p = lock_user_string(arg1))) 8488 goto efault; 8489 ret = get_errno(rmdir(p)); 8490 unlock_user(p, arg1, 0); 8491 break; 8492 #endif 8493 case TARGET_NR_dup: 8494 ret = get_errno(dup(arg1)); 8495 if (ret >= 0) { 8496 fd_trans_dup(arg1, ret); 8497 } 8498 break; 8499 #ifdef TARGET_NR_pipe 8500 case TARGET_NR_pipe: 8501 ret = do_pipe(cpu_env, arg1, 0, 0); 8502 break; 
8503 #endif 8504 #ifdef TARGET_NR_pipe2 8505 case TARGET_NR_pipe2: 8506 ret = do_pipe(cpu_env, arg1, 8507 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 8508 break; 8509 #endif 8510 case TARGET_NR_times: 8511 { 8512 struct target_tms *tmsp; 8513 struct tms tms; 8514 ret = get_errno(times(&tms)); 8515 if (arg1) { 8516 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 8517 if (!tmsp) 8518 goto efault; 8519 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 8520 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 8521 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 8522 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 8523 } 8524 if (!is_error(ret)) 8525 ret = host_to_target_clock_t(ret); 8526 } 8527 break; 8528 #ifdef TARGET_NR_prof 8529 case TARGET_NR_prof: 8530 goto unimplemented; 8531 #endif 8532 #ifdef TARGET_NR_signal 8533 case TARGET_NR_signal: 8534 goto unimplemented; 8535 #endif 8536 case TARGET_NR_acct: 8537 if (arg1 == 0) { 8538 ret = get_errno(acct(NULL)); 8539 } else { 8540 if (!(p = lock_user_string(arg1))) 8541 goto efault; 8542 ret = get_errno(acct(path(p))); 8543 unlock_user(p, arg1, 0); 8544 } 8545 break; 8546 #ifdef TARGET_NR_umount2 8547 case TARGET_NR_umount2: 8548 if (!(p = lock_user_string(arg1))) 8549 goto efault; 8550 ret = get_errno(umount2(p, arg2)); 8551 unlock_user(p, arg1, 0); 8552 break; 8553 #endif 8554 #ifdef TARGET_NR_lock 8555 case TARGET_NR_lock: 8556 goto unimplemented; 8557 #endif 8558 case TARGET_NR_ioctl: 8559 ret = do_ioctl(arg1, arg2, arg3); 8560 break; 8561 #ifdef TARGET_NR_fcntl 8562 case TARGET_NR_fcntl: 8563 ret = do_fcntl(arg1, arg2, arg3); 8564 break; 8565 #endif 8566 #ifdef TARGET_NR_mpx 8567 case TARGET_NR_mpx: 8568 goto unimplemented; 8569 #endif 8570 case TARGET_NR_setpgid: 8571 ret = get_errno(setpgid(arg1, arg2)); 8572 break; 8573 #ifdef TARGET_NR_ulimit 8574 case TARGET_NR_ulimit: 8575 goto unimplemented; 8576 #endif 8577 
#ifdef TARGET_NR_oldolduname 8578 case TARGET_NR_oldolduname: 8579 goto unimplemented; 8580 #endif 8581 case TARGET_NR_umask: 8582 ret = get_errno(umask(arg1)); 8583 break; 8584 case TARGET_NR_chroot: 8585 if (!(p = lock_user_string(arg1))) 8586 goto efault; 8587 ret = get_errno(chroot(p)); 8588 unlock_user(p, arg1, 0); 8589 break; 8590 #ifdef TARGET_NR_ustat 8591 case TARGET_NR_ustat: 8592 goto unimplemented; 8593 #endif 8594 #ifdef TARGET_NR_dup2 8595 case TARGET_NR_dup2: 8596 ret = get_errno(dup2(arg1, arg2)); 8597 if (ret >= 0) { 8598 fd_trans_dup(arg1, arg2); 8599 } 8600 break; 8601 #endif 8602 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 8603 case TARGET_NR_dup3: 8604 { 8605 int host_flags; 8606 8607 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 8608 return -EINVAL; 8609 } 8610 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 8611 ret = get_errno(dup3(arg1, arg2, host_flags)); 8612 if (ret >= 0) { 8613 fd_trans_dup(arg1, arg2); 8614 } 8615 break; 8616 } 8617 #endif 8618 #ifdef TARGET_NR_getppid /* not on alpha */ 8619 case TARGET_NR_getppid: 8620 ret = get_errno(getppid()); 8621 break; 8622 #endif 8623 #ifdef TARGET_NR_getpgrp 8624 case TARGET_NR_getpgrp: 8625 ret = get_errno(getpgrp()); 8626 break; 8627 #endif 8628 case TARGET_NR_setsid: 8629 ret = get_errno(setsid()); 8630 break; 8631 #ifdef TARGET_NR_sigaction 8632 case TARGET_NR_sigaction: 8633 { 8634 #if defined(TARGET_ALPHA) 8635 struct target_sigaction act, oact, *pact = 0; 8636 struct target_old_sigaction *old_act; 8637 if (arg2) { 8638 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8639 goto efault; 8640 act._sa_handler = old_act->_sa_handler; 8641 target_siginitset(&act.sa_mask, old_act->sa_mask); 8642 act.sa_flags = old_act->sa_flags; 8643 act.sa_restorer = 0; 8644 unlock_user_struct(old_act, arg2, 0); 8645 pact = &act; 8646 } 8647 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8648 if (!is_error(ret) && arg3) { 8649 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8650 
goto efault; 8651 old_act->_sa_handler = oact._sa_handler; 8652 old_act->sa_mask = oact.sa_mask.sig[0]; 8653 old_act->sa_flags = oact.sa_flags; 8654 unlock_user_struct(old_act, arg3, 1); 8655 } 8656 #elif defined(TARGET_MIPS) 8657 struct target_sigaction act, oact, *pact, *old_act; 8658 8659 if (arg2) { 8660 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8661 goto efault; 8662 act._sa_handler = old_act->_sa_handler; 8663 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 8664 act.sa_flags = old_act->sa_flags; 8665 unlock_user_struct(old_act, arg2, 0); 8666 pact = &act; 8667 } else { 8668 pact = NULL; 8669 } 8670 8671 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8672 8673 if (!is_error(ret) && arg3) { 8674 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8675 goto efault; 8676 old_act->_sa_handler = oact._sa_handler; 8677 old_act->sa_flags = oact.sa_flags; 8678 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 8679 old_act->sa_mask.sig[1] = 0; 8680 old_act->sa_mask.sig[2] = 0; 8681 old_act->sa_mask.sig[3] = 0; 8682 unlock_user_struct(old_act, arg3, 1); 8683 } 8684 #else 8685 struct target_old_sigaction *old_act; 8686 struct target_sigaction act, oact, *pact; 8687 if (arg2) { 8688 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8689 goto efault; 8690 act._sa_handler = old_act->_sa_handler; 8691 target_siginitset(&act.sa_mask, old_act->sa_mask); 8692 act.sa_flags = old_act->sa_flags; 8693 act.sa_restorer = old_act->sa_restorer; 8694 unlock_user_struct(old_act, arg2, 0); 8695 pact = &act; 8696 } else { 8697 pact = NULL; 8698 } 8699 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8700 if (!is_error(ret) && arg3) { 8701 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8702 goto efault; 8703 old_act->_sa_handler = oact._sa_handler; 8704 old_act->sa_mask = oact.sa_mask.sig[0]; 8705 old_act->sa_flags = oact.sa_flags; 8706 old_act->sa_restorer = oact.sa_restorer; 8707 unlock_user_struct(old_act, arg3, 1); 8708 } 8709 #endif 8710 } 8711 
break; 8712 #endif 8713 case TARGET_NR_rt_sigaction: 8714 { 8715 #if defined(TARGET_ALPHA) 8716 /* For Alpha and SPARC this is a 5 argument syscall, with 8717 * a 'restorer' parameter which must be copied into the 8718 * sa_restorer field of the sigaction struct. 8719 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 8720 * and arg5 is the sigsetsize. 8721 * Alpha also has a separate rt_sigaction struct that it uses 8722 * here; SPARC uses the usual sigaction struct. 8723 */ 8724 struct target_rt_sigaction *rt_act; 8725 struct target_sigaction act, oact, *pact = 0; 8726 8727 if (arg4 != sizeof(target_sigset_t)) { 8728 ret = -TARGET_EINVAL; 8729 break; 8730 } 8731 if (arg2) { 8732 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 8733 goto efault; 8734 act._sa_handler = rt_act->_sa_handler; 8735 act.sa_mask = rt_act->sa_mask; 8736 act.sa_flags = rt_act->sa_flags; 8737 act.sa_restorer = arg5; 8738 unlock_user_struct(rt_act, arg2, 0); 8739 pact = &act; 8740 } 8741 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8742 if (!is_error(ret) && arg3) { 8743 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 8744 goto efault; 8745 rt_act->_sa_handler = oact._sa_handler; 8746 rt_act->sa_mask = oact.sa_mask; 8747 rt_act->sa_flags = oact.sa_flags; 8748 unlock_user_struct(rt_act, arg3, 1); 8749 } 8750 #else 8751 #ifdef TARGET_SPARC 8752 target_ulong restorer = arg4; 8753 target_ulong sigsetsize = arg5; 8754 #else 8755 target_ulong sigsetsize = arg4; 8756 #endif 8757 struct target_sigaction *act; 8758 struct target_sigaction *oact; 8759 8760 if (sigsetsize != sizeof(target_sigset_t)) { 8761 ret = -TARGET_EINVAL; 8762 break; 8763 } 8764 if (arg2) { 8765 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) { 8766 goto efault; 8767 } 8768 #ifdef TARGET_SPARC 8769 act->sa_restorer = restorer; 8770 #endif 8771 } else { 8772 act = NULL; 8773 } 8774 if (arg3) { 8775 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 8776 ret = -TARGET_EFAULT; 8777 goto rt_sigaction_fail; 
8778 } 8779 } else 8780 oact = NULL; 8781 ret = get_errno(do_sigaction(arg1, act, oact)); 8782 rt_sigaction_fail: 8783 if (act) 8784 unlock_user_struct(act, arg2, 0); 8785 if (oact) 8786 unlock_user_struct(oact, arg3, 1); 8787 #endif 8788 } 8789 break; 8790 #ifdef TARGET_NR_sgetmask /* not on alpha */ 8791 case TARGET_NR_sgetmask: 8792 { 8793 sigset_t cur_set; 8794 abi_ulong target_set; 8795 ret = do_sigprocmask(0, NULL, &cur_set); 8796 if (!ret) { 8797 host_to_target_old_sigset(&target_set, &cur_set); 8798 ret = target_set; 8799 } 8800 } 8801 break; 8802 #endif 8803 #ifdef TARGET_NR_ssetmask /* not on alpha */ 8804 case TARGET_NR_ssetmask: 8805 { 8806 sigset_t set, oset; 8807 abi_ulong target_set = arg1; 8808 target_to_host_old_sigset(&set, &target_set); 8809 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 8810 if (!ret) { 8811 host_to_target_old_sigset(&target_set, &oset); 8812 ret = target_set; 8813 } 8814 } 8815 break; 8816 #endif 8817 #ifdef TARGET_NR_sigprocmask 8818 case TARGET_NR_sigprocmask: 8819 { 8820 #if defined(TARGET_ALPHA) 8821 sigset_t set, oldset; 8822 abi_ulong mask; 8823 int how; 8824 8825 switch (arg1) { 8826 case TARGET_SIG_BLOCK: 8827 how = SIG_BLOCK; 8828 break; 8829 case TARGET_SIG_UNBLOCK: 8830 how = SIG_UNBLOCK; 8831 break; 8832 case TARGET_SIG_SETMASK: 8833 how = SIG_SETMASK; 8834 break; 8835 default: 8836 ret = -TARGET_EINVAL; 8837 goto fail; 8838 } 8839 mask = arg2; 8840 target_to_host_old_sigset(&set, &mask); 8841 8842 ret = do_sigprocmask(how, &set, &oldset); 8843 if (!is_error(ret)) { 8844 host_to_target_old_sigset(&mask, &oldset); 8845 ret = mask; 8846 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 8847 } 8848 #else 8849 sigset_t set, oldset, *set_ptr; 8850 int how; 8851 8852 if (arg2) { 8853 switch (arg1) { 8854 case TARGET_SIG_BLOCK: 8855 how = SIG_BLOCK; 8856 break; 8857 case TARGET_SIG_UNBLOCK: 8858 how = SIG_UNBLOCK; 8859 break; 8860 case TARGET_SIG_SETMASK: 8861 how = SIG_SETMASK; 8862 break; 8863 default: 
8864 ret = -TARGET_EINVAL; 8865 goto fail; 8866 } 8867 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 8868 goto efault; 8869 target_to_host_old_sigset(&set, p); 8870 unlock_user(p, arg2, 0); 8871 set_ptr = &set; 8872 } else { 8873 how = 0; 8874 set_ptr = NULL; 8875 } 8876 ret = do_sigprocmask(how, set_ptr, &oldset); 8877 if (!is_error(ret) && arg3) { 8878 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 8879 goto efault; 8880 host_to_target_old_sigset(p, &oldset); 8881 unlock_user(p, arg3, sizeof(target_sigset_t)); 8882 } 8883 #endif 8884 } 8885 break; 8886 #endif 8887 case TARGET_NR_rt_sigprocmask: 8888 { 8889 int how = arg1; 8890 sigset_t set, oldset, *set_ptr; 8891 8892 if (arg4 != sizeof(target_sigset_t)) { 8893 ret = -TARGET_EINVAL; 8894 break; 8895 } 8896 8897 if (arg2) { 8898 switch(how) { 8899 case TARGET_SIG_BLOCK: 8900 how = SIG_BLOCK; 8901 break; 8902 case TARGET_SIG_UNBLOCK: 8903 how = SIG_UNBLOCK; 8904 break; 8905 case TARGET_SIG_SETMASK: 8906 how = SIG_SETMASK; 8907 break; 8908 default: 8909 ret = -TARGET_EINVAL; 8910 goto fail; 8911 } 8912 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 8913 goto efault; 8914 target_to_host_sigset(&set, p); 8915 unlock_user(p, arg2, 0); 8916 set_ptr = &set; 8917 } else { 8918 how = 0; 8919 set_ptr = NULL; 8920 } 8921 ret = do_sigprocmask(how, set_ptr, &oldset); 8922 if (!is_error(ret) && arg3) { 8923 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 8924 goto efault; 8925 host_to_target_sigset(p, &oldset); 8926 unlock_user(p, arg3, sizeof(target_sigset_t)); 8927 } 8928 } 8929 break; 8930 #ifdef TARGET_NR_sigpending 8931 case TARGET_NR_sigpending: 8932 { 8933 sigset_t set; 8934 ret = get_errno(sigpending(&set)); 8935 if (!is_error(ret)) { 8936 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 8937 goto efault; 8938 host_to_target_old_sigset(p, &set); 8939 unlock_user(p, arg1, sizeof(target_sigset_t)); 8940 } 8941 } 
8942 break; 8943 #endif 8944 case TARGET_NR_rt_sigpending: 8945 { 8946 sigset_t set; 8947 8948 /* Yes, this check is >, not != like most. We follow the kernel's 8949 * logic and it does it like this because it implements 8950 * NR_sigpending through the same code path, and in that case 8951 * the old_sigset_t is smaller in size. 8952 */ 8953 if (arg2 > sizeof(target_sigset_t)) { 8954 ret = -TARGET_EINVAL; 8955 break; 8956 } 8957 8958 ret = get_errno(sigpending(&set)); 8959 if (!is_error(ret)) { 8960 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 8961 goto efault; 8962 host_to_target_sigset(p, &set); 8963 unlock_user(p, arg1, sizeof(target_sigset_t)); 8964 } 8965 } 8966 break; 8967 #ifdef TARGET_NR_sigsuspend 8968 case TARGET_NR_sigsuspend: 8969 { 8970 TaskState *ts = cpu->opaque; 8971 #if defined(TARGET_ALPHA) 8972 abi_ulong mask = arg1; 8973 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); 8974 #else 8975 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8976 goto efault; 8977 target_to_host_old_sigset(&ts->sigsuspend_mask, p); 8978 unlock_user(p, arg1, 0); 8979 #endif 8980 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 8981 SIGSET_T_SIZE)); 8982 if (ret != -TARGET_ERESTARTSYS) { 8983 ts->in_sigsuspend = 1; 8984 } 8985 } 8986 break; 8987 #endif 8988 case TARGET_NR_rt_sigsuspend: 8989 { 8990 TaskState *ts = cpu->opaque; 8991 8992 if (arg2 != sizeof(target_sigset_t)) { 8993 ret = -TARGET_EINVAL; 8994 break; 8995 } 8996 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 8997 goto efault; 8998 target_to_host_sigset(&ts->sigsuspend_mask, p); 8999 unlock_user(p, arg1, 0); 9000 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 9001 SIGSET_T_SIZE)); 9002 if (ret != -TARGET_ERESTARTSYS) { 9003 ts->in_sigsuspend = 1; 9004 } 9005 } 9006 break; 9007 case TARGET_NR_rt_sigtimedwait: 9008 { 9009 sigset_t set; 9010 struct timespec uts, *puts; 9011 siginfo_t uinfo; 9012 9013 if (arg4 != 
sizeof(target_sigset_t)) { 9014 ret = -TARGET_EINVAL; 9015 break; 9016 } 9017 9018 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9019 goto efault; 9020 target_to_host_sigset(&set, p); 9021 unlock_user(p, arg1, 0); 9022 if (arg3) { 9023 puts = &uts; 9024 target_to_host_timespec(puts, arg3); 9025 } else { 9026 puts = NULL; 9027 } 9028 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9029 SIGSET_T_SIZE)); 9030 if (!is_error(ret)) { 9031 if (arg2) { 9032 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9033 0); 9034 if (!p) { 9035 goto efault; 9036 } 9037 host_to_target_siginfo(p, &uinfo); 9038 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9039 } 9040 ret = host_to_target_signal(ret); 9041 } 9042 } 9043 break; 9044 case TARGET_NR_rt_sigqueueinfo: 9045 { 9046 siginfo_t uinfo; 9047 9048 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9049 if (!p) { 9050 goto efault; 9051 } 9052 target_to_host_siginfo(&uinfo, p); 9053 unlock_user(p, arg3, 0); 9054 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 9055 } 9056 break; 9057 case TARGET_NR_rt_tgsigqueueinfo: 9058 { 9059 siginfo_t uinfo; 9060 9061 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 9062 if (!p) { 9063 goto efault; 9064 } 9065 target_to_host_siginfo(&uinfo, p); 9066 unlock_user(p, arg4, 0); 9067 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo)); 9068 } 9069 break; 9070 #ifdef TARGET_NR_sigreturn 9071 case TARGET_NR_sigreturn: 9072 if (block_signals()) { 9073 ret = -TARGET_ERESTARTSYS; 9074 } else { 9075 ret = do_sigreturn(cpu_env); 9076 } 9077 break; 9078 #endif 9079 case TARGET_NR_rt_sigreturn: 9080 if (block_signals()) { 9081 ret = -TARGET_ERESTARTSYS; 9082 } else { 9083 ret = do_rt_sigreturn(cpu_env); 9084 } 9085 break; 9086 case TARGET_NR_sethostname: 9087 if (!(p = lock_user_string(arg1))) 9088 goto efault; 9089 ret = get_errno(sethostname(p, arg2)); 9090 unlock_user(p, arg1, 0); 9091 break; 9092 case 
TARGET_NR_setrlimit: 9093 { 9094 int resource = target_to_host_resource(arg1); 9095 struct target_rlimit *target_rlim; 9096 struct rlimit rlim; 9097 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9098 goto efault; 9099 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9100 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 9101 unlock_user_struct(target_rlim, arg2, 0); 9102 ret = get_errno(setrlimit(resource, &rlim)); 9103 } 9104 break; 9105 case TARGET_NR_getrlimit: 9106 { 9107 int resource = target_to_host_resource(arg1); 9108 struct target_rlimit *target_rlim; 9109 struct rlimit rlim; 9110 9111 ret = get_errno(getrlimit(resource, &rlim)); 9112 if (!is_error(ret)) { 9113 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9114 goto efault; 9115 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9116 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9117 unlock_user_struct(target_rlim, arg2, 1); 9118 } 9119 } 9120 break; 9121 case TARGET_NR_getrusage: 9122 { 9123 struct rusage rusage; 9124 ret = get_errno(getrusage(arg1, &rusage)); 9125 if (!is_error(ret)) { 9126 ret = host_to_target_rusage(arg2, &rusage); 9127 } 9128 } 9129 break; 9130 case TARGET_NR_gettimeofday: 9131 { 9132 struct timeval tv; 9133 ret = get_errno(gettimeofday(&tv, NULL)); 9134 if (!is_error(ret)) { 9135 if (copy_to_user_timeval(arg1, &tv)) 9136 goto efault; 9137 } 9138 } 9139 break; 9140 case TARGET_NR_settimeofday: 9141 { 9142 struct timeval tv, *ptv = NULL; 9143 struct timezone tz, *ptz = NULL; 9144 9145 if (arg1) { 9146 if (copy_from_user_timeval(&tv, arg1)) { 9147 goto efault; 9148 } 9149 ptv = &tv; 9150 } 9151 9152 if (arg2) { 9153 if (copy_from_user_timezone(&tz, arg2)) { 9154 goto efault; 9155 } 9156 ptz = &tz; 9157 } 9158 9159 ret = get_errno(settimeofday(ptv, ptz)); 9160 } 9161 break; 9162 #if defined(TARGET_NR_select) 9163 case TARGET_NR_select: 9164 #if defined(TARGET_WANT_NI_OLD_SELECT) 9165 /* some architectures used to have 
old_select here 9166 * but now ENOSYS it. 9167 */ 9168 ret = -TARGET_ENOSYS; 9169 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9170 ret = do_old_select(arg1); 9171 #else 9172 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9173 #endif 9174 break; 9175 #endif 9176 #ifdef TARGET_NR_pselect6 9177 case TARGET_NR_pselect6: 9178 { 9179 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 9180 fd_set rfds, wfds, efds; 9181 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 9182 struct timespec ts, *ts_ptr; 9183 9184 /* 9185 * The 6th arg is actually two args smashed together, 9186 * so we cannot use the C library. 9187 */ 9188 sigset_t set; 9189 struct { 9190 sigset_t *set; 9191 size_t size; 9192 } sig, *sig_ptr; 9193 9194 abi_ulong arg_sigset, arg_sigsize, *arg7; 9195 target_sigset_t *target_sigset; 9196 9197 n = arg1; 9198 rfd_addr = arg2; 9199 wfd_addr = arg3; 9200 efd_addr = arg4; 9201 ts_addr = arg5; 9202 9203 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 9204 if (ret) { 9205 goto fail; 9206 } 9207 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 9208 if (ret) { 9209 goto fail; 9210 } 9211 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 9212 if (ret) { 9213 goto fail; 9214 } 9215 9216 /* 9217 * This takes a timespec, and not a timeval, so we cannot 9218 * use the do_select() helper ... 
9219 */ 9220 if (ts_addr) { 9221 if (target_to_host_timespec(&ts, ts_addr)) { 9222 goto efault; 9223 } 9224 ts_ptr = &ts; 9225 } else { 9226 ts_ptr = NULL; 9227 } 9228 9229 /* Extract the two packed args for the sigset */ 9230 if (arg6) { 9231 sig_ptr = &sig; 9232 sig.size = SIGSET_T_SIZE; 9233 9234 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 9235 if (!arg7) { 9236 goto efault; 9237 } 9238 arg_sigset = tswapal(arg7[0]); 9239 arg_sigsize = tswapal(arg7[1]); 9240 unlock_user(arg7, arg6, 0); 9241 9242 if (arg_sigset) { 9243 sig.set = &set; 9244 if (arg_sigsize != sizeof(*target_sigset)) { 9245 /* Like the kernel, we enforce correct size sigsets */ 9246 ret = -TARGET_EINVAL; 9247 goto fail; 9248 } 9249 target_sigset = lock_user(VERIFY_READ, arg_sigset, 9250 sizeof(*target_sigset), 1); 9251 if (!target_sigset) { 9252 goto efault; 9253 } 9254 target_to_host_sigset(&set, target_sigset); 9255 unlock_user(target_sigset, arg_sigset, 0); 9256 } else { 9257 sig.set = NULL; 9258 } 9259 } else { 9260 sig_ptr = NULL; 9261 } 9262 9263 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 9264 ts_ptr, sig_ptr)); 9265 9266 if (!is_error(ret)) { 9267 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 9268 goto efault; 9269 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 9270 goto efault; 9271 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 9272 goto efault; 9273 9274 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 9275 goto efault; 9276 } 9277 } 9278 break; 9279 #endif 9280 #ifdef TARGET_NR_symlink 9281 case TARGET_NR_symlink: 9282 { 9283 void *p2; 9284 p = lock_user_string(arg1); 9285 p2 = lock_user_string(arg2); 9286 if (!p || !p2) 9287 ret = -TARGET_EFAULT; 9288 else 9289 ret = get_errno(symlink(p, p2)); 9290 unlock_user(p2, arg2, 0); 9291 unlock_user(p, arg1, 0); 9292 } 9293 break; 9294 #endif 9295 #if defined(TARGET_NR_symlinkat) 9296 case TARGET_NR_symlinkat: 9297 { 9298 void *p2; 9299 p = lock_user_string(arg1); 9300 p2 = 
lock_user_string(arg3); 9301 if (!p || !p2) 9302 ret = -TARGET_EFAULT; 9303 else 9304 ret = get_errno(symlinkat(p, arg2, p2)); 9305 unlock_user(p2, arg3, 0); 9306 unlock_user(p, arg1, 0); 9307 } 9308 break; 9309 #endif 9310 #ifdef TARGET_NR_oldlstat 9311 case TARGET_NR_oldlstat: 9312 goto unimplemented; 9313 #endif 9314 #ifdef TARGET_NR_readlink 9315 case TARGET_NR_readlink: 9316 { 9317 void *p2; 9318 p = lock_user_string(arg1); 9319 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9320 if (!p || !p2) { 9321 ret = -TARGET_EFAULT; 9322 } else if (!arg3) { 9323 /* Short circuit this for the magic exe check. */ 9324 ret = -TARGET_EINVAL; 9325 } else if (is_proc_myself((const char *)p, "exe")) { 9326 char real[PATH_MAX], *temp; 9327 temp = realpath(exec_path, real); 9328 /* Return value is # of bytes that we wrote to the buffer. */ 9329 if (temp == NULL) { 9330 ret = get_errno(-1); 9331 } else { 9332 /* Don't worry about sign mismatch as earlier mapping 9333 * logic would have thrown a bad address error. */ 9334 ret = MIN(strlen(real), arg3); 9335 /* We cannot NUL terminate the string. */ 9336 memcpy(p2, real, ret); 9337 } 9338 } else { 9339 ret = get_errno(readlink(path(p), p2, arg3)); 9340 } 9341 unlock_user(p2, arg2, ret); 9342 unlock_user(p, arg1, 0); 9343 } 9344 break; 9345 #endif 9346 #if defined(TARGET_NR_readlinkat) 9347 case TARGET_NR_readlinkat: 9348 { 9349 void *p2; 9350 p = lock_user_string(arg2); 9351 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9352 if (!p || !p2) { 9353 ret = -TARGET_EFAULT; 9354 } else if (is_proc_myself((const char *)p, "exe")) { 9355 char real[PATH_MAX], *temp; 9356 temp = realpath(exec_path, real); 9357 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 9358 snprintf((char *)p2, arg4, "%s", real); 9359 } else { 9360 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 9361 } 9362 unlock_user(p2, arg3, ret); 9363 unlock_user(p, arg2, 0); 9364 } 9365 break; 9366 #endif 9367 #ifdef TARGET_NR_uselib 9368 case TARGET_NR_uselib: 9369 goto unimplemented; 9370 #endif 9371 #ifdef TARGET_NR_swapon 9372 case TARGET_NR_swapon: 9373 if (!(p = lock_user_string(arg1))) 9374 goto efault; 9375 ret = get_errno(swapon(p, arg2)); 9376 unlock_user(p, arg1, 0); 9377 break; 9378 #endif 9379 case TARGET_NR_reboot: 9380 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 9381 /* arg4 must be ignored in all other cases */ 9382 p = lock_user_string(arg4); 9383 if (!p) { 9384 goto efault; 9385 } 9386 ret = get_errno(reboot(arg1, arg2, arg3, p)); 9387 unlock_user(p, arg4, 0); 9388 } else { 9389 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 9390 } 9391 break; 9392 #ifdef TARGET_NR_readdir 9393 case TARGET_NR_readdir: 9394 goto unimplemented; 9395 #endif 9396 #ifdef TARGET_NR_mmap 9397 case TARGET_NR_mmap: 9398 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 9399 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 9400 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 9401 || defined(TARGET_S390X) 9402 { 9403 abi_ulong *v; 9404 abi_ulong v1, v2, v3, v4, v5, v6; 9405 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 9406 goto efault; 9407 v1 = tswapal(v[0]); 9408 v2 = tswapal(v[1]); 9409 v3 = tswapal(v[2]); 9410 v4 = tswapal(v[3]); 9411 v5 = tswapal(v[4]); 9412 v6 = tswapal(v[5]); 9413 unlock_user(v, arg1, 0); 9414 ret = get_errno(target_mmap(v1, v2, v3, 9415 target_to_host_bitmask(v4, mmap_flags_tbl), 9416 v5, v6)); 9417 } 9418 #else 9419 ret = get_errno(target_mmap(arg1, arg2, arg3, 9420 target_to_host_bitmask(arg4, mmap_flags_tbl), 9421 arg5, 9422 arg6)); 9423 #endif 9424 break; 9425 #endif 9426 #ifdef TARGET_NR_mmap2 9427 case TARGET_NR_mmap2: 9428 #ifndef 
MMAP_SHIFT 9429 #define MMAP_SHIFT 12 9430 #endif 9431 ret = get_errno(target_mmap(arg1, arg2, arg3, 9432 target_to_host_bitmask(arg4, mmap_flags_tbl), 9433 arg5, 9434 arg6 << MMAP_SHIFT)); 9435 break; 9436 #endif 9437 case TARGET_NR_munmap: 9438 ret = get_errno(target_munmap(arg1, arg2)); 9439 break; 9440 case TARGET_NR_mprotect: 9441 { 9442 TaskState *ts = cpu->opaque; 9443 /* Special hack to detect libc making the stack executable. */ 9444 if ((arg3 & PROT_GROWSDOWN) 9445 && arg1 >= ts->info->stack_limit 9446 && arg1 <= ts->info->start_stack) { 9447 arg3 &= ~PROT_GROWSDOWN; 9448 arg2 = arg2 + arg1 - ts->info->stack_limit; 9449 arg1 = ts->info->stack_limit; 9450 } 9451 } 9452 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 9453 break; 9454 #ifdef TARGET_NR_mremap 9455 case TARGET_NR_mremap: 9456 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 9457 break; 9458 #endif 9459 /* ??? msync/mlock/munlock are broken for softmmu. */ 9460 #ifdef TARGET_NR_msync 9461 case TARGET_NR_msync: 9462 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 9463 break; 9464 #endif 9465 #ifdef TARGET_NR_mlock 9466 case TARGET_NR_mlock: 9467 ret = get_errno(mlock(g2h(arg1), arg2)); 9468 break; 9469 #endif 9470 #ifdef TARGET_NR_munlock 9471 case TARGET_NR_munlock: 9472 ret = get_errno(munlock(g2h(arg1), arg2)); 9473 break; 9474 #endif 9475 #ifdef TARGET_NR_mlockall 9476 case TARGET_NR_mlockall: 9477 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 9478 break; 9479 #endif 9480 #ifdef TARGET_NR_munlockall 9481 case TARGET_NR_munlockall: 9482 ret = get_errno(munlockall()); 9483 break; 9484 #endif 9485 case TARGET_NR_truncate: 9486 if (!(p = lock_user_string(arg1))) 9487 goto efault; 9488 ret = get_errno(truncate(p, arg2)); 9489 unlock_user(p, arg1, 0); 9490 break; 9491 case TARGET_NR_ftruncate: 9492 ret = get_errno(ftruncate(arg1, arg2)); 9493 break; 9494 case TARGET_NR_fchmod: 9495 ret = get_errno(fchmod(arg1, arg2)); 9496 break; 9497 #if defined(TARGET_NR_fchmodat) 
9498 case TARGET_NR_fchmodat: 9499 if (!(p = lock_user_string(arg2))) 9500 goto efault; 9501 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 9502 unlock_user(p, arg2, 0); 9503 break; 9504 #endif 9505 case TARGET_NR_getpriority: 9506 /* Note that negative values are valid for getpriority, so we must 9507 differentiate based on errno settings. */ 9508 errno = 0; 9509 ret = getpriority(arg1, arg2); 9510 if (ret == -1 && errno != 0) { 9511 ret = -host_to_target_errno(errno); 9512 break; 9513 } 9514 #ifdef TARGET_ALPHA 9515 /* Return value is the unbiased priority. Signal no error. */ 9516 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 9517 #else 9518 /* Return value is a biased priority to avoid negative numbers. */ 9519 ret = 20 - ret; 9520 #endif 9521 break; 9522 case TARGET_NR_setpriority: 9523 ret = get_errno(setpriority(arg1, arg2, arg3)); 9524 break; 9525 #ifdef TARGET_NR_profil 9526 case TARGET_NR_profil: 9527 goto unimplemented; 9528 #endif 9529 case TARGET_NR_statfs: 9530 if (!(p = lock_user_string(arg1))) 9531 goto efault; 9532 ret = get_errno(statfs(path(p), &stfs)); 9533 unlock_user(p, arg1, 0); 9534 convert_statfs: 9535 if (!is_error(ret)) { 9536 struct target_statfs *target_stfs; 9537 9538 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 9539 goto efault; 9540 __put_user(stfs.f_type, &target_stfs->f_type); 9541 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9542 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9543 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9544 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9545 __put_user(stfs.f_files, &target_stfs->f_files); 9546 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9547 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9548 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9549 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9550 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9551 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9552 
unlock_user_struct(target_stfs, arg2, 1); 9553 } 9554 break; 9555 case TARGET_NR_fstatfs: 9556 ret = get_errno(fstatfs(arg1, &stfs)); 9557 goto convert_statfs; 9558 #ifdef TARGET_NR_statfs64 9559 case TARGET_NR_statfs64: 9560 if (!(p = lock_user_string(arg1))) 9561 goto efault; 9562 ret = get_errno(statfs(path(p), &stfs)); 9563 unlock_user(p, arg1, 0); 9564 convert_statfs64: 9565 if (!is_error(ret)) { 9566 struct target_statfs64 *target_stfs; 9567 9568 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 9569 goto efault; 9570 __put_user(stfs.f_type, &target_stfs->f_type); 9571 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9572 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9573 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9574 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9575 __put_user(stfs.f_files, &target_stfs->f_files); 9576 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9577 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9578 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9579 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9580 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9581 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9582 unlock_user_struct(target_stfs, arg3, 1); 9583 } 9584 break; 9585 case TARGET_NR_fstatfs64: 9586 ret = get_errno(fstatfs(arg1, &stfs)); 9587 goto convert_statfs64; 9588 #endif 9589 #ifdef TARGET_NR_ioperm 9590 case TARGET_NR_ioperm: 9591 goto unimplemented; 9592 #endif 9593 #ifdef TARGET_NR_socketcall 9594 case TARGET_NR_socketcall: 9595 ret = do_socketcall(arg1, arg2); 9596 break; 9597 #endif 9598 #ifdef TARGET_NR_accept 9599 case TARGET_NR_accept: 9600 ret = do_accept4(arg1, arg2, arg3, 0); 9601 break; 9602 #endif 9603 #ifdef TARGET_NR_accept4 9604 case TARGET_NR_accept4: 9605 ret = do_accept4(arg1, arg2, arg3, arg4); 9606 break; 9607 #endif 9608 #ifdef TARGET_NR_bind 9609 case TARGET_NR_bind: 9610 ret = do_bind(arg1, arg2, arg3); 9611 break; 9612 
#endif 9613 #ifdef TARGET_NR_connect 9614 case TARGET_NR_connect: 9615 ret = do_connect(arg1, arg2, arg3); 9616 break; 9617 #endif 9618 #ifdef TARGET_NR_getpeername 9619 case TARGET_NR_getpeername: 9620 ret = do_getpeername(arg1, arg2, arg3); 9621 break; 9622 #endif 9623 #ifdef TARGET_NR_getsockname 9624 case TARGET_NR_getsockname: 9625 ret = do_getsockname(arg1, arg2, arg3); 9626 break; 9627 #endif 9628 #ifdef TARGET_NR_getsockopt 9629 case TARGET_NR_getsockopt: 9630 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 9631 break; 9632 #endif 9633 #ifdef TARGET_NR_listen 9634 case TARGET_NR_listen: 9635 ret = get_errno(listen(arg1, arg2)); 9636 break; 9637 #endif 9638 #ifdef TARGET_NR_recv 9639 case TARGET_NR_recv: 9640 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 9641 break; 9642 #endif 9643 #ifdef TARGET_NR_recvfrom 9644 case TARGET_NR_recvfrom: 9645 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 9646 break; 9647 #endif 9648 #ifdef TARGET_NR_recvmsg 9649 case TARGET_NR_recvmsg: 9650 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 9651 break; 9652 #endif 9653 #ifdef TARGET_NR_send 9654 case TARGET_NR_send: 9655 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 9656 break; 9657 #endif 9658 #ifdef TARGET_NR_sendmsg 9659 case TARGET_NR_sendmsg: 9660 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 9661 break; 9662 #endif 9663 #ifdef TARGET_NR_sendmmsg 9664 case TARGET_NR_sendmmsg: 9665 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 9666 break; 9667 case TARGET_NR_recvmmsg: 9668 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 9669 break; 9670 #endif 9671 #ifdef TARGET_NR_sendto 9672 case TARGET_NR_sendto: 9673 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 9674 break; 9675 #endif 9676 #ifdef TARGET_NR_shutdown 9677 case TARGET_NR_shutdown: 9678 ret = get_errno(shutdown(arg1, arg2)); 9679 break; 9680 #endif 9681 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 9682 case TARGET_NR_getrandom: 9683 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9684 if 
(!p) { 9685 goto efault; 9686 } 9687 ret = get_errno(getrandom(p, arg2, arg3)); 9688 unlock_user(p, arg1, ret); 9689 break; 9690 #endif 9691 #ifdef TARGET_NR_socket 9692 case TARGET_NR_socket: 9693 ret = do_socket(arg1, arg2, arg3); 9694 break; 9695 #endif 9696 #ifdef TARGET_NR_socketpair 9697 case TARGET_NR_socketpair: 9698 ret = do_socketpair(arg1, arg2, arg3, arg4); 9699 break; 9700 #endif 9701 #ifdef TARGET_NR_setsockopt 9702 case TARGET_NR_setsockopt: 9703 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 9704 break; 9705 #endif 9706 #if defined(TARGET_NR_syslog) 9707 case TARGET_NR_syslog: 9708 { 9709 int len = arg2; 9710 9711 switch (arg1) { 9712 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 9713 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 9714 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 9715 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 9716 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 9717 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 9718 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 9719 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 9720 { 9721 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 9722 } 9723 break; 9724 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 9725 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 9726 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 9727 { 9728 ret = -TARGET_EINVAL; 9729 if (len < 0) { 9730 goto fail; 9731 } 9732 ret = 0; 9733 if (len == 0) { 9734 break; 9735 } 9736 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9737 if (!p) { 9738 ret = -TARGET_EFAULT; 9739 goto fail; 9740 } 9741 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 9742 unlock_user(p, arg2, arg3); 9743 } 9744 break; 9745 default: 9746 ret = -EINVAL; 9747 break; 9748 } 9749 } 9750 break; 9751 #endif 9752 case TARGET_NR_setitimer: 9753 { 9754 struct itimerval value, ovalue, *pvalue; 9755 9756 if (arg2) { 
9757 pvalue = &value; 9758 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 9759 || copy_from_user_timeval(&pvalue->it_value, 9760 arg2 + sizeof(struct target_timeval))) 9761 goto efault; 9762 } else { 9763 pvalue = NULL; 9764 } 9765 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 9766 if (!is_error(ret) && arg3) { 9767 if (copy_to_user_timeval(arg3, 9768 &ovalue.it_interval) 9769 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 9770 &ovalue.it_value)) 9771 goto efault; 9772 } 9773 } 9774 break; 9775 case TARGET_NR_getitimer: 9776 { 9777 struct itimerval value; 9778 9779 ret = get_errno(getitimer(arg1, &value)); 9780 if (!is_error(ret) && arg2) { 9781 if (copy_to_user_timeval(arg2, 9782 &value.it_interval) 9783 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 9784 &value.it_value)) 9785 goto efault; 9786 } 9787 } 9788 break; 9789 #ifdef TARGET_NR_stat 9790 case TARGET_NR_stat: 9791 if (!(p = lock_user_string(arg1))) 9792 goto efault; 9793 ret = get_errno(stat(path(p), &st)); 9794 unlock_user(p, arg1, 0); 9795 goto do_stat; 9796 #endif 9797 #ifdef TARGET_NR_lstat 9798 case TARGET_NR_lstat: 9799 if (!(p = lock_user_string(arg1))) 9800 goto efault; 9801 ret = get_errno(lstat(path(p), &st)); 9802 unlock_user(p, arg1, 0); 9803 goto do_stat; 9804 #endif 9805 case TARGET_NR_fstat: 9806 { 9807 ret = get_errno(fstat(arg1, &st)); 9808 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 9809 do_stat: 9810 #endif 9811 if (!is_error(ret)) { 9812 struct target_stat *target_st; 9813 9814 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 9815 goto efault; 9816 memset(target_st, 0, sizeof(*target_st)); 9817 __put_user(st.st_dev, &target_st->st_dev); 9818 __put_user(st.st_ino, &target_st->st_ino); 9819 __put_user(st.st_mode, &target_st->st_mode); 9820 __put_user(st.st_uid, &target_st->st_uid); 9821 __put_user(st.st_gid, &target_st->st_gid); 9822 __put_user(st.st_nlink, &target_st->st_nlink); 9823 __put_user(st.st_rdev, 
&target_st->st_rdev); 9824 __put_user(st.st_size, &target_st->st_size); 9825 __put_user(st.st_blksize, &target_st->st_blksize); 9826 __put_user(st.st_blocks, &target_st->st_blocks); 9827 __put_user(st.st_atime, &target_st->target_st_atime); 9828 __put_user(st.st_mtime, &target_st->target_st_mtime); 9829 __put_user(st.st_ctime, &target_st->target_st_ctime); 9830 unlock_user_struct(target_st, arg2, 1); 9831 } 9832 } 9833 break; 9834 #ifdef TARGET_NR_olduname 9835 case TARGET_NR_olduname: 9836 goto unimplemented; 9837 #endif 9838 #ifdef TARGET_NR_iopl 9839 case TARGET_NR_iopl: 9840 goto unimplemented; 9841 #endif 9842 case TARGET_NR_vhangup: 9843 ret = get_errno(vhangup()); 9844 break; 9845 #ifdef TARGET_NR_idle 9846 case TARGET_NR_idle: 9847 goto unimplemented; 9848 #endif 9849 #ifdef TARGET_NR_syscall 9850 case TARGET_NR_syscall: 9851 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 9852 arg6, arg7, arg8, 0); 9853 break; 9854 #endif 9855 case TARGET_NR_wait4: 9856 { 9857 int status; 9858 abi_long status_ptr = arg2; 9859 struct rusage rusage, *rusage_ptr; 9860 abi_ulong target_rusage = arg4; 9861 abi_long rusage_err; 9862 if (target_rusage) 9863 rusage_ptr = &rusage; 9864 else 9865 rusage_ptr = NULL; 9866 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 9867 if (!is_error(ret)) { 9868 if (status_ptr && ret) { 9869 status = host_to_target_waitstatus(status); 9870 if (put_user_s32(status, status_ptr)) 9871 goto efault; 9872 } 9873 if (target_rusage) { 9874 rusage_err = host_to_target_rusage(target_rusage, &rusage); 9875 if (rusage_err) { 9876 ret = rusage_err; 9877 } 9878 } 9879 } 9880 } 9881 break; 9882 #ifdef TARGET_NR_swapoff 9883 case TARGET_NR_swapoff: 9884 if (!(p = lock_user_string(arg1))) 9885 goto efault; 9886 ret = get_errno(swapoff(p)); 9887 unlock_user(p, arg1, 0); 9888 break; 9889 #endif 9890 case TARGET_NR_sysinfo: 9891 { 9892 struct target_sysinfo *target_value; 9893 struct sysinfo value; 9894 ret = 
get_errno(sysinfo(&value)); 9895 if (!is_error(ret) && arg1) 9896 { 9897 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 9898 goto efault; 9899 __put_user(value.uptime, &target_value->uptime); 9900 __put_user(value.loads[0], &target_value->loads[0]); 9901 __put_user(value.loads[1], &target_value->loads[1]); 9902 __put_user(value.loads[2], &target_value->loads[2]); 9903 __put_user(value.totalram, &target_value->totalram); 9904 __put_user(value.freeram, &target_value->freeram); 9905 __put_user(value.sharedram, &target_value->sharedram); 9906 __put_user(value.bufferram, &target_value->bufferram); 9907 __put_user(value.totalswap, &target_value->totalswap); 9908 __put_user(value.freeswap, &target_value->freeswap); 9909 __put_user(value.procs, &target_value->procs); 9910 __put_user(value.totalhigh, &target_value->totalhigh); 9911 __put_user(value.freehigh, &target_value->freehigh); 9912 __put_user(value.mem_unit, &target_value->mem_unit); 9913 unlock_user_struct(target_value, arg1, 1); 9914 } 9915 } 9916 break; 9917 #ifdef TARGET_NR_ipc 9918 case TARGET_NR_ipc: 9919 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 9920 break; 9921 #endif 9922 #ifdef TARGET_NR_semget 9923 case TARGET_NR_semget: 9924 ret = get_errno(semget(arg1, arg2, arg3)); 9925 break; 9926 #endif 9927 #ifdef TARGET_NR_semop 9928 case TARGET_NR_semop: 9929 ret = do_semop(arg1, arg2, arg3); 9930 break; 9931 #endif 9932 #ifdef TARGET_NR_semctl 9933 case TARGET_NR_semctl: 9934 ret = do_semctl(arg1, arg2, arg3, arg4); 9935 break; 9936 #endif 9937 #ifdef TARGET_NR_msgctl 9938 case TARGET_NR_msgctl: 9939 ret = do_msgctl(arg1, arg2, arg3); 9940 break; 9941 #endif 9942 #ifdef TARGET_NR_msgget 9943 case TARGET_NR_msgget: 9944 ret = get_errno(msgget(arg1, arg2)); 9945 break; 9946 #endif 9947 #ifdef TARGET_NR_msgrcv 9948 case TARGET_NR_msgrcv: 9949 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 9950 break; 9951 #endif 9952 #ifdef TARGET_NR_msgsnd 9953 case TARGET_NR_msgsnd: 9954 ret = 
do_msgsnd(arg1, arg2, arg3, arg4); 9955 break; 9956 #endif 9957 #ifdef TARGET_NR_shmget 9958 case TARGET_NR_shmget: 9959 ret = get_errno(shmget(arg1, arg2, arg3)); 9960 break; 9961 #endif 9962 #ifdef TARGET_NR_shmctl 9963 case TARGET_NR_shmctl: 9964 ret = do_shmctl(arg1, arg2, arg3); 9965 break; 9966 #endif 9967 #ifdef TARGET_NR_shmat 9968 case TARGET_NR_shmat: 9969 ret = do_shmat(cpu_env, arg1, arg2, arg3); 9970 break; 9971 #endif 9972 #ifdef TARGET_NR_shmdt 9973 case TARGET_NR_shmdt: 9974 ret = do_shmdt(arg1); 9975 break; 9976 #endif 9977 case TARGET_NR_fsync: 9978 ret = get_errno(fsync(arg1)); 9979 break; 9980 case TARGET_NR_clone: 9981 /* Linux manages to have three different orderings for its 9982 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 9983 * match the kernel's CONFIG_CLONE_* settings. 9984 * Microblaze is further special in that it uses a sixth 9985 * implicit argument to clone for the TLS pointer. 9986 */ 9987 #if defined(TARGET_MICROBLAZE) 9988 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 9989 #elif defined(TARGET_CLONE_BACKWARDS) 9990 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 9991 #elif defined(TARGET_CLONE_BACKWARDS2) 9992 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 9993 #else 9994 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 9995 #endif 9996 break; 9997 #ifdef __NR_exit_group 9998 /* new thread calls */ 9999 case TARGET_NR_exit_group: 10000 #ifdef TARGET_GPROF 10001 _mcleanup(); 10002 #endif 10003 gdb_exit(cpu_env, arg1); 10004 ret = get_errno(exit_group(arg1)); 10005 break; 10006 #endif 10007 case TARGET_NR_setdomainname: 10008 if (!(p = lock_user_string(arg1))) 10009 goto efault; 10010 ret = get_errno(setdomainname(p, arg2)); 10011 unlock_user(p, arg1, 0); 10012 break; 10013 case TARGET_NR_uname: 10014 /* no need to transcode because we use the linux syscall */ 10015 { 10016 struct new_utsname * buf; 10017 10018 if (!lock_user_struct(VERIFY_WRITE, 
buf, arg1, 0)) 10019 goto efault; 10020 ret = get_errno(sys_uname(buf)); 10021 if (!is_error(ret)) { 10022 /* Overwrite the native machine name with whatever is being 10023 emulated. */ 10024 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 10025 /* Allow the user to override the reported release. */ 10026 if (qemu_uname_release && *qemu_uname_release) { 10027 g_strlcpy(buf->release, qemu_uname_release, 10028 sizeof(buf->release)); 10029 } 10030 } 10031 unlock_user_struct(buf, arg1, 1); 10032 } 10033 break; 10034 #ifdef TARGET_I386 10035 case TARGET_NR_modify_ldt: 10036 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 10037 break; 10038 #if !defined(TARGET_X86_64) 10039 case TARGET_NR_vm86old: 10040 goto unimplemented; 10041 case TARGET_NR_vm86: 10042 ret = do_vm86(cpu_env, arg1, arg2); 10043 break; 10044 #endif 10045 #endif 10046 case TARGET_NR_adjtimex: 10047 { 10048 struct timex host_buf; 10049 10050 if (target_to_host_timex(&host_buf, arg1) != 0) { 10051 goto efault; 10052 } 10053 ret = get_errno(adjtimex(&host_buf)); 10054 if (!is_error(ret)) { 10055 if (host_to_target_timex(arg1, &host_buf) != 0) { 10056 goto efault; 10057 } 10058 } 10059 } 10060 break; 10061 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10062 case TARGET_NR_clock_adjtime: 10063 { 10064 struct timex htx, *phtx = &htx; 10065 10066 if (target_to_host_timex(phtx, arg2) != 0) { 10067 goto efault; 10068 } 10069 ret = get_errno(clock_adjtime(arg1, phtx)); 10070 if (!is_error(ret) && phtx) { 10071 if (host_to_target_timex(arg2, phtx) != 0) { 10072 goto efault; 10073 } 10074 } 10075 } 10076 break; 10077 #endif 10078 #ifdef TARGET_NR_create_module 10079 case TARGET_NR_create_module: 10080 #endif 10081 case TARGET_NR_init_module: 10082 case TARGET_NR_delete_module: 10083 #ifdef TARGET_NR_get_kernel_syms 10084 case TARGET_NR_get_kernel_syms: 10085 #endif 10086 goto unimplemented; 10087 case TARGET_NR_quotactl: 10088 goto unimplemented; 10089 case TARGET_NR_getpgid: 10090 ret 
= get_errno(getpgid(arg1)); 10091 break; 10092 case TARGET_NR_fchdir: 10093 ret = get_errno(fchdir(arg1)); 10094 break; 10095 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 10096 case TARGET_NR_bdflush: 10097 goto unimplemented; 10098 #endif 10099 #ifdef TARGET_NR_sysfs 10100 case TARGET_NR_sysfs: 10101 goto unimplemented; 10102 #endif 10103 case TARGET_NR_personality: 10104 ret = get_errno(personality(arg1)); 10105 break; 10106 #ifdef TARGET_NR_afs_syscall 10107 case TARGET_NR_afs_syscall: 10108 goto unimplemented; 10109 #endif 10110 #ifdef TARGET_NR__llseek /* Not on alpha */ 10111 case TARGET_NR__llseek: 10112 { 10113 int64_t res; 10114 #if !defined(__NR_llseek) 10115 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10116 if (res == -1) { 10117 ret = get_errno(res); 10118 } else { 10119 ret = 0; 10120 } 10121 #else 10122 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10123 #endif 10124 if ((ret == 0) && put_user_s64(res, arg4)) { 10125 goto efault; 10126 } 10127 } 10128 break; 10129 #endif 10130 #ifdef TARGET_NR_getdents 10131 case TARGET_NR_getdents: 10132 #ifdef __NR_getdents 10133 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 10134 { 10135 struct target_dirent *target_dirp; 10136 struct linux_dirent *dirp; 10137 abi_long count = arg3; 10138 10139 dirp = g_try_malloc(count); 10140 if (!dirp) { 10141 ret = -TARGET_ENOMEM; 10142 goto fail; 10143 } 10144 10145 ret = get_errno(sys_getdents(arg1, dirp, count)); 10146 if (!is_error(ret)) { 10147 struct linux_dirent *de; 10148 struct target_dirent *tde; 10149 int len = ret; 10150 int reclen, treclen; 10151 int count1, tnamelen; 10152 10153 count1 = 0; 10154 de = dirp; 10155 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10156 goto efault; 10157 tde = target_dirp; 10158 while (len > 0) { 10159 reclen = de->d_reclen; 10160 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 10161 assert(tnamelen >= 0); 10162 treclen = tnamelen + offsetof(struct target_dirent, d_name); 
10163 assert(count1 + treclen <= count); 10164 tde->d_reclen = tswap16(treclen); 10165 tde->d_ino = tswapal(de->d_ino); 10166 tde->d_off = tswapal(de->d_off); 10167 memcpy(tde->d_name, de->d_name, tnamelen); 10168 de = (struct linux_dirent *)((char *)de + reclen); 10169 len -= reclen; 10170 tde = (struct target_dirent *)((char *)tde + treclen); 10171 count1 += treclen; 10172 } 10173 ret = count1; 10174 unlock_user(target_dirp, arg2, ret); 10175 } 10176 g_free(dirp); 10177 } 10178 #else 10179 { 10180 struct linux_dirent *dirp; 10181 abi_long count = arg3; 10182 10183 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10184 goto efault; 10185 ret = get_errno(sys_getdents(arg1, dirp, count)); 10186 if (!is_error(ret)) { 10187 struct linux_dirent *de; 10188 int len = ret; 10189 int reclen; 10190 de = dirp; 10191 while (len > 0) { 10192 reclen = de->d_reclen; 10193 if (reclen > len) 10194 break; 10195 de->d_reclen = tswap16(reclen); 10196 tswapls(&de->d_ino); 10197 tswapls(&de->d_off); 10198 de = (struct linux_dirent *)((char *)de + reclen); 10199 len -= reclen; 10200 } 10201 } 10202 unlock_user(dirp, arg2, ret); 10203 } 10204 #endif 10205 #else 10206 /* Implement getdents in terms of getdents64 */ 10207 { 10208 struct linux_dirent64 *dirp; 10209 abi_long count = arg3; 10210 10211 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 10212 if (!dirp) { 10213 goto efault; 10214 } 10215 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10216 if (!is_error(ret)) { 10217 /* Convert the dirent64 structs to target dirent. We do this 10218 * in-place, since we can guarantee that a target_dirent is no 10219 * larger than a dirent64; however this means we have to be 10220 * careful to read everything before writing in the new format. 
10221 */ 10222 struct linux_dirent64 *de; 10223 struct target_dirent *tde; 10224 int len = ret; 10225 int tlen = 0; 10226 10227 de = dirp; 10228 tde = (struct target_dirent *)dirp; 10229 while (len > 0) { 10230 int namelen, treclen; 10231 int reclen = de->d_reclen; 10232 uint64_t ino = de->d_ino; 10233 int64_t off = de->d_off; 10234 uint8_t type = de->d_type; 10235 10236 namelen = strlen(de->d_name); 10237 treclen = offsetof(struct target_dirent, d_name) 10238 + namelen + 2; 10239 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 10240 10241 memmove(tde->d_name, de->d_name, namelen + 1); 10242 tde->d_ino = tswapal(ino); 10243 tde->d_off = tswapal(off); 10244 tde->d_reclen = tswap16(treclen); 10245 /* The target_dirent type is in what was formerly a padding 10246 * byte at the end of the structure: 10247 */ 10248 *(((char *)tde) + treclen - 1) = type; 10249 10250 de = (struct linux_dirent64 *)((char *)de + reclen); 10251 tde = (struct target_dirent *)((char *)tde + treclen); 10252 len -= reclen; 10253 tlen += treclen; 10254 } 10255 ret = tlen; 10256 } 10257 unlock_user(dirp, arg2, ret); 10258 } 10259 #endif 10260 break; 10261 #endif /* TARGET_NR_getdents */ 10262 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10263 case TARGET_NR_getdents64: 10264 { 10265 struct linux_dirent64 *dirp; 10266 abi_long count = arg3; 10267 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10268 goto efault; 10269 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10270 if (!is_error(ret)) { 10271 struct linux_dirent64 *de; 10272 int len = ret; 10273 int reclen; 10274 de = dirp; 10275 while (len > 0) { 10276 reclen = de->d_reclen; 10277 if (reclen > len) 10278 break; 10279 de->d_reclen = tswap16(reclen); 10280 tswap64s((uint64_t *)&de->d_ino); 10281 tswap64s((uint64_t *)&de->d_off); 10282 de = (struct linux_dirent64 *)((char *)de + reclen); 10283 len -= reclen; 10284 } 10285 } 10286 unlock_user(dirp, arg2, ret); 10287 } 10288 break; 10289 #endif /* 
TARGET_NR_getdents64 */ 10290 #if defined(TARGET_NR__newselect) 10291 case TARGET_NR__newselect: 10292 ret = do_select(arg1, arg2, arg3, arg4, arg5); 10293 break; 10294 #endif 10295 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 10296 # ifdef TARGET_NR_poll 10297 case TARGET_NR_poll: 10298 # endif 10299 # ifdef TARGET_NR_ppoll 10300 case TARGET_NR_ppoll: 10301 # endif 10302 { 10303 struct target_pollfd *target_pfd; 10304 unsigned int nfds = arg2; 10305 struct pollfd *pfd; 10306 unsigned int i; 10307 10308 pfd = NULL; 10309 target_pfd = NULL; 10310 if (nfds) { 10311 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) { 10312 ret = -TARGET_EINVAL; 10313 break; 10314 } 10315 10316 target_pfd = lock_user(VERIFY_WRITE, arg1, 10317 sizeof(struct target_pollfd) * nfds, 1); 10318 if (!target_pfd) { 10319 goto efault; 10320 } 10321 10322 pfd = alloca(sizeof(struct pollfd) * nfds); 10323 for (i = 0; i < nfds; i++) { 10324 pfd[i].fd = tswap32(target_pfd[i].fd); 10325 pfd[i].events = tswap16(target_pfd[i].events); 10326 } 10327 } 10328 10329 switch (num) { 10330 # ifdef TARGET_NR_ppoll 10331 case TARGET_NR_ppoll: 10332 { 10333 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 10334 target_sigset_t *target_set; 10335 sigset_t _set, *set = &_set; 10336 10337 if (arg3) { 10338 if (target_to_host_timespec(timeout_ts, arg3)) { 10339 unlock_user(target_pfd, arg1, 0); 10340 goto efault; 10341 } 10342 } else { 10343 timeout_ts = NULL; 10344 } 10345 10346 if (arg4) { 10347 if (arg5 != sizeof(target_sigset_t)) { 10348 unlock_user(target_pfd, arg1, 0); 10349 ret = -TARGET_EINVAL; 10350 break; 10351 } 10352 10353 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 10354 if (!target_set) { 10355 unlock_user(target_pfd, arg1, 0); 10356 goto efault; 10357 } 10358 target_to_host_sigset(set, target_set); 10359 } else { 10360 set = NULL; 10361 } 10362 10363 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts, 10364 set, SIGSET_T_SIZE)); 10365 10366 if 
(!is_error(ret) && arg3) { 10367 host_to_target_timespec(arg3, timeout_ts); 10368 } 10369 if (arg4) { 10370 unlock_user(target_set, arg4, 0); 10371 } 10372 break; 10373 } 10374 # endif 10375 # ifdef TARGET_NR_poll 10376 case TARGET_NR_poll: 10377 { 10378 struct timespec ts, *pts; 10379 10380 if (arg3 >= 0) { 10381 /* Convert ms to secs, ns */ 10382 ts.tv_sec = arg3 / 1000; 10383 ts.tv_nsec = (arg3 % 1000) * 1000000LL; 10384 pts = &ts; 10385 } else { 10386 /* -ve poll() timeout means "infinite" */ 10387 pts = NULL; 10388 } 10389 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0)); 10390 break; 10391 } 10392 # endif 10393 default: 10394 g_assert_not_reached(); 10395 } 10396 10397 if (!is_error(ret)) { 10398 for(i = 0; i < nfds; i++) { 10399 target_pfd[i].revents = tswap16(pfd[i].revents); 10400 } 10401 } 10402 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 10403 } 10404 break; 10405 #endif 10406 case TARGET_NR_flock: 10407 /* NOTE: the flock constant seems to be the same for every 10408 Linux platform */ 10409 ret = get_errno(safe_flock(arg1, arg2)); 10410 break; 10411 case TARGET_NR_readv: 10412 { 10413 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10414 if (vec != NULL) { 10415 ret = get_errno(safe_readv(arg1, vec, arg3)); 10416 unlock_iovec(vec, arg2, arg3, 1); 10417 } else { 10418 ret = -host_to_target_errno(errno); 10419 } 10420 } 10421 break; 10422 case TARGET_NR_writev: 10423 { 10424 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10425 if (vec != NULL) { 10426 ret = get_errno(safe_writev(arg1, vec, arg3)); 10427 unlock_iovec(vec, arg2, arg3, 0); 10428 } else { 10429 ret = -host_to_target_errno(errno); 10430 } 10431 } 10432 break; 10433 #if defined(TARGET_NR_preadv) 10434 case TARGET_NR_preadv: 10435 { 10436 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10437 if (vec != NULL) { 10438 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5)); 10439 unlock_iovec(vec, arg2, arg3, 1); 10440 } else { 
10441 ret = -host_to_target_errno(errno); 10442 } 10443 } 10444 break; 10445 #endif 10446 #if defined(TARGET_NR_pwritev) 10447 case TARGET_NR_pwritev: 10448 { 10449 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10450 if (vec != NULL) { 10451 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5)); 10452 unlock_iovec(vec, arg2, arg3, 0); 10453 } else { 10454 ret = -host_to_target_errno(errno); 10455 } 10456 } 10457 break; 10458 #endif 10459 case TARGET_NR_getsid: 10460 ret = get_errno(getsid(arg1)); 10461 break; 10462 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10463 case TARGET_NR_fdatasync: 10464 ret = get_errno(fdatasync(arg1)); 10465 break; 10466 #endif 10467 #ifdef TARGET_NR__sysctl 10468 case TARGET_NR__sysctl: 10469 /* We don't implement this, but ENOTDIR is always a safe 10470 return value. */ 10471 ret = -TARGET_ENOTDIR; 10472 break; 10473 #endif 10474 case TARGET_NR_sched_getaffinity: 10475 { 10476 unsigned int mask_size; 10477 unsigned long *mask; 10478 10479 /* 10480 * sched_getaffinity needs multiples of ulong, so need to take 10481 * care of mismatches between target ulong and host ulong sizes. 10482 */ 10483 if (arg2 & (sizeof(abi_ulong) - 1)) { 10484 ret = -TARGET_EINVAL; 10485 break; 10486 } 10487 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10488 10489 mask = alloca(mask_size); 10490 memset(mask, 0, mask_size); 10491 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10492 10493 if (!is_error(ret)) { 10494 if (ret > arg2) { 10495 /* More data returned than the caller's buffer will fit. 10496 * This only happens if sizeof(abi_long) < sizeof(long) 10497 * and the caller passed us a buffer holding an odd number 10498 * of abi_longs. If the host kernel is actually using the 10499 * extra 4 bytes then fail EINVAL; otherwise we can just 10500 * ignore them and only copy the interesting part. 
10501 */ 10502 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10503 if (numcpus > arg2 * 8) { 10504 ret = -TARGET_EINVAL; 10505 break; 10506 } 10507 ret = arg2; 10508 } 10509 10510 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 10511 goto efault; 10512 } 10513 } 10514 } 10515 break; 10516 case TARGET_NR_sched_setaffinity: 10517 { 10518 unsigned int mask_size; 10519 unsigned long *mask; 10520 10521 /* 10522 * sched_setaffinity needs multiples of ulong, so need to take 10523 * care of mismatches between target ulong and host ulong sizes. 10524 */ 10525 if (arg2 & (sizeof(abi_ulong) - 1)) { 10526 ret = -TARGET_EINVAL; 10527 break; 10528 } 10529 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10530 mask = alloca(mask_size); 10531 10532 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 10533 if (ret) { 10534 break; 10535 } 10536 10537 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 10538 } 10539 break; 10540 case TARGET_NR_getcpu: 10541 { 10542 unsigned cpu, node; 10543 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 10544 arg2 ? 
&node : NULL, 10545 NULL)); 10546 if (is_error(ret)) { 10547 goto fail; 10548 } 10549 if (arg1 && put_user_u32(cpu, arg1)) { 10550 goto efault; 10551 } 10552 if (arg2 && put_user_u32(node, arg2)) { 10553 goto efault; 10554 } 10555 } 10556 break; 10557 case TARGET_NR_sched_setparam: 10558 { 10559 struct sched_param *target_schp; 10560 struct sched_param schp; 10561 10562 if (arg2 == 0) { 10563 return -TARGET_EINVAL; 10564 } 10565 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 10566 goto efault; 10567 schp.sched_priority = tswap32(target_schp->sched_priority); 10568 unlock_user_struct(target_schp, arg2, 0); 10569 ret = get_errno(sched_setparam(arg1, &schp)); 10570 } 10571 break; 10572 case TARGET_NR_sched_getparam: 10573 { 10574 struct sched_param *target_schp; 10575 struct sched_param schp; 10576 10577 if (arg2 == 0) { 10578 return -TARGET_EINVAL; 10579 } 10580 ret = get_errno(sched_getparam(arg1, &schp)); 10581 if (!is_error(ret)) { 10582 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 10583 goto efault; 10584 target_schp->sched_priority = tswap32(schp.sched_priority); 10585 unlock_user_struct(target_schp, arg2, 1); 10586 } 10587 } 10588 break; 10589 case TARGET_NR_sched_setscheduler: 10590 { 10591 struct sched_param *target_schp; 10592 struct sched_param schp; 10593 if (arg3 == 0) { 10594 return -TARGET_EINVAL; 10595 } 10596 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 10597 goto efault; 10598 schp.sched_priority = tswap32(target_schp->sched_priority); 10599 unlock_user_struct(target_schp, arg3, 0); 10600 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 10601 } 10602 break; 10603 case TARGET_NR_sched_getscheduler: 10604 ret = get_errno(sched_getscheduler(arg1)); 10605 break; 10606 case TARGET_NR_sched_yield: 10607 ret = get_errno(sched_yield()); 10608 break; 10609 case TARGET_NR_sched_get_priority_max: 10610 ret = get_errno(sched_get_priority_max(arg1)); 10611 break; 10612 case TARGET_NR_sched_get_priority_min: 10613 ret = 
get_errno(sched_get_priority_min(arg1)); 10614 break; 10615 case TARGET_NR_sched_rr_get_interval: 10616 { 10617 struct timespec ts; 10618 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10619 if (!is_error(ret)) { 10620 ret = host_to_target_timespec(arg2, &ts); 10621 } 10622 } 10623 break; 10624 case TARGET_NR_nanosleep: 10625 { 10626 struct timespec req, rem; 10627 target_to_host_timespec(&req, arg1); 10628 ret = get_errno(safe_nanosleep(&req, &rem)); 10629 if (is_error(ret) && arg2) { 10630 host_to_target_timespec(arg2, &rem); 10631 } 10632 } 10633 break; 10634 #ifdef TARGET_NR_query_module 10635 case TARGET_NR_query_module: 10636 goto unimplemented; 10637 #endif 10638 #ifdef TARGET_NR_nfsservctl 10639 case TARGET_NR_nfsservctl: 10640 goto unimplemented; 10641 #endif 10642 case TARGET_NR_prctl: 10643 switch (arg1) { 10644 case PR_GET_PDEATHSIG: 10645 { 10646 int deathsig; 10647 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 10648 if (!is_error(ret) && arg2 10649 && put_user_ual(deathsig, arg2)) { 10650 goto efault; 10651 } 10652 break; 10653 } 10654 #ifdef PR_GET_NAME 10655 case PR_GET_NAME: 10656 { 10657 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 10658 if (!name) { 10659 goto efault; 10660 } 10661 ret = get_errno(prctl(arg1, (unsigned long)name, 10662 arg3, arg4, arg5)); 10663 unlock_user(name, arg2, 16); 10664 break; 10665 } 10666 case PR_SET_NAME: 10667 { 10668 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 10669 if (!name) { 10670 goto efault; 10671 } 10672 ret = get_errno(prctl(arg1, (unsigned long)name, 10673 arg3, arg4, arg5)); 10674 unlock_user(name, arg2, 0); 10675 break; 10676 } 10677 #endif 10678 #ifdef TARGET_AARCH64 10679 case TARGET_PR_SVE_SET_VL: 10680 /* We cannot support either PR_SVE_SET_VL_ONEXEC 10681 or PR_SVE_VL_INHERIT. Therefore, anything above 10682 ARM_MAX_VQ results in EINVAL. 
*/ 10683 ret = -TARGET_EINVAL; 10684 if (arm_feature(cpu_env, ARM_FEATURE_SVE) 10685 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) { 10686 CPUARMState *env = cpu_env; 10687 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1; 10688 int vq = MAX(arg2 / 16, 1); 10689 10690 if (vq < old_vq) { 10691 aarch64_sve_narrow_vq(env, vq); 10692 } 10693 env->vfp.zcr_el[1] = vq - 1; 10694 ret = vq * 16; 10695 } 10696 break; 10697 case TARGET_PR_SVE_GET_VL: 10698 ret = -TARGET_EINVAL; 10699 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) { 10700 CPUARMState *env = cpu_env; 10701 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16; 10702 } 10703 break; 10704 #endif /* AARCH64 */ 10705 case PR_GET_SECCOMP: 10706 case PR_SET_SECCOMP: 10707 /* Disable seccomp to prevent the target disabling syscalls we 10708 * need. */ 10709 ret = -TARGET_EINVAL; 10710 break; 10711 default: 10712 /* Most prctl options have no pointer arguments */ 10713 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 10714 break; 10715 } 10716 break; 10717 #ifdef TARGET_NR_arch_prctl 10718 case TARGET_NR_arch_prctl: 10719 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 10720 ret = do_arch_prctl(cpu_env, arg1, arg2); 10721 break; 10722 #else 10723 goto unimplemented; 10724 #endif 10725 #endif 10726 #ifdef TARGET_NR_pread64 10727 case TARGET_NR_pread64: 10728 if (regpairs_aligned(cpu_env, num)) { 10729 arg4 = arg5; 10730 arg5 = arg6; 10731 } 10732 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 10733 goto efault; 10734 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 10735 unlock_user(p, arg2, ret); 10736 break; 10737 case TARGET_NR_pwrite64: 10738 if (regpairs_aligned(cpu_env, num)) { 10739 arg4 = arg5; 10740 arg5 = arg6; 10741 } 10742 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 10743 goto efault; 10744 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 10745 unlock_user(p, arg2, 0); 10746 break; 10747 #endif 10748 case TARGET_NR_getcwd: 10749 if (!(p = 
lock_user(VERIFY_WRITE, arg1, arg2, 0))) 10750 goto efault; 10751 ret = get_errno(sys_getcwd1(p, arg2)); 10752 unlock_user(p, arg1, ret); 10753 break; 10754 case TARGET_NR_capget: 10755 case TARGET_NR_capset: 10756 { 10757 struct target_user_cap_header *target_header; 10758 struct target_user_cap_data *target_data = NULL; 10759 struct __user_cap_header_struct header; 10760 struct __user_cap_data_struct data[2]; 10761 struct __user_cap_data_struct *dataptr = NULL; 10762 int i, target_datalen; 10763 int data_items = 1; 10764 10765 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 10766 goto efault; 10767 } 10768 header.version = tswap32(target_header->version); 10769 header.pid = tswap32(target_header->pid); 10770 10771 if (header.version != _LINUX_CAPABILITY_VERSION) { 10772 /* Version 2 and up takes pointer to two user_data structs */ 10773 data_items = 2; 10774 } 10775 10776 target_datalen = sizeof(*target_data) * data_items; 10777 10778 if (arg2) { 10779 if (num == TARGET_NR_capget) { 10780 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 10781 } else { 10782 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 10783 } 10784 if (!target_data) { 10785 unlock_user_struct(target_header, arg1, 0); 10786 goto efault; 10787 } 10788 10789 if (num == TARGET_NR_capset) { 10790 for (i = 0; i < data_items; i++) { 10791 data[i].effective = tswap32(target_data[i].effective); 10792 data[i].permitted = tswap32(target_data[i].permitted); 10793 data[i].inheritable = tswap32(target_data[i].inheritable); 10794 } 10795 } 10796 10797 dataptr = data; 10798 } 10799 10800 if (num == TARGET_NR_capget) { 10801 ret = get_errno(capget(&header, dataptr)); 10802 } else { 10803 ret = get_errno(capset(&header, dataptr)); 10804 } 10805 10806 /* The kernel always updates version for both capget and capset */ 10807 target_header->version = tswap32(header.version); 10808 unlock_user_struct(target_header, arg1, 1); 10809 10810 if (arg2) { 10811 if (num == 
TARGET_NR_capget) { 10812 for (i = 0; i < data_items; i++) { 10813 target_data[i].effective = tswap32(data[i].effective); 10814 target_data[i].permitted = tswap32(data[i].permitted); 10815 target_data[i].inheritable = tswap32(data[i].inheritable); 10816 } 10817 unlock_user(target_data, arg2, target_datalen); 10818 } else { 10819 unlock_user(target_data, arg2, 0); 10820 } 10821 } 10822 break; 10823 } 10824 case TARGET_NR_sigaltstack: 10825 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 10826 break; 10827 10828 #ifdef CONFIG_SENDFILE 10829 case TARGET_NR_sendfile: 10830 { 10831 off_t *offp = NULL; 10832 off_t off; 10833 if (arg3) { 10834 ret = get_user_sal(off, arg3); 10835 if (is_error(ret)) { 10836 break; 10837 } 10838 offp = &off; 10839 } 10840 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 10841 if (!is_error(ret) && arg3) { 10842 abi_long ret2 = put_user_sal(off, arg3); 10843 if (is_error(ret2)) { 10844 ret = ret2; 10845 } 10846 } 10847 break; 10848 } 10849 #ifdef TARGET_NR_sendfile64 10850 case TARGET_NR_sendfile64: 10851 { 10852 off_t *offp = NULL; 10853 off_t off; 10854 if (arg3) { 10855 ret = get_user_s64(off, arg3); 10856 if (is_error(ret)) { 10857 break; 10858 } 10859 offp = &off; 10860 } 10861 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 10862 if (!is_error(ret) && arg3) { 10863 abi_long ret2 = put_user_s64(off, arg3); 10864 if (is_error(ret2)) { 10865 ret = ret2; 10866 } 10867 } 10868 break; 10869 } 10870 #endif 10871 #else 10872 case TARGET_NR_sendfile: 10873 #ifdef TARGET_NR_sendfile64 10874 case TARGET_NR_sendfile64: 10875 #endif 10876 goto unimplemented; 10877 #endif 10878 10879 #ifdef TARGET_NR_getpmsg 10880 case TARGET_NR_getpmsg: 10881 goto unimplemented; 10882 #endif 10883 #ifdef TARGET_NR_putpmsg 10884 case TARGET_NR_putpmsg: 10885 goto unimplemented; 10886 #endif 10887 #ifdef TARGET_NR_vfork 10888 case TARGET_NR_vfork: 10889 ret = get_errno(do_fork(cpu_env, 10890 CLONE_VFORK | CLONE_VM | 
TARGET_SIGCHLD, 10891 0, 0, 0, 0)); 10892 break; 10893 #endif 10894 #ifdef TARGET_NR_ugetrlimit 10895 case TARGET_NR_ugetrlimit: 10896 { 10897 struct rlimit rlim; 10898 int resource = target_to_host_resource(arg1); 10899 ret = get_errno(getrlimit(resource, &rlim)); 10900 if (!is_error(ret)) { 10901 struct target_rlimit *target_rlim; 10902 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 10903 goto efault; 10904 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 10905 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 10906 unlock_user_struct(target_rlim, arg2, 1); 10907 } 10908 break; 10909 } 10910 #endif 10911 #ifdef TARGET_NR_truncate64 10912 case TARGET_NR_truncate64: 10913 if (!(p = lock_user_string(arg1))) 10914 goto efault; 10915 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 10916 unlock_user(p, arg1, 0); 10917 break; 10918 #endif 10919 #ifdef TARGET_NR_ftruncate64 10920 case TARGET_NR_ftruncate64: 10921 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 10922 break; 10923 #endif 10924 #ifdef TARGET_NR_stat64 10925 case TARGET_NR_stat64: 10926 if (!(p = lock_user_string(arg1))) 10927 goto efault; 10928 ret = get_errno(stat(path(p), &st)); 10929 unlock_user(p, arg1, 0); 10930 if (!is_error(ret)) 10931 ret = host_to_target_stat64(cpu_env, arg2, &st); 10932 break; 10933 #endif 10934 #ifdef TARGET_NR_lstat64 10935 case TARGET_NR_lstat64: 10936 if (!(p = lock_user_string(arg1))) 10937 goto efault; 10938 ret = get_errno(lstat(path(p), &st)); 10939 unlock_user(p, arg1, 0); 10940 if (!is_error(ret)) 10941 ret = host_to_target_stat64(cpu_env, arg2, &st); 10942 break; 10943 #endif 10944 #ifdef TARGET_NR_fstat64 10945 case TARGET_NR_fstat64: 10946 ret = get_errno(fstat(arg1, &st)); 10947 if (!is_error(ret)) 10948 ret = host_to_target_stat64(cpu_env, arg2, &st); 10949 break; 10950 #endif 10951 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 10952 #ifdef TARGET_NR_fstatat64 10953 case TARGET_NR_fstatat64: 
10954 #endif 10955 #ifdef TARGET_NR_newfstatat 10956 case TARGET_NR_newfstatat: 10957 #endif 10958 if (!(p = lock_user_string(arg2))) 10959 goto efault; 10960 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 10961 if (!is_error(ret)) 10962 ret = host_to_target_stat64(cpu_env, arg3, &st); 10963 break; 10964 #endif 10965 #ifdef TARGET_NR_lchown 10966 case TARGET_NR_lchown: 10967 if (!(p = lock_user_string(arg1))) 10968 goto efault; 10969 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 10970 unlock_user(p, arg1, 0); 10971 break; 10972 #endif 10973 #ifdef TARGET_NR_getuid 10974 case TARGET_NR_getuid: 10975 ret = get_errno(high2lowuid(getuid())); 10976 break; 10977 #endif 10978 #ifdef TARGET_NR_getgid 10979 case TARGET_NR_getgid: 10980 ret = get_errno(high2lowgid(getgid())); 10981 break; 10982 #endif 10983 #ifdef TARGET_NR_geteuid 10984 case TARGET_NR_geteuid: 10985 ret = get_errno(high2lowuid(geteuid())); 10986 break; 10987 #endif 10988 #ifdef TARGET_NR_getegid 10989 case TARGET_NR_getegid: 10990 ret = get_errno(high2lowgid(getegid())); 10991 break; 10992 #endif 10993 case TARGET_NR_setreuid: 10994 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 10995 break; 10996 case TARGET_NR_setregid: 10997 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 10998 break; 10999 case TARGET_NR_getgroups: 11000 { 11001 int gidsetsize = arg1; 11002 target_id *target_grouplist; 11003 gid_t *grouplist; 11004 int i; 11005 11006 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11007 ret = get_errno(getgroups(gidsetsize, grouplist)); 11008 if (gidsetsize == 0) 11009 break; 11010 if (!is_error(ret)) { 11011 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 11012 if (!target_grouplist) 11013 goto efault; 11014 for(i = 0;i < ret; i++) 11015 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 11016 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 11017 } 11018 } 11019 break; 11020 case 
TARGET_NR_setgroups: 11021 { 11022 int gidsetsize = arg1; 11023 target_id *target_grouplist; 11024 gid_t *grouplist = NULL; 11025 int i; 11026 if (gidsetsize) { 11027 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11028 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 11029 if (!target_grouplist) { 11030 ret = -TARGET_EFAULT; 11031 goto fail; 11032 } 11033 for (i = 0; i < gidsetsize; i++) { 11034 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 11035 } 11036 unlock_user(target_grouplist, arg2, 0); 11037 } 11038 ret = get_errno(setgroups(gidsetsize, grouplist)); 11039 } 11040 break; 11041 case TARGET_NR_fchown: 11042 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 11043 break; 11044 #if defined(TARGET_NR_fchownat) 11045 case TARGET_NR_fchownat: 11046 if (!(p = lock_user_string(arg2))) 11047 goto efault; 11048 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 11049 low2highgid(arg4), arg5)); 11050 unlock_user(p, arg2, 0); 11051 break; 11052 #endif 11053 #ifdef TARGET_NR_setresuid 11054 case TARGET_NR_setresuid: 11055 ret = get_errno(sys_setresuid(low2highuid(arg1), 11056 low2highuid(arg2), 11057 low2highuid(arg3))); 11058 break; 11059 #endif 11060 #ifdef TARGET_NR_getresuid 11061 case TARGET_NR_getresuid: 11062 { 11063 uid_t ruid, euid, suid; 11064 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11065 if (!is_error(ret)) { 11066 if (put_user_id(high2lowuid(ruid), arg1) 11067 || put_user_id(high2lowuid(euid), arg2) 11068 || put_user_id(high2lowuid(suid), arg3)) 11069 goto efault; 11070 } 11071 } 11072 break; 11073 #endif 11074 #ifdef TARGET_NR_getresgid 11075 case TARGET_NR_setresgid: 11076 ret = get_errno(sys_setresgid(low2highgid(arg1), 11077 low2highgid(arg2), 11078 low2highgid(arg3))); 11079 break; 11080 #endif 11081 #ifdef TARGET_NR_getresgid 11082 case TARGET_NR_getresgid: 11083 { 11084 gid_t rgid, egid, sgid; 11085 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11086 if (!is_error(ret)) { 
11087 if (put_user_id(high2lowgid(rgid), arg1) 11088 || put_user_id(high2lowgid(egid), arg2) 11089 || put_user_id(high2lowgid(sgid), arg3)) 11090 goto efault; 11091 } 11092 } 11093 break; 11094 #endif 11095 #ifdef TARGET_NR_chown 11096 case TARGET_NR_chown: 11097 if (!(p = lock_user_string(arg1))) 11098 goto efault; 11099 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11100 unlock_user(p, arg1, 0); 11101 break; 11102 #endif 11103 case TARGET_NR_setuid: 11104 ret = get_errno(sys_setuid(low2highuid(arg1))); 11105 break; 11106 case TARGET_NR_setgid: 11107 ret = get_errno(sys_setgid(low2highgid(arg1))); 11108 break; 11109 case TARGET_NR_setfsuid: 11110 ret = get_errno(setfsuid(arg1)); 11111 break; 11112 case TARGET_NR_setfsgid: 11113 ret = get_errno(setfsgid(arg1)); 11114 break; 11115 11116 #ifdef TARGET_NR_lchown32 11117 case TARGET_NR_lchown32: 11118 if (!(p = lock_user_string(arg1))) 11119 goto efault; 11120 ret = get_errno(lchown(p, arg2, arg3)); 11121 unlock_user(p, arg1, 0); 11122 break; 11123 #endif 11124 #ifdef TARGET_NR_getuid32 11125 case TARGET_NR_getuid32: 11126 ret = get_errno(getuid()); 11127 break; 11128 #endif 11129 11130 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 11131 /* Alpha specific */ 11132 case TARGET_NR_getxuid: 11133 { 11134 uid_t euid; 11135 euid=geteuid(); 11136 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 11137 } 11138 ret = get_errno(getuid()); 11139 break; 11140 #endif 11141 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 11142 /* Alpha specific */ 11143 case TARGET_NR_getxgid: 11144 { 11145 uid_t egid; 11146 egid=getegid(); 11147 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 11148 } 11149 ret = get_errno(getgid()); 11150 break; 11151 #endif 11152 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 11153 /* Alpha specific */ 11154 case TARGET_NR_osf_getsysinfo: 11155 ret = -TARGET_EOPNOTSUPP; 11156 switch (arg1) { 11157 case TARGET_GSI_IEEE_FP_CONTROL: 11158 { 11159 uint64_t swcr, fpcr = 
cpu_alpha_load_fpcr (cpu_env); 11160 11161 /* Copied from linux ieee_fpcr_to_swcr. */ 11162 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 11163 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 11164 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 11165 | SWCR_TRAP_ENABLE_DZE 11166 | SWCR_TRAP_ENABLE_OVF); 11167 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 11168 | SWCR_TRAP_ENABLE_INE); 11169 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 11170 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 11171 11172 if (put_user_u64 (swcr, arg2)) 11173 goto efault; 11174 ret = 0; 11175 } 11176 break; 11177 11178 /* case GSI_IEEE_STATE_AT_SIGNAL: 11179 -- Not implemented in linux kernel. 11180 case GSI_UACPROC: 11181 -- Retrieves current unaligned access state; not much used. 11182 case GSI_PROC_TYPE: 11183 -- Retrieves implver information; surely not used. 11184 case GSI_GET_HWRPB: 11185 -- Grabs a copy of the HWRPB; surely not used. 11186 */ 11187 } 11188 break; 11189 #endif 11190 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11191 /* Alpha specific */ 11192 case TARGET_NR_osf_setsysinfo: 11193 ret = -TARGET_EOPNOTSUPP; 11194 switch (arg1) { 11195 case TARGET_SSI_IEEE_FP_CONTROL: 11196 { 11197 uint64_t swcr, fpcr, orig_fpcr; 11198 11199 if (get_user_u64 (swcr, arg2)) { 11200 goto efault; 11201 } 11202 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11203 fpcr = orig_fpcr & FPCR_DYN_MASK; 11204 11205 /* Copied from linux ieee_swcr_to_fpcr. */ 11206 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 11207 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 11208 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 11209 | SWCR_TRAP_ENABLE_DZE 11210 | SWCR_TRAP_ENABLE_OVF)) << 48; 11211 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 11212 | SWCR_TRAP_ENABLE_INE)) << 57; 11213 fpcr |= (swcr & SWCR_MAP_UMZ ? 
FPCR_UNDZ | FPCR_UNFD : 0); 11214 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 11215 11216 cpu_alpha_store_fpcr(cpu_env, fpcr); 11217 ret = 0; 11218 } 11219 break; 11220 11221 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11222 { 11223 uint64_t exc, fpcr, orig_fpcr; 11224 int si_code; 11225 11226 if (get_user_u64(exc, arg2)) { 11227 goto efault; 11228 } 11229 11230 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11231 11232 /* We only add to the exception status here. */ 11233 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 11234 11235 cpu_alpha_store_fpcr(cpu_env, fpcr); 11236 ret = 0; 11237 11238 /* Old exceptions are not signaled. */ 11239 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 11240 11241 /* If any exceptions set by this call, 11242 and are unmasked, send a signal. */ 11243 si_code = 0; 11244 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 11245 si_code = TARGET_FPE_FLTRES; 11246 } 11247 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 11248 si_code = TARGET_FPE_FLTUND; 11249 } 11250 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 11251 si_code = TARGET_FPE_FLTOVF; 11252 } 11253 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 11254 si_code = TARGET_FPE_FLTDIV; 11255 } 11256 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 11257 si_code = TARGET_FPE_FLTINV; 11258 } 11259 if (si_code != 0) { 11260 target_siginfo_t info; 11261 info.si_signo = SIGFPE; 11262 info.si_errno = 0; 11263 info.si_code = si_code; 11264 info._sifields._sigfault._addr 11265 = ((CPUArchState *)cpu_env)->pc; 11266 queue_signal((CPUArchState *)cpu_env, info.si_signo, 11267 QEMU_SI_FAULT, &info); 11268 } 11269 } 11270 break; 11271 11272 /* case SSI_NVPAIRS: 11273 -- Used with SSIN_UACPROC to enable unaligned accesses. 11274 case SSI_IEEE_STATE_AT_SIGNAL: 11275 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11276 -- Not implemented in linux kernel 11277 */ 11278 } 11279 break; 11280 #endif 11281 #ifdef TARGET_NR_osf_sigprocmask 11282 /* Alpha specific. 
*/ 11283 case TARGET_NR_osf_sigprocmask: 11284 { 11285 abi_ulong mask; 11286 int how; 11287 sigset_t set, oldset; 11288 11289 switch(arg1) { 11290 case TARGET_SIG_BLOCK: 11291 how = SIG_BLOCK; 11292 break; 11293 case TARGET_SIG_UNBLOCK: 11294 how = SIG_UNBLOCK; 11295 break; 11296 case TARGET_SIG_SETMASK: 11297 how = SIG_SETMASK; 11298 break; 11299 default: 11300 ret = -TARGET_EINVAL; 11301 goto fail; 11302 } 11303 mask = arg2; 11304 target_to_host_old_sigset(&set, &mask); 11305 ret = do_sigprocmask(how, &set, &oldset); 11306 if (!ret) { 11307 host_to_target_old_sigset(&mask, &oldset); 11308 ret = mask; 11309 } 11310 } 11311 break; 11312 #endif 11313 11314 #ifdef TARGET_NR_getgid32 11315 case TARGET_NR_getgid32: 11316 ret = get_errno(getgid()); 11317 break; 11318 #endif 11319 #ifdef TARGET_NR_geteuid32 11320 case TARGET_NR_geteuid32: 11321 ret = get_errno(geteuid()); 11322 break; 11323 #endif 11324 #ifdef TARGET_NR_getegid32 11325 case TARGET_NR_getegid32: 11326 ret = get_errno(getegid()); 11327 break; 11328 #endif 11329 #ifdef TARGET_NR_setreuid32 11330 case TARGET_NR_setreuid32: 11331 ret = get_errno(setreuid(arg1, arg2)); 11332 break; 11333 #endif 11334 #ifdef TARGET_NR_setregid32 11335 case TARGET_NR_setregid32: 11336 ret = get_errno(setregid(arg1, arg2)); 11337 break; 11338 #endif 11339 #ifdef TARGET_NR_getgroups32 11340 case TARGET_NR_getgroups32: 11341 { 11342 int gidsetsize = arg1; 11343 uint32_t *target_grouplist; 11344 gid_t *grouplist; 11345 int i; 11346 11347 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11348 ret = get_errno(getgroups(gidsetsize, grouplist)); 11349 if (gidsetsize == 0) 11350 break; 11351 if (!is_error(ret)) { 11352 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 11353 if (!target_grouplist) { 11354 ret = -TARGET_EFAULT; 11355 goto fail; 11356 } 11357 for(i = 0;i < ret; i++) 11358 target_grouplist[i] = tswap32(grouplist[i]); 11359 unlock_user(target_grouplist, arg2, gidsetsize * 4); 11360 } 11361 } 11362 break; 
11363 #endif 11364 #ifdef TARGET_NR_setgroups32 11365 case TARGET_NR_setgroups32: 11366 { 11367 int gidsetsize = arg1; 11368 uint32_t *target_grouplist; 11369 gid_t *grouplist; 11370 int i; 11371 11372 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11373 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 11374 if (!target_grouplist) { 11375 ret = -TARGET_EFAULT; 11376 goto fail; 11377 } 11378 for(i = 0;i < gidsetsize; i++) 11379 grouplist[i] = tswap32(target_grouplist[i]); 11380 unlock_user(target_grouplist, arg2, 0); 11381 ret = get_errno(setgroups(gidsetsize, grouplist)); 11382 } 11383 break; 11384 #endif 11385 #ifdef TARGET_NR_fchown32 11386 case TARGET_NR_fchown32: 11387 ret = get_errno(fchown(arg1, arg2, arg3)); 11388 break; 11389 #endif 11390 #ifdef TARGET_NR_setresuid32 11391 case TARGET_NR_setresuid32: 11392 ret = get_errno(sys_setresuid(arg1, arg2, arg3)); 11393 break; 11394 #endif 11395 #ifdef TARGET_NR_getresuid32 11396 case TARGET_NR_getresuid32: 11397 { 11398 uid_t ruid, euid, suid; 11399 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11400 if (!is_error(ret)) { 11401 if (put_user_u32(ruid, arg1) 11402 || put_user_u32(euid, arg2) 11403 || put_user_u32(suid, arg3)) 11404 goto efault; 11405 } 11406 } 11407 break; 11408 #endif 11409 #ifdef TARGET_NR_setresgid32 11410 case TARGET_NR_setresgid32: 11411 ret = get_errno(sys_setresgid(arg1, arg2, arg3)); 11412 break; 11413 #endif 11414 #ifdef TARGET_NR_getresgid32 11415 case TARGET_NR_getresgid32: 11416 { 11417 gid_t rgid, egid, sgid; 11418 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11419 if (!is_error(ret)) { 11420 if (put_user_u32(rgid, arg1) 11421 || put_user_u32(egid, arg2) 11422 || put_user_u32(sgid, arg3)) 11423 goto efault; 11424 } 11425 } 11426 break; 11427 #endif 11428 #ifdef TARGET_NR_chown32 11429 case TARGET_NR_chown32: 11430 if (!(p = lock_user_string(arg1))) 11431 goto efault; 11432 ret = get_errno(chown(p, arg2, arg3)); 11433 unlock_user(p, arg1, 0); 11434 break; 11435 
#endif 11436 #ifdef TARGET_NR_setuid32 11437 case TARGET_NR_setuid32: 11438 ret = get_errno(sys_setuid(arg1)); 11439 break; 11440 #endif 11441 #ifdef TARGET_NR_setgid32 11442 case TARGET_NR_setgid32: 11443 ret = get_errno(sys_setgid(arg1)); 11444 break; 11445 #endif 11446 #ifdef TARGET_NR_setfsuid32 11447 case TARGET_NR_setfsuid32: 11448 ret = get_errno(setfsuid(arg1)); 11449 break; 11450 #endif 11451 #ifdef TARGET_NR_setfsgid32 11452 case TARGET_NR_setfsgid32: 11453 ret = get_errno(setfsgid(arg1)); 11454 break; 11455 #endif 11456 11457 case TARGET_NR_pivot_root: 11458 goto unimplemented; 11459 #ifdef TARGET_NR_mincore 11460 case TARGET_NR_mincore: 11461 { 11462 void *a; 11463 ret = -TARGET_ENOMEM; 11464 a = lock_user(VERIFY_READ, arg1, arg2, 0); 11465 if (!a) { 11466 goto fail; 11467 } 11468 ret = -TARGET_EFAULT; 11469 p = lock_user_string(arg3); 11470 if (!p) { 11471 goto mincore_fail; 11472 } 11473 ret = get_errno(mincore(a, arg2, p)); 11474 unlock_user(p, arg3, ret); 11475 mincore_fail: 11476 unlock_user(a, arg1, 0); 11477 } 11478 break; 11479 #endif 11480 #ifdef TARGET_NR_arm_fadvise64_64 11481 case TARGET_NR_arm_fadvise64_64: 11482 /* arm_fadvise64_64 looks like fadvise64_64 but 11483 * with different argument order: fd, advice, offset, len 11484 * rather than the usual fd, offset, len, advice. 11485 * Note that offset and len are both 64-bit so appear as 11486 * pairs of 32-bit registers. 
11487 */ 11488 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 11489 target_offset64(arg5, arg6), arg2); 11490 ret = -host_to_target_errno(ret); 11491 break; 11492 #endif 11493 11494 #if TARGET_ABI_BITS == 32 11495 11496 #ifdef TARGET_NR_fadvise64_64 11497 case TARGET_NR_fadvise64_64: 11498 #if defined(TARGET_PPC) 11499 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 11500 ret = arg2; 11501 arg2 = arg3; 11502 arg3 = arg4; 11503 arg4 = arg5; 11504 arg5 = arg6; 11505 arg6 = ret; 11506 #else 11507 /* 6 args: fd, offset (high, low), len (high, low), advice */ 11508 if (regpairs_aligned(cpu_env, num)) { 11509 /* offset is in (3,4), len in (5,6) and advice in 7 */ 11510 arg2 = arg3; 11511 arg3 = arg4; 11512 arg4 = arg5; 11513 arg5 = arg6; 11514 arg6 = arg7; 11515 } 11516 #endif 11517 ret = -host_to_target_errno(posix_fadvise(arg1, 11518 target_offset64(arg2, arg3), 11519 target_offset64(arg4, arg5), 11520 arg6)); 11521 break; 11522 #endif 11523 11524 #ifdef TARGET_NR_fadvise64 11525 case TARGET_NR_fadvise64: 11526 /* 5 args: fd, offset (high, low), len, advice */ 11527 if (regpairs_aligned(cpu_env, num)) { 11528 /* offset is in (3,4), len in 5 and advice in 6 */ 11529 arg2 = arg3; 11530 arg3 = arg4; 11531 arg4 = arg5; 11532 arg5 = arg6; 11533 } 11534 ret = -host_to_target_errno(posix_fadvise(arg1, 11535 target_offset64(arg2, arg3), 11536 arg4, arg5)); 11537 break; 11538 #endif 11539 11540 #else /* not a 32-bit ABI */ 11541 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 11542 #ifdef TARGET_NR_fadvise64_64 11543 case TARGET_NR_fadvise64_64: 11544 #endif 11545 #ifdef TARGET_NR_fadvise64 11546 case TARGET_NR_fadvise64: 11547 #endif 11548 #ifdef TARGET_S390X 11549 switch (arg4) { 11550 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 11551 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 11552 case 6: arg4 = POSIX_FADV_DONTNEED; break; 11553 case 7: arg4 = POSIX_FADV_NOREUSE; break; 11554 
default: break; 11555 } 11556 #endif 11557 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 11558 break; 11559 #endif 11560 #endif /* end of 64-bit ABI fadvise handling */ 11561 11562 #ifdef TARGET_NR_madvise 11563 case TARGET_NR_madvise: 11564 /* A straight passthrough may not be safe because qemu sometimes 11565 turns private file-backed mappings into anonymous mappings. 11566 This will break MADV_DONTNEED. 11567 This is a hint, so ignoring and returning success is ok. */ 11568 ret = get_errno(0); 11569 break; 11570 #endif 11571 #if TARGET_ABI_BITS == 32 11572 case TARGET_NR_fcntl64: 11573 { 11574 int cmd; 11575 struct flock64 fl; 11576 from_flock64_fn *copyfrom = copy_from_user_flock64; 11577 to_flock64_fn *copyto = copy_to_user_flock64; 11578 11579 #ifdef TARGET_ARM 11580 if (((CPUARMState *)cpu_env)->eabi) { 11581 copyfrom = copy_from_user_eabi_flock64; 11582 copyto = copy_to_user_eabi_flock64; 11583 } 11584 #endif 11585 11586 cmd = target_to_host_fcntl_cmd(arg2); 11587 if (cmd == -TARGET_EINVAL) { 11588 ret = cmd; 11589 break; 11590 } 11591 11592 switch(arg2) { 11593 case TARGET_F_GETLK64: 11594 ret = copyfrom(&fl, arg3); 11595 if (ret) { 11596 break; 11597 } 11598 ret = get_errno(fcntl(arg1, cmd, &fl)); 11599 if (ret == 0) { 11600 ret = copyto(arg3, &fl); 11601 } 11602 break; 11603 11604 case TARGET_F_SETLK64: 11605 case TARGET_F_SETLKW64: 11606 ret = copyfrom(&fl, arg3); 11607 if (ret) { 11608 break; 11609 } 11610 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11611 break; 11612 default: 11613 ret = do_fcntl(arg1, arg2, arg3); 11614 break; 11615 } 11616 break; 11617 } 11618 #endif 11619 #ifdef TARGET_NR_cacheflush 11620 case TARGET_NR_cacheflush: 11621 /* self-modifying code is handled automatically, so nothing needed */ 11622 ret = 0; 11623 break; 11624 #endif 11625 #ifdef TARGET_NR_security 11626 case TARGET_NR_security: 11627 goto unimplemented; 11628 #endif 11629 #ifdef TARGET_NR_getpagesize 11630 case TARGET_NR_getpagesize: 11631 ret = 
TARGET_PAGE_SIZE; 11632 break; 11633 #endif 11634 case TARGET_NR_gettid: 11635 ret = get_errno(gettid()); 11636 break; 11637 #ifdef TARGET_NR_readahead 11638 case TARGET_NR_readahead: 11639 #if TARGET_ABI_BITS == 32 11640 if (regpairs_aligned(cpu_env, num)) { 11641 arg2 = arg3; 11642 arg3 = arg4; 11643 arg4 = arg5; 11644 } 11645 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4)); 11646 #else 11647 ret = get_errno(readahead(arg1, arg2, arg3)); 11648 #endif 11649 break; 11650 #endif 11651 #ifdef CONFIG_ATTR 11652 #ifdef TARGET_NR_setxattr 11653 case TARGET_NR_listxattr: 11654 case TARGET_NR_llistxattr: 11655 { 11656 void *p, *b = 0; 11657 if (arg2) { 11658 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11659 if (!b) { 11660 ret = -TARGET_EFAULT; 11661 break; 11662 } 11663 } 11664 p = lock_user_string(arg1); 11665 if (p) { 11666 if (num == TARGET_NR_listxattr) { 11667 ret = get_errno(listxattr(p, b, arg3)); 11668 } else { 11669 ret = get_errno(llistxattr(p, b, arg3)); 11670 } 11671 } else { 11672 ret = -TARGET_EFAULT; 11673 } 11674 unlock_user(p, arg1, 0); 11675 unlock_user(b, arg2, arg3); 11676 break; 11677 } 11678 case TARGET_NR_flistxattr: 11679 { 11680 void *b = 0; 11681 if (arg2) { 11682 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11683 if (!b) { 11684 ret = -TARGET_EFAULT; 11685 break; 11686 } 11687 } 11688 ret = get_errno(flistxattr(arg1, b, arg3)); 11689 unlock_user(b, arg2, arg3); 11690 break; 11691 } 11692 case TARGET_NR_setxattr: 11693 case TARGET_NR_lsetxattr: 11694 { 11695 void *p, *n, *v = 0; 11696 if (arg3) { 11697 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11698 if (!v) { 11699 ret = -TARGET_EFAULT; 11700 break; 11701 } 11702 } 11703 p = lock_user_string(arg1); 11704 n = lock_user_string(arg2); 11705 if (p && n) { 11706 if (num == TARGET_NR_setxattr) { 11707 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 11708 } else { 11709 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 11710 } 11711 } else { 11712 ret = -TARGET_EFAULT; 11713 } 11714 
unlock_user(p, arg1, 0); 11715 unlock_user(n, arg2, 0); 11716 unlock_user(v, arg3, 0); 11717 } 11718 break; 11719 case TARGET_NR_fsetxattr: 11720 { 11721 void *n, *v = 0; 11722 if (arg3) { 11723 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11724 if (!v) { 11725 ret = -TARGET_EFAULT; 11726 break; 11727 } 11728 } 11729 n = lock_user_string(arg2); 11730 if (n) { 11731 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 11732 } else { 11733 ret = -TARGET_EFAULT; 11734 } 11735 unlock_user(n, arg2, 0); 11736 unlock_user(v, arg3, 0); 11737 } 11738 break; 11739 case TARGET_NR_getxattr: 11740 case TARGET_NR_lgetxattr: 11741 { 11742 void *p, *n, *v = 0; 11743 if (arg3) { 11744 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11745 if (!v) { 11746 ret = -TARGET_EFAULT; 11747 break; 11748 } 11749 } 11750 p = lock_user_string(arg1); 11751 n = lock_user_string(arg2); 11752 if (p && n) { 11753 if (num == TARGET_NR_getxattr) { 11754 ret = get_errno(getxattr(p, n, v, arg4)); 11755 } else { 11756 ret = get_errno(lgetxattr(p, n, v, arg4)); 11757 } 11758 } else { 11759 ret = -TARGET_EFAULT; 11760 } 11761 unlock_user(p, arg1, 0); 11762 unlock_user(n, arg2, 0); 11763 unlock_user(v, arg3, arg4); 11764 } 11765 break; 11766 case TARGET_NR_fgetxattr: 11767 { 11768 void *n, *v = 0; 11769 if (arg3) { 11770 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11771 if (!v) { 11772 ret = -TARGET_EFAULT; 11773 break; 11774 } 11775 } 11776 n = lock_user_string(arg2); 11777 if (n) { 11778 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 11779 } else { 11780 ret = -TARGET_EFAULT; 11781 } 11782 unlock_user(n, arg2, 0); 11783 unlock_user(v, arg3, arg4); 11784 } 11785 break; 11786 case TARGET_NR_removexattr: 11787 case TARGET_NR_lremovexattr: 11788 { 11789 void *p, *n; 11790 p = lock_user_string(arg1); 11791 n = lock_user_string(arg2); 11792 if (p && n) { 11793 if (num == TARGET_NR_removexattr) { 11794 ret = get_errno(removexattr(p, n)); 11795 } else { 11796 ret = get_errno(lremovexattr(p, n)); 11797 } 11798 } else { 
11799 ret = -TARGET_EFAULT; 11800 } 11801 unlock_user(p, arg1, 0); 11802 unlock_user(n, arg2, 0); 11803 } 11804 break; 11805 case TARGET_NR_fremovexattr: 11806 { 11807 void *n; 11808 n = lock_user_string(arg2); 11809 if (n) { 11810 ret = get_errno(fremovexattr(arg1, n)); 11811 } else { 11812 ret = -TARGET_EFAULT; 11813 } 11814 unlock_user(n, arg2, 0); 11815 } 11816 break; 11817 #endif 11818 #endif /* CONFIG_ATTR */ 11819 #ifdef TARGET_NR_set_thread_area 11820 case TARGET_NR_set_thread_area: 11821 #if defined(TARGET_MIPS) 11822 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; 11823 ret = 0; 11824 break; 11825 #elif defined(TARGET_CRIS) 11826 if (arg1 & 0xff) 11827 ret = -TARGET_EINVAL; 11828 else { 11829 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 11830 ret = 0; 11831 } 11832 break; 11833 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 11834 ret = do_set_thread_area(cpu_env, arg1); 11835 break; 11836 #elif defined(TARGET_M68K) 11837 { 11838 TaskState *ts = cpu->opaque; 11839 ts->tp_value = arg1; 11840 ret = 0; 11841 break; 11842 } 11843 #else 11844 goto unimplemented_nowarn; 11845 #endif 11846 #endif 11847 #ifdef TARGET_NR_get_thread_area 11848 case TARGET_NR_get_thread_area: 11849 #if defined(TARGET_I386) && defined(TARGET_ABI32) 11850 ret = do_get_thread_area(cpu_env, arg1); 11851 break; 11852 #elif defined(TARGET_M68K) 11853 { 11854 TaskState *ts = cpu->opaque; 11855 ret = ts->tp_value; 11856 break; 11857 } 11858 #else 11859 goto unimplemented_nowarn; 11860 #endif 11861 #endif 11862 #ifdef TARGET_NR_getdomainname 11863 case TARGET_NR_getdomainname: 11864 goto unimplemented_nowarn; 11865 #endif 11866 11867 #ifdef TARGET_NR_clock_gettime 11868 case TARGET_NR_clock_gettime: 11869 { 11870 struct timespec ts; 11871 ret = get_errno(clock_gettime(arg1, &ts)); 11872 if (!is_error(ret)) { 11873 host_to_target_timespec(arg2, &ts); 11874 } 11875 break; 11876 } 11877 #endif 11878 #ifdef TARGET_NR_clock_getres 11879 case TARGET_NR_clock_getres: 11880 { 
11881 struct timespec ts; 11882 ret = get_errno(clock_getres(arg1, &ts)); 11883 if (!is_error(ret)) { 11884 host_to_target_timespec(arg2, &ts); 11885 } 11886 break; 11887 } 11888 #endif 11889 #ifdef TARGET_NR_clock_nanosleep 11890 case TARGET_NR_clock_nanosleep: 11891 { 11892 struct timespec ts; 11893 target_to_host_timespec(&ts, arg3); 11894 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 11895 &ts, arg4 ? &ts : NULL)); 11896 if (arg4) 11897 host_to_target_timespec(arg4, &ts); 11898 11899 #if defined(TARGET_PPC) 11900 /* clock_nanosleep is odd in that it returns positive errno values. 11901 * On PPC, CR0 bit 3 should be set in such a situation. */ 11902 if (ret && ret != -TARGET_ERESTARTSYS) { 11903 ((CPUPPCState *)cpu_env)->crf[0] |= 1; 11904 } 11905 #endif 11906 break; 11907 } 11908 #endif 11909 11910 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 11911 case TARGET_NR_set_tid_address: 11912 ret = get_errno(set_tid_address((int *)g2h(arg1))); 11913 break; 11914 #endif 11915 11916 case TARGET_NR_tkill: 11917 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 11918 break; 11919 11920 case TARGET_NR_tgkill: 11921 ret = get_errno(safe_tgkill((int)arg1, (int)arg2, 11922 target_to_host_signal(arg3))); 11923 break; 11924 11925 #ifdef TARGET_NR_set_robust_list 11926 case TARGET_NR_set_robust_list: 11927 case TARGET_NR_get_robust_list: 11928 /* The ABI for supporting robust futexes has userspace pass 11929 * the kernel a pointer to a linked list which is updated by 11930 * userspace after the syscall; the list is walked by the kernel 11931 * when the thread exits. Since the linked list in QEMU guest 11932 * memory isn't a valid linked list for the host and we have 11933 * no way to reliably intercept the thread-death event, we can't 11934 * support these. 
Silently return ENOSYS so that guest userspace 11935 * falls back to a non-robust futex implementation (which should 11936 * be OK except in the corner case of the guest crashing while 11937 * holding a mutex that is shared with another process via 11938 * shared memory). 11939 */ 11940 goto unimplemented_nowarn; 11941 #endif 11942 11943 #if defined(TARGET_NR_utimensat) 11944 case TARGET_NR_utimensat: 11945 { 11946 struct timespec *tsp, ts[2]; 11947 if (!arg3) { 11948 tsp = NULL; 11949 } else { 11950 target_to_host_timespec(ts, arg3); 11951 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 11952 tsp = ts; 11953 } 11954 if (!arg2) 11955 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 11956 else { 11957 if (!(p = lock_user_string(arg2))) { 11958 ret = -TARGET_EFAULT; 11959 goto fail; 11960 } 11961 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 11962 unlock_user(p, arg2, 0); 11963 } 11964 } 11965 break; 11966 #endif 11967 case TARGET_NR_futex: 11968 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 11969 break; 11970 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 11971 case TARGET_NR_inotify_init: 11972 ret = get_errno(sys_inotify_init()); 11973 if (ret >= 0) { 11974 fd_trans_register(ret, &target_inotify_trans); 11975 } 11976 break; 11977 #endif 11978 #ifdef CONFIG_INOTIFY1 11979 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 11980 case TARGET_NR_inotify_init1: 11981 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1, 11982 fcntl_flags_tbl))); 11983 if (ret >= 0) { 11984 fd_trans_register(ret, &target_inotify_trans); 11985 } 11986 break; 11987 #endif 11988 #endif 11989 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 11990 case TARGET_NR_inotify_add_watch: 11991 p = lock_user_string(arg2); 11992 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 11993 unlock_user(p, arg2, 0); 11994 break; 11995 #endif 11996 #if 
defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 11997 case TARGET_NR_inotify_rm_watch: 11998 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 11999 break; 12000 #endif 12001 12002 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 12003 case TARGET_NR_mq_open: 12004 { 12005 struct mq_attr posix_mq_attr; 12006 struct mq_attr *pposix_mq_attr; 12007 int host_flags; 12008 12009 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 12010 pposix_mq_attr = NULL; 12011 if (arg4) { 12012 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 12013 goto efault; 12014 } 12015 pposix_mq_attr = &posix_mq_attr; 12016 } 12017 p = lock_user_string(arg1 - 1); 12018 if (!p) { 12019 goto efault; 12020 } 12021 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 12022 unlock_user (p, arg1, 0); 12023 } 12024 break; 12025 12026 case TARGET_NR_mq_unlink: 12027 p = lock_user_string(arg1 - 1); 12028 if (!p) { 12029 ret = -TARGET_EFAULT; 12030 break; 12031 } 12032 ret = get_errno(mq_unlink(p)); 12033 unlock_user (p, arg1, 0); 12034 break; 12035 12036 case TARGET_NR_mq_timedsend: 12037 { 12038 struct timespec ts; 12039 12040 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12041 if (arg5 != 0) { 12042 target_to_host_timespec(&ts, arg5); 12043 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12044 host_to_target_timespec(arg5, &ts); 12045 } else { 12046 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12047 } 12048 unlock_user (p, arg2, arg3); 12049 } 12050 break; 12051 12052 case TARGET_NR_mq_timedreceive: 12053 { 12054 struct timespec ts; 12055 unsigned int prio; 12056 12057 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12058 if (arg5 != 0) { 12059 target_to_host_timespec(&ts, arg5); 12060 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12061 &prio, &ts)); 12062 host_to_target_timespec(arg5, &ts); 12063 } else { 12064 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12065 &prio, NULL)); 12066 } 12067 unlock_user (p, 
arg2, arg3); 12068 if (arg4 != 0) 12069 put_user_u32(prio, arg4); 12070 } 12071 break; 12072 12073 /* Not implemented for now... */ 12074 /* case TARGET_NR_mq_notify: */ 12075 /* break; */ 12076 12077 case TARGET_NR_mq_getsetattr: 12078 { 12079 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 12080 ret = 0; 12081 if (arg3 != 0) { 12082 ret = mq_getattr(arg1, &posix_mq_attr_out); 12083 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 12084 } 12085 if (arg2 != 0) { 12086 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 12087 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 12088 } 12089 12090 } 12091 break; 12092 #endif 12093 12094 #ifdef CONFIG_SPLICE 12095 #ifdef TARGET_NR_tee 12096 case TARGET_NR_tee: 12097 { 12098 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 12099 } 12100 break; 12101 #endif 12102 #ifdef TARGET_NR_splice 12103 case TARGET_NR_splice: 12104 { 12105 loff_t loff_in, loff_out; 12106 loff_t *ploff_in = NULL, *ploff_out = NULL; 12107 if (arg2) { 12108 if (get_user_u64(loff_in, arg2)) { 12109 goto efault; 12110 } 12111 ploff_in = &loff_in; 12112 } 12113 if (arg4) { 12114 if (get_user_u64(loff_out, arg4)) { 12115 goto efault; 12116 } 12117 ploff_out = &loff_out; 12118 } 12119 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 12120 if (arg2) { 12121 if (put_user_u64(loff_in, arg2)) { 12122 goto efault; 12123 } 12124 } 12125 if (arg4) { 12126 if (put_user_u64(loff_out, arg4)) { 12127 goto efault; 12128 } 12129 } 12130 } 12131 break; 12132 #endif 12133 #ifdef TARGET_NR_vmsplice 12134 case TARGET_NR_vmsplice: 12135 { 12136 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 12137 if (vec != NULL) { 12138 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 12139 unlock_iovec(vec, arg2, arg3, 0); 12140 } else { 12141 ret = -host_to_target_errno(errno); 12142 } 12143 } 12144 break; 12145 #endif 12146 #endif /* CONFIG_SPLICE */ 12147 #ifdef CONFIG_EVENTFD 12148 #if defined(TARGET_NR_eventfd) 12149 case TARGET_NR_eventfd: 
12150 ret = get_errno(eventfd(arg1, 0)); 12151 if (ret >= 0) { 12152 fd_trans_register(ret, &target_eventfd_trans); 12153 } 12154 break; 12155 #endif 12156 #if defined(TARGET_NR_eventfd2) 12157 case TARGET_NR_eventfd2: 12158 { 12159 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 12160 if (arg2 & TARGET_O_NONBLOCK) { 12161 host_flags |= O_NONBLOCK; 12162 } 12163 if (arg2 & TARGET_O_CLOEXEC) { 12164 host_flags |= O_CLOEXEC; 12165 } 12166 ret = get_errno(eventfd(arg1, host_flags)); 12167 if (ret >= 0) { 12168 fd_trans_register(ret, &target_eventfd_trans); 12169 } 12170 break; 12171 } 12172 #endif 12173 #endif /* CONFIG_EVENTFD */ 12174 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12175 case TARGET_NR_fallocate: 12176 #if TARGET_ABI_BITS == 32 12177 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12178 target_offset64(arg5, arg6))); 12179 #else 12180 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12181 #endif 12182 break; 12183 #endif 12184 #if defined(CONFIG_SYNC_FILE_RANGE) 12185 #if defined(TARGET_NR_sync_file_range) 12186 case TARGET_NR_sync_file_range: 12187 #if TARGET_ABI_BITS == 32 12188 #if defined(TARGET_MIPS) 12189 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12190 target_offset64(arg5, arg6), arg7)); 12191 #else 12192 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12193 target_offset64(arg4, arg5), arg6)); 12194 #endif /* !TARGET_MIPS */ 12195 #else 12196 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 12197 #endif 12198 break; 12199 #endif 12200 #if defined(TARGET_NR_sync_file_range2) 12201 case TARGET_NR_sync_file_range2: 12202 /* This is like sync_file_range but the arguments are reordered */ 12203 #if TARGET_ABI_BITS == 32 12204 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12205 target_offset64(arg5, arg6), arg2)); 12206 #else 12207 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 12208 #endif 12209 break; 12210 
#endif 12211 #endif 12212 #if defined(TARGET_NR_signalfd4) 12213 case TARGET_NR_signalfd4: 12214 ret = do_signalfd4(arg1, arg2, arg4); 12215 break; 12216 #endif 12217 #if defined(TARGET_NR_signalfd) 12218 case TARGET_NR_signalfd: 12219 ret = do_signalfd4(arg1, arg2, 0); 12220 break; 12221 #endif 12222 #if defined(CONFIG_EPOLL) 12223 #if defined(TARGET_NR_epoll_create) 12224 case TARGET_NR_epoll_create: 12225 ret = get_errno(epoll_create(arg1)); 12226 break; 12227 #endif 12228 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 12229 case TARGET_NR_epoll_create1: 12230 ret = get_errno(epoll_create1(arg1)); 12231 break; 12232 #endif 12233 #if defined(TARGET_NR_epoll_ctl) 12234 case TARGET_NR_epoll_ctl: 12235 { 12236 struct epoll_event ep; 12237 struct epoll_event *epp = 0; 12238 if (arg4) { 12239 struct target_epoll_event *target_ep; 12240 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 12241 goto efault; 12242 } 12243 ep.events = tswap32(target_ep->events); 12244 /* The epoll_data_t union is just opaque data to the kernel, 12245 * so we transfer all 64 bits across and need not worry what 12246 * actual data type it is. 
12247 */ 12248 ep.data.u64 = tswap64(target_ep->data.u64); 12249 unlock_user_struct(target_ep, arg4, 0); 12250 epp = &ep; 12251 } 12252 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 12253 break; 12254 } 12255 #endif 12256 12257 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 12258 #if defined(TARGET_NR_epoll_wait) 12259 case TARGET_NR_epoll_wait: 12260 #endif 12261 #if defined(TARGET_NR_epoll_pwait) 12262 case TARGET_NR_epoll_pwait: 12263 #endif 12264 { 12265 struct target_epoll_event *target_ep; 12266 struct epoll_event *ep; 12267 int epfd = arg1; 12268 int maxevents = arg3; 12269 int timeout = arg4; 12270 12271 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 12272 ret = -TARGET_EINVAL; 12273 break; 12274 } 12275 12276 target_ep = lock_user(VERIFY_WRITE, arg2, 12277 maxevents * sizeof(struct target_epoll_event), 1); 12278 if (!target_ep) { 12279 goto efault; 12280 } 12281 12282 ep = g_try_new(struct epoll_event, maxevents); 12283 if (!ep) { 12284 unlock_user(target_ep, arg2, 0); 12285 ret = -TARGET_ENOMEM; 12286 break; 12287 } 12288 12289 switch (num) { 12290 #if defined(TARGET_NR_epoll_pwait) 12291 case TARGET_NR_epoll_pwait: 12292 { 12293 target_sigset_t *target_set; 12294 sigset_t _set, *set = &_set; 12295 12296 if (arg5) { 12297 if (arg6 != sizeof(target_sigset_t)) { 12298 ret = -TARGET_EINVAL; 12299 break; 12300 } 12301 12302 target_set = lock_user(VERIFY_READ, arg5, 12303 sizeof(target_sigset_t), 1); 12304 if (!target_set) { 12305 ret = -TARGET_EFAULT; 12306 break; 12307 } 12308 target_to_host_sigset(set, target_set); 12309 unlock_user(target_set, arg5, 0); 12310 } else { 12311 set = NULL; 12312 } 12313 12314 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12315 set, SIGSET_T_SIZE)); 12316 break; 12317 } 12318 #endif 12319 #if defined(TARGET_NR_epoll_wait) 12320 case TARGET_NR_epoll_wait: 12321 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12322 NULL, 0)); 12323 break; 12324 #endif 12325 
default: 12326 ret = -TARGET_ENOSYS; 12327 } 12328 if (!is_error(ret)) { 12329 int i; 12330 for (i = 0; i < ret; i++) { 12331 target_ep[i].events = tswap32(ep[i].events); 12332 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 12333 } 12334 unlock_user(target_ep, arg2, 12335 ret * sizeof(struct target_epoll_event)); 12336 } else { 12337 unlock_user(target_ep, arg2, 0); 12338 } 12339 g_free(ep); 12340 break; 12341 } 12342 #endif 12343 #endif 12344 #ifdef TARGET_NR_prlimit64 12345 case TARGET_NR_prlimit64: 12346 { 12347 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 12348 struct target_rlimit64 *target_rnew, *target_rold; 12349 struct host_rlimit64 rnew, rold, *rnewp = 0; 12350 int resource = target_to_host_resource(arg2); 12351 if (arg3) { 12352 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 12353 goto efault; 12354 } 12355 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 12356 rnew.rlim_max = tswap64(target_rnew->rlim_max); 12357 unlock_user_struct(target_rnew, arg3, 0); 12358 rnewp = &rnew; 12359 } 12360 12361 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? 
&rold : 0)); 12362 if (!is_error(ret) && arg4) { 12363 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 12364 goto efault; 12365 } 12366 target_rold->rlim_cur = tswap64(rold.rlim_cur); 12367 target_rold->rlim_max = tswap64(rold.rlim_max); 12368 unlock_user_struct(target_rold, arg4, 1); 12369 } 12370 break; 12371 } 12372 #endif 12373 #ifdef TARGET_NR_gethostname 12374 case TARGET_NR_gethostname: 12375 { 12376 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 12377 if (name) { 12378 ret = get_errno(gethostname(name, arg2)); 12379 unlock_user(name, arg1, arg2); 12380 } else { 12381 ret = -TARGET_EFAULT; 12382 } 12383 break; 12384 } 12385 #endif 12386 #ifdef TARGET_NR_atomic_cmpxchg_32 12387 case TARGET_NR_atomic_cmpxchg_32: 12388 { 12389 /* should use start_exclusive from main.c */ 12390 abi_ulong mem_value; 12391 if (get_user_u32(mem_value, arg6)) { 12392 target_siginfo_t info; 12393 info.si_signo = SIGSEGV; 12394 info.si_errno = 0; 12395 info.si_code = TARGET_SEGV_MAPERR; 12396 info._sifields._sigfault._addr = arg6; 12397 queue_signal((CPUArchState *)cpu_env, info.si_signo, 12398 QEMU_SI_FAULT, &info); 12399 ret = 0xdeadbeef; 12400 12401 } 12402 if (mem_value == arg2) 12403 put_user_u32(arg1, arg6); 12404 ret = mem_value; 12405 break; 12406 } 12407 #endif 12408 #ifdef TARGET_NR_atomic_barrier 12409 case TARGET_NR_atomic_barrier: 12410 { 12411 /* Like the kernel implementation and the qemu arm barrier, no-op this? 
*/ 12412 ret = 0; 12413 break; 12414 } 12415 #endif 12416 12417 #ifdef TARGET_NR_timer_create 12418 case TARGET_NR_timer_create: 12419 { 12420 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 12421 12422 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 12423 12424 int clkid = arg1; 12425 int timer_index = next_free_host_timer(); 12426 12427 if (timer_index < 0) { 12428 ret = -TARGET_EAGAIN; 12429 } else { 12430 timer_t *phtimer = g_posix_timers + timer_index; 12431 12432 if (arg2) { 12433 phost_sevp = &host_sevp; 12434 ret = target_to_host_sigevent(phost_sevp, arg2); 12435 if (ret != 0) { 12436 break; 12437 } 12438 } 12439 12440 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 12441 if (ret) { 12442 phtimer = NULL; 12443 } else { 12444 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 12445 goto efault; 12446 } 12447 } 12448 } 12449 break; 12450 } 12451 #endif 12452 12453 #ifdef TARGET_NR_timer_settime 12454 case TARGET_NR_timer_settime: 12455 { 12456 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 12457 * struct itimerspec * old_value */ 12458 target_timer_t timerid = get_timer_id(arg1); 12459 12460 if (timerid < 0) { 12461 ret = timerid; 12462 } else if (arg3 == 0) { 12463 ret = -TARGET_EINVAL; 12464 } else { 12465 timer_t htimer = g_posix_timers[timerid]; 12466 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 12467 12468 if (target_to_host_itimerspec(&hspec_new, arg3)) { 12469 goto efault; 12470 } 12471 ret = get_errno( 12472 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 12473 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) { 12474 goto efault; 12475 } 12476 } 12477 break; 12478 } 12479 #endif 12480 12481 #ifdef TARGET_NR_timer_gettime 12482 case TARGET_NR_timer_gettime: 12483 { 12484 /* args: timer_t timerid, struct itimerspec *curr_value */ 12485 target_timer_t timerid = get_timer_id(arg1); 12486 12487 if (timerid < 0) { 12488 ret = timerid; 12489 } else 
if (!arg2) { 12490 ret = -TARGET_EFAULT; 12491 } else { 12492 timer_t htimer = g_posix_timers[timerid]; 12493 struct itimerspec hspec; 12494 ret = get_errno(timer_gettime(htimer, &hspec)); 12495 12496 if (host_to_target_itimerspec(arg2, &hspec)) { 12497 ret = -TARGET_EFAULT; 12498 } 12499 } 12500 break; 12501 } 12502 #endif 12503 12504 #ifdef TARGET_NR_timer_getoverrun 12505 case TARGET_NR_timer_getoverrun: 12506 { 12507 /* args: timer_t timerid */ 12508 target_timer_t timerid = get_timer_id(arg1); 12509 12510 if (timerid < 0) { 12511 ret = timerid; 12512 } else { 12513 timer_t htimer = g_posix_timers[timerid]; 12514 ret = get_errno(timer_getoverrun(htimer)); 12515 } 12516 fd_trans_unregister(ret); 12517 break; 12518 } 12519 #endif 12520 12521 #ifdef TARGET_NR_timer_delete 12522 case TARGET_NR_timer_delete: 12523 { 12524 /* args: timer_t timerid */ 12525 target_timer_t timerid = get_timer_id(arg1); 12526 12527 if (timerid < 0) { 12528 ret = timerid; 12529 } else { 12530 timer_t htimer = g_posix_timers[timerid]; 12531 ret = get_errno(timer_delete(htimer)); 12532 g_posix_timers[timerid] = 0; 12533 } 12534 break; 12535 } 12536 #endif 12537 12538 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 12539 case TARGET_NR_timerfd_create: 12540 ret = get_errno(timerfd_create(arg1, 12541 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 12542 break; 12543 #endif 12544 12545 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 12546 case TARGET_NR_timerfd_gettime: 12547 { 12548 struct itimerspec its_curr; 12549 12550 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 12551 12552 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 12553 goto efault; 12554 } 12555 } 12556 break; 12557 #endif 12558 12559 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 12560 case TARGET_NR_timerfd_settime: 12561 { 12562 struct itimerspec its_new, its_old, *p_new; 12563 12564 if (arg3) { 12565 if (target_to_host_itimerspec(&its_new, arg3)) { 12566 
goto efault; 12567 } 12568 p_new = &its_new; 12569 } else { 12570 p_new = NULL; 12571 } 12572 12573 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 12574 12575 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 12576 goto efault; 12577 } 12578 } 12579 break; 12580 #endif 12581 12582 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 12583 case TARGET_NR_ioprio_get: 12584 ret = get_errno(ioprio_get(arg1, arg2)); 12585 break; 12586 #endif 12587 12588 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 12589 case TARGET_NR_ioprio_set: 12590 ret = get_errno(ioprio_set(arg1, arg2, arg3)); 12591 break; 12592 #endif 12593 12594 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 12595 case TARGET_NR_setns: 12596 ret = get_errno(setns(arg1, arg2)); 12597 break; 12598 #endif 12599 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 12600 case TARGET_NR_unshare: 12601 ret = get_errno(unshare(arg1)); 12602 break; 12603 #endif 12604 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 12605 case TARGET_NR_kcmp: 12606 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5)); 12607 break; 12608 #endif 12609 12610 default: 12611 unimplemented: 12612 gemu_log("qemu: Unsupported syscall: %d\n", num); 12613 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 12614 unimplemented_nowarn: 12615 #endif 12616 ret = -TARGET_ENOSYS; 12617 break; 12618 } 12619 fail: 12620 #ifdef DEBUG 12621 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 12622 #endif 12623 if(do_strace) 12624 print_syscall_ret(num, ret); 12625 trace_guest_user_syscall_ret(cpu, num, ret); 12626 return ret; 12627 efault: 12628 ret = -TARGET_EFAULT; 12629 goto fail; 12630 } 12631