1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/path.h" 23 #include <elf.h> 24 #include <endian.h> 25 #include <grp.h> 26 #include <sys/ipc.h> 27 #include <sys/msg.h> 28 #include <sys/wait.h> 29 #include <sys/mount.h> 30 #include <sys/file.h> 31 #include <sys/fsuid.h> 32 #include <sys/personality.h> 33 #include <sys/prctl.h> 34 #include <sys/resource.h> 35 #include <sys/swap.h> 36 #include <linux/capability.h> 37 #include <sched.h> 38 #include <sys/timex.h> 39 #include <sys/socket.h> 40 #include <sys/un.h> 41 #include <sys/uio.h> 42 #include <poll.h> 43 #include <sys/times.h> 44 #include <sys/shm.h> 45 #include <sys/sem.h> 46 #include <sys/statfs.h> 47 #include <utime.h> 48 #include <sys/sysinfo.h> 49 #include <sys/signalfd.h> 50 //#include <sys/user.h> 51 #include <netinet/ip.h> 52 #include <netinet/tcp.h> 53 #include <linux/wireless.h> 54 #include <linux/icmp.h> 55 #include <linux/icmpv6.h> 56 #include <linux/errqueue.h> 57 #include <linux/random.h> 58 #include "qemu-common.h" 59 #ifdef CONFIG_TIMERFD 60 #include <sys/timerfd.h> 61 #endif 62 #ifdef TARGET_GPROF 63 #include <sys/gmon.h> 64 #endif 65 #ifdef CONFIG_EVENTFD 66 #include <sys/eventfd.h> 67 #endif 68 #ifdef CONFIG_EPOLL 69 
#include <sys/epoll.h> 70 #endif 71 #ifdef CONFIG_ATTR 72 #include "qemu/xattr.h" 73 #endif 74 #ifdef CONFIG_SENDFILE 75 #include <sys/sendfile.h> 76 #endif 77 78 #define termios host_termios 79 #define winsize host_winsize 80 #define termio host_termio 81 #define sgttyb host_sgttyb /* same as target */ 82 #define tchars host_tchars /* same as target */ 83 #define ltchars host_ltchars /* same as target */ 84 85 #include <linux/termios.h> 86 #include <linux/unistd.h> 87 #include <linux/cdrom.h> 88 #include <linux/hdreg.h> 89 #include <linux/soundcard.h> 90 #include <linux/kd.h> 91 #include <linux/mtio.h> 92 #include <linux/fs.h> 93 #if defined(CONFIG_FIEMAP) 94 #include <linux/fiemap.h> 95 #endif 96 #include <linux/fb.h> 97 #include <linux/vt.h> 98 #include <linux/dm-ioctl.h> 99 #include <linux/reboot.h> 100 #include <linux/route.h> 101 #include <linux/filter.h> 102 #include <linux/blkpg.h> 103 #include <netpacket/packet.h> 104 #include <linux/netlink.h> 105 #ifdef CONFIG_RTNETLINK 106 #include <linux/rtnetlink.h> 107 #include <linux/if_bridge.h> 108 #endif 109 #include <linux/audit.h> 110 #include "linux_loop.h" 111 #include "uname.h" 112 113 #include "qemu.h" 114 115 #ifndef CLONE_IO 116 #define CLONE_IO 0x80000000 /* Clone io context */ 117 #endif 118 119 /* We can't directly call the host clone syscall, because this will 120 * badly confuse libc (breaking mutexes, for example). So we must 121 * divide clone flags into: 122 * * flag combinations that look like pthread_create() 123 * * flag combinations that look like fork() 124 * * flags we can implement within QEMU itself 125 * * flags we can't support and will return an error for 126 */ 127 /* For thread creation, all these flags must be present; for 128 * fork, none must be present. 
129 */ 130 #define CLONE_THREAD_FLAGS \ 131 (CLONE_VM | CLONE_FS | CLONE_FILES | \ 132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM) 133 134 /* These flags are ignored: 135 * CLONE_DETACHED is now ignored by the kernel; 136 * CLONE_IO is just an optimisation hint to the I/O scheduler 137 */ 138 #define CLONE_IGNORED_FLAGS \ 139 (CLONE_DETACHED | CLONE_IO) 140 141 /* Flags for fork which we can implement within QEMU itself */ 142 #define CLONE_OPTIONAL_FORK_FLAGS \ 143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) 145 146 /* Flags for thread creation which we can implement within QEMU itself */ 147 #define CLONE_OPTIONAL_THREAD_FLAGS \ 148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT) 150 151 #define CLONE_INVALID_FORK_FLAGS \ 152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS)) 153 154 #define CLONE_INVALID_THREAD_FLAGS \ 155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \ 156 CLONE_IGNORED_FLAGS)) 157 158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits 159 * have almost all been allocated. We cannot support any of 160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC, 161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED. 162 * The checks against the invalid thread masks above will catch these. 163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.) 164 */ 165 166 //#define DEBUG 167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted 168 * once. This exercises the codepaths for restart. 
169 */ 170 //#define DEBUG_ERESTARTSYS 171 172 //#include <linux/msdos_fs.h> 173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 175 176 #undef _syscall0 177 #undef _syscall1 178 #undef _syscall2 179 #undef _syscall3 180 #undef _syscall4 181 #undef _syscall5 182 #undef _syscall6 183 184 #define _syscall0(type,name) \ 185 static type name (void) \ 186 { \ 187 return syscall(__NR_##name); \ 188 } 189 190 #define _syscall1(type,name,type1,arg1) \ 191 static type name (type1 arg1) \ 192 { \ 193 return syscall(__NR_##name, arg1); \ 194 } 195 196 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 197 static type name (type1 arg1,type2 arg2) \ 198 { \ 199 return syscall(__NR_##name, arg1, arg2); \ 200 } 201 202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 203 static type name (type1 arg1,type2 arg2,type3 arg3) \ 204 { \ 205 return syscall(__NR_##name, arg1, arg2, arg3); \ 206 } 207 208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 210 { \ 211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 212 } 213 214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 215 type5,arg5) \ 216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 217 { \ 218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 219 } 220 221 222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 223 type5,arg5,type6,arg6) \ 224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 225 type6 arg6) \ 226 { \ 227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 228 } 229 230 231 #define __NR_sys_uname __NR_uname 232 #define __NR_sys_getcwd1 __NR_getcwd 233 #define __NR_sys_getdents __NR_getdents 234 #define __NR_sys_getdents64 __NR_getdents64 235 #define __NR_sys_getpriority 
__NR_getpriority 236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo 238 #define __NR_sys_syslog __NR_syslog 239 #define __NR_sys_futex __NR_futex 240 #define __NR_sys_inotify_init __NR_inotify_init 241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 243 244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__) 245 #define __NR__llseek __NR_lseek 246 #endif 247 248 /* Newer kernel ports have llseek() instead of _llseek() */ 249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 250 #define TARGET_NR__llseek TARGET_NR_llseek 251 #endif 252 253 #ifdef __NR_gettid 254 _syscall0(int, gettid) 255 #else 256 /* This is a replacement for the host gettid() and must return a host 257 errno. */ 258 static int gettid(void) { 259 return -ENOSYS; 260 } 261 #endif 262 263 /* For the 64-bit guest on 32-bit host case we must emulate 264 * getdents using getdents64, because otherwise the host 265 * might hand us back more dirent records than we can fit 266 * into the guest buffer after structure format conversion. 267 * Otherwise we emulate getdents with getdents if the host has it. 
268 */ 269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS 270 #define EMULATE_GETDENTS_WITH_GETDENTS 271 #endif 272 273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS) 274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 275 #endif 276 #if (defined(TARGET_NR_getdents) && \ 277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \ 278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 280 #endif 281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 282 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 283 loff_t *, res, uint, wh); 284 #endif 285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo) 286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig, 287 siginfo_t *, uinfo) 288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 289 #ifdef __NR_exit_group 290 _syscall1(int,exit_group,int,error_code) 291 #endif 292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 293 _syscall1(int,set_tid_address,int *,tidptr) 294 #endif 295 #if defined(TARGET_NR_futex) && defined(__NR_futex) 296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 297 const struct timespec *,timeout,int *,uaddr2,int,val3) 298 #endif 299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 301 unsigned long *, user_mask_ptr); 302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 304 unsigned long *, user_mask_ptr); 305 #define __NR_sys_getcpu __NR_getcpu 306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache); 307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 308 void *, arg); 309 _syscall2(int, capget, struct __user_cap_header_struct *, header, 310 
struct __user_cap_data_struct *, data); 311 _syscall2(int, capset, struct __user_cap_header_struct *, header, 312 struct __user_cap_data_struct *, data); 313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 314 _syscall2(int, ioprio_get, int, which, int, who) 315 #endif 316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio) 318 #endif 319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags) 321 #endif 322 323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type, 325 unsigned long, idx1, unsigned long, idx2) 326 #endif 327 328 static bitmask_transtbl fcntl_flags_tbl[] = { 329 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 330 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 331 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 332 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 333 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 334 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 335 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 336 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 337 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 338 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 339 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 340 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 341 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 342 #if defined(O_DIRECT) 343 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 344 #endif 345 #if defined(O_NOATIME) 346 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 347 #endif 348 #if defined(O_CLOEXEC) 349 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 350 #endif 351 #if defined(O_PATH) 352 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, 
O_PATH }, 353 #endif 354 #if defined(O_TMPFILE) 355 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE }, 356 #endif 357 /* Don't terminate the list prematurely on 64-bit host+guest. */ 358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 359 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 360 #endif 361 { 0, 0, 0, 0 } 362 }; 363 364 enum { 365 QEMU_IFLA_BR_UNSPEC, 366 QEMU_IFLA_BR_FORWARD_DELAY, 367 QEMU_IFLA_BR_HELLO_TIME, 368 QEMU_IFLA_BR_MAX_AGE, 369 QEMU_IFLA_BR_AGEING_TIME, 370 QEMU_IFLA_BR_STP_STATE, 371 QEMU_IFLA_BR_PRIORITY, 372 QEMU_IFLA_BR_VLAN_FILTERING, 373 QEMU_IFLA_BR_VLAN_PROTOCOL, 374 QEMU_IFLA_BR_GROUP_FWD_MASK, 375 QEMU_IFLA_BR_ROOT_ID, 376 QEMU_IFLA_BR_BRIDGE_ID, 377 QEMU_IFLA_BR_ROOT_PORT, 378 QEMU_IFLA_BR_ROOT_PATH_COST, 379 QEMU_IFLA_BR_TOPOLOGY_CHANGE, 380 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED, 381 QEMU_IFLA_BR_HELLO_TIMER, 382 QEMU_IFLA_BR_TCN_TIMER, 383 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER, 384 QEMU_IFLA_BR_GC_TIMER, 385 QEMU_IFLA_BR_GROUP_ADDR, 386 QEMU_IFLA_BR_FDB_FLUSH, 387 QEMU_IFLA_BR_MCAST_ROUTER, 388 QEMU_IFLA_BR_MCAST_SNOOPING, 389 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR, 390 QEMU_IFLA_BR_MCAST_QUERIER, 391 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY, 392 QEMU_IFLA_BR_MCAST_HASH_MAX, 393 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT, 394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT, 395 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL, 396 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL, 397 QEMU_IFLA_BR_MCAST_QUERIER_INTVL, 398 QEMU_IFLA_BR_MCAST_QUERY_INTVL, 399 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, 400 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL, 401 QEMU_IFLA_BR_NF_CALL_IPTABLES, 402 QEMU_IFLA_BR_NF_CALL_IP6TABLES, 403 QEMU_IFLA_BR_NF_CALL_ARPTABLES, 404 QEMU_IFLA_BR_VLAN_DEFAULT_PVID, 405 QEMU_IFLA_BR_PAD, 406 QEMU_IFLA_BR_VLAN_STATS_ENABLED, 407 QEMU_IFLA_BR_MCAST_STATS_ENABLED, 408 QEMU_IFLA_BR_MCAST_IGMP_VERSION, 409 QEMU_IFLA_BR_MCAST_MLD_VERSION, 410 QEMU___IFLA_BR_MAX, 411 }; 412 413 enum { 414 QEMU_IFLA_UNSPEC, 415 QEMU_IFLA_ADDRESS, 416 
QEMU_IFLA_BROADCAST, 417 QEMU_IFLA_IFNAME, 418 QEMU_IFLA_MTU, 419 QEMU_IFLA_LINK, 420 QEMU_IFLA_QDISC, 421 QEMU_IFLA_STATS, 422 QEMU_IFLA_COST, 423 QEMU_IFLA_PRIORITY, 424 QEMU_IFLA_MASTER, 425 QEMU_IFLA_WIRELESS, 426 QEMU_IFLA_PROTINFO, 427 QEMU_IFLA_TXQLEN, 428 QEMU_IFLA_MAP, 429 QEMU_IFLA_WEIGHT, 430 QEMU_IFLA_OPERSTATE, 431 QEMU_IFLA_LINKMODE, 432 QEMU_IFLA_LINKINFO, 433 QEMU_IFLA_NET_NS_PID, 434 QEMU_IFLA_IFALIAS, 435 QEMU_IFLA_NUM_VF, 436 QEMU_IFLA_VFINFO_LIST, 437 QEMU_IFLA_STATS64, 438 QEMU_IFLA_VF_PORTS, 439 QEMU_IFLA_PORT_SELF, 440 QEMU_IFLA_AF_SPEC, 441 QEMU_IFLA_GROUP, 442 QEMU_IFLA_NET_NS_FD, 443 QEMU_IFLA_EXT_MASK, 444 QEMU_IFLA_PROMISCUITY, 445 QEMU_IFLA_NUM_TX_QUEUES, 446 QEMU_IFLA_NUM_RX_QUEUES, 447 QEMU_IFLA_CARRIER, 448 QEMU_IFLA_PHYS_PORT_ID, 449 QEMU_IFLA_CARRIER_CHANGES, 450 QEMU_IFLA_PHYS_SWITCH_ID, 451 QEMU_IFLA_LINK_NETNSID, 452 QEMU_IFLA_PHYS_PORT_NAME, 453 QEMU_IFLA_PROTO_DOWN, 454 QEMU_IFLA_GSO_MAX_SEGS, 455 QEMU_IFLA_GSO_MAX_SIZE, 456 QEMU_IFLA_PAD, 457 QEMU_IFLA_XDP, 458 QEMU_IFLA_EVENT, 459 QEMU_IFLA_NEW_NETNSID, 460 QEMU_IFLA_IF_NETNSID, 461 QEMU_IFLA_CARRIER_UP_COUNT, 462 QEMU_IFLA_CARRIER_DOWN_COUNT, 463 QEMU_IFLA_NEW_IFINDEX, 464 QEMU___IFLA_MAX 465 }; 466 467 enum { 468 QEMU_IFLA_BRPORT_UNSPEC, 469 QEMU_IFLA_BRPORT_STATE, 470 QEMU_IFLA_BRPORT_PRIORITY, 471 QEMU_IFLA_BRPORT_COST, 472 QEMU_IFLA_BRPORT_MODE, 473 QEMU_IFLA_BRPORT_GUARD, 474 QEMU_IFLA_BRPORT_PROTECT, 475 QEMU_IFLA_BRPORT_FAST_LEAVE, 476 QEMU_IFLA_BRPORT_LEARNING, 477 QEMU_IFLA_BRPORT_UNICAST_FLOOD, 478 QEMU_IFLA_BRPORT_PROXYARP, 479 QEMU_IFLA_BRPORT_LEARNING_SYNC, 480 QEMU_IFLA_BRPORT_PROXYARP_WIFI, 481 QEMU_IFLA_BRPORT_ROOT_ID, 482 QEMU_IFLA_BRPORT_BRIDGE_ID, 483 QEMU_IFLA_BRPORT_DESIGNATED_PORT, 484 QEMU_IFLA_BRPORT_DESIGNATED_COST, 485 QEMU_IFLA_BRPORT_ID, 486 QEMU_IFLA_BRPORT_NO, 487 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, 488 QEMU_IFLA_BRPORT_CONFIG_PENDING, 489 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER, 490 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER, 491 
QEMU_IFLA_BRPORT_HOLD_TIMER, 492 QEMU_IFLA_BRPORT_FLUSH, 493 QEMU_IFLA_BRPORT_MULTICAST_ROUTER, 494 QEMU_IFLA_BRPORT_PAD, 495 QEMU_IFLA_BRPORT_MCAST_FLOOD, 496 QEMU_IFLA_BRPORT_MCAST_TO_UCAST, 497 QEMU_IFLA_BRPORT_VLAN_TUNNEL, 498 QEMU_IFLA_BRPORT_BCAST_FLOOD, 499 QEMU_IFLA_BRPORT_GROUP_FWD_MASK, 500 QEMU_IFLA_BRPORT_NEIGH_SUPPRESS, 501 QEMU___IFLA_BRPORT_MAX 502 }; 503 504 enum { 505 QEMU_IFLA_INFO_UNSPEC, 506 QEMU_IFLA_INFO_KIND, 507 QEMU_IFLA_INFO_DATA, 508 QEMU_IFLA_INFO_XSTATS, 509 QEMU_IFLA_INFO_SLAVE_KIND, 510 QEMU_IFLA_INFO_SLAVE_DATA, 511 QEMU___IFLA_INFO_MAX, 512 }; 513 514 enum { 515 QEMU_IFLA_INET_UNSPEC, 516 QEMU_IFLA_INET_CONF, 517 QEMU___IFLA_INET_MAX, 518 }; 519 520 enum { 521 QEMU_IFLA_INET6_UNSPEC, 522 QEMU_IFLA_INET6_FLAGS, 523 QEMU_IFLA_INET6_CONF, 524 QEMU_IFLA_INET6_STATS, 525 QEMU_IFLA_INET6_MCAST, 526 QEMU_IFLA_INET6_CACHEINFO, 527 QEMU_IFLA_INET6_ICMP6STATS, 528 QEMU_IFLA_INET6_TOKEN, 529 QEMU_IFLA_INET6_ADDR_GEN_MODE, 530 QEMU___IFLA_INET6_MAX 531 }; 532 533 enum { 534 QEMU_IFLA_XDP_UNSPEC, 535 QEMU_IFLA_XDP_FD, 536 QEMU_IFLA_XDP_ATTACHED, 537 QEMU_IFLA_XDP_FLAGS, 538 QEMU_IFLA_XDP_PROG_ID, 539 QEMU___IFLA_XDP_MAX, 540 }; 541 542 typedef abi_long (*TargetFdDataFunc)(void *, size_t); 543 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t); 544 typedef struct TargetFdTrans { 545 TargetFdDataFunc host_to_target_data; 546 TargetFdDataFunc target_to_host_data; 547 TargetFdAddrFunc target_to_host_addr; 548 } TargetFdTrans; 549 550 static TargetFdTrans **target_fd_trans; 551 552 static unsigned int target_fd_max; 553 554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd) 555 { 556 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 557 return target_fd_trans[fd]->target_to_host_data; 558 } 559 return NULL; 560 } 561 562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd) 563 { 564 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 565 return target_fd_trans[fd]->host_to_target_data; 566 } 567 
return NULL; 568 } 569 570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd) 571 { 572 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) { 573 return target_fd_trans[fd]->target_to_host_addr; 574 } 575 return NULL; 576 } 577 578 static void fd_trans_register(int fd, TargetFdTrans *trans) 579 { 580 unsigned int oldmax; 581 582 if (fd >= target_fd_max) { 583 oldmax = target_fd_max; 584 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */ 585 target_fd_trans = g_renew(TargetFdTrans *, 586 target_fd_trans, target_fd_max); 587 memset((void *)(target_fd_trans + oldmax), 0, 588 (target_fd_max - oldmax) * sizeof(TargetFdTrans *)); 589 } 590 target_fd_trans[fd] = trans; 591 } 592 593 static void fd_trans_unregister(int fd) 594 { 595 if (fd >= 0 && fd < target_fd_max) { 596 target_fd_trans[fd] = NULL; 597 } 598 } 599 600 static void fd_trans_dup(int oldfd, int newfd) 601 { 602 fd_trans_unregister(newfd); 603 if (oldfd < target_fd_max && target_fd_trans[oldfd]) { 604 fd_trans_register(newfd, target_fd_trans[oldfd]); 605 } 606 } 607 608 static int sys_getcwd1(char *buf, size_t size) 609 { 610 if (getcwd(buf, size) == NULL) { 611 /* getcwd() sets errno */ 612 return (-1); 613 } 614 return strlen(buf)+1; 615 } 616 617 #ifdef TARGET_NR_utimensat 618 #if defined(__NR_utimensat) 619 #define __NR_sys_utimensat __NR_utimensat 620 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 621 const struct timespec *,tsp,int,flags) 622 #else 623 static int sys_utimensat(int dirfd, const char *pathname, 624 const struct timespec times[2], int flags) 625 { 626 errno = ENOSYS; 627 return -1; 628 } 629 #endif 630 #endif /* TARGET_NR_utimensat */ 631 632 #ifdef TARGET_NR_renameat2 633 #if defined(__NR_renameat2) 634 #define __NR_sys_renameat2 __NR_renameat2 635 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd, 636 const char *, new, unsigned int, flags) 637 #else 638 static int sys_renameat2(int oldfd, const char *old, 639 int newfd, 
const char *new, int flags) 640 { 641 if (flags == 0) { 642 return renameat(oldfd, old, newfd, new); 643 } 644 errno = ENOSYS; 645 return -1; 646 } 647 #endif 648 #endif /* TARGET_NR_renameat2 */ 649 650 #ifdef CONFIG_INOTIFY 651 #include <sys/inotify.h> 652 653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 654 static int sys_inotify_init(void) 655 { 656 return (inotify_init()); 657 } 658 #endif 659 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 660 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 661 { 662 return (inotify_add_watch(fd, pathname, mask)); 663 } 664 #endif 665 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 666 static int sys_inotify_rm_watch(int fd, int32_t wd) 667 { 668 return (inotify_rm_watch(fd, wd)); 669 } 670 #endif 671 #ifdef CONFIG_INOTIFY1 672 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 673 static int sys_inotify_init1(int flags) 674 { 675 return (inotify_init1(flags)); 676 } 677 #endif 678 #endif 679 #else 680 /* Userspace can usually survive runtime without inotify */ 681 #undef TARGET_NR_inotify_init 682 #undef TARGET_NR_inotify_init1 683 #undef TARGET_NR_inotify_add_watch 684 #undef TARGET_NR_inotify_rm_watch 685 #endif /* CONFIG_INOTIFY */ 686 687 #if defined(TARGET_NR_prlimit64) 688 #ifndef __NR_prlimit64 689 # define __NR_prlimit64 -1 690 #endif 691 #define __NR_sys_prlimit64 __NR_prlimit64 692 /* The glibc rlimit structure may not be that used by the underlying syscall */ 693 struct host_rlimit64 { 694 uint64_t rlim_cur; 695 uint64_t rlim_max; 696 }; 697 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 698 const struct host_rlimit64 *, new_limit, 699 struct host_rlimit64 *, old_limit) 700 #endif 701 702 703 #if defined(TARGET_NR_timer_create) 704 /* Maxiumum of 32 active POSIX timers allowed at any one time. 
*/ 705 static timer_t g_posix_timers[32] = { 0, } ; 706 707 static inline int next_free_host_timer(void) 708 { 709 int k ; 710 /* FIXME: Does finding the next free slot require a lock? */ 711 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) { 712 if (g_posix_timers[k] == 0) { 713 g_posix_timers[k] = (timer_t) 1; 714 return k; 715 } 716 } 717 return -1; 718 } 719 #endif 720 721 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */ 722 #ifdef TARGET_ARM 723 static inline int regpairs_aligned(void *cpu_env, int num) 724 { 725 return ((((CPUARMState *)cpu_env)->eabi) == 1) ; 726 } 727 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32) 728 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } 729 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64) 730 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs 731 * of registers which translates to the same as ARM/MIPS, because we start with 732 * r3 as arg1 */ 733 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } 734 #elif defined(TARGET_SH4) 735 /* SH4 doesn't align register pairs, except for p{read,write}64 */ 736 static inline int regpairs_aligned(void *cpu_env, int num) 737 { 738 switch (num) { 739 case TARGET_NR_pread64: 740 case TARGET_NR_pwrite64: 741 return 1; 742 743 default: 744 return 0; 745 } 746 } 747 #elif defined(TARGET_XTENSA) 748 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; } 749 #else 750 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; } 751 #endif 752 753 #define ERRNO_TABLE_SIZE 1200 754 755 /* target_to_host_errno_table[] is initialized from 756 * host_to_target_errno_table[] in syscall_init(). */ 757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 758 }; 759 760 /* 761 * This list is the union of errno values overridden in asm-<arch>/errno.h 762 * minus the errnos that are not actually generic to all archs. 
763 */ 764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 765 [EAGAIN] = TARGET_EAGAIN, 766 [EIDRM] = TARGET_EIDRM, 767 [ECHRNG] = TARGET_ECHRNG, 768 [EL2NSYNC] = TARGET_EL2NSYNC, 769 [EL3HLT] = TARGET_EL3HLT, 770 [EL3RST] = TARGET_EL3RST, 771 [ELNRNG] = TARGET_ELNRNG, 772 [EUNATCH] = TARGET_EUNATCH, 773 [ENOCSI] = TARGET_ENOCSI, 774 [EL2HLT] = TARGET_EL2HLT, 775 [EDEADLK] = TARGET_EDEADLK, 776 [ENOLCK] = TARGET_ENOLCK, 777 [EBADE] = TARGET_EBADE, 778 [EBADR] = TARGET_EBADR, 779 [EXFULL] = TARGET_EXFULL, 780 [ENOANO] = TARGET_ENOANO, 781 [EBADRQC] = TARGET_EBADRQC, 782 [EBADSLT] = TARGET_EBADSLT, 783 [EBFONT] = TARGET_EBFONT, 784 [ENOSTR] = TARGET_ENOSTR, 785 [ENODATA] = TARGET_ENODATA, 786 [ETIME] = TARGET_ETIME, 787 [ENOSR] = TARGET_ENOSR, 788 [ENONET] = TARGET_ENONET, 789 [ENOPKG] = TARGET_ENOPKG, 790 [EREMOTE] = TARGET_EREMOTE, 791 [ENOLINK] = TARGET_ENOLINK, 792 [EADV] = TARGET_EADV, 793 [ESRMNT] = TARGET_ESRMNT, 794 [ECOMM] = TARGET_ECOMM, 795 [EPROTO] = TARGET_EPROTO, 796 [EDOTDOT] = TARGET_EDOTDOT, 797 [EMULTIHOP] = TARGET_EMULTIHOP, 798 [EBADMSG] = TARGET_EBADMSG, 799 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 800 [EOVERFLOW] = TARGET_EOVERFLOW, 801 [ENOTUNIQ] = TARGET_ENOTUNIQ, 802 [EBADFD] = TARGET_EBADFD, 803 [EREMCHG] = TARGET_EREMCHG, 804 [ELIBACC] = TARGET_ELIBACC, 805 [ELIBBAD] = TARGET_ELIBBAD, 806 [ELIBSCN] = TARGET_ELIBSCN, 807 [ELIBMAX] = TARGET_ELIBMAX, 808 [ELIBEXEC] = TARGET_ELIBEXEC, 809 [EILSEQ] = TARGET_EILSEQ, 810 [ENOSYS] = TARGET_ENOSYS, 811 [ELOOP] = TARGET_ELOOP, 812 [ERESTART] = TARGET_ERESTART, 813 [ESTRPIPE] = TARGET_ESTRPIPE, 814 [ENOTEMPTY] = TARGET_ENOTEMPTY, 815 [EUSERS] = TARGET_EUSERS, 816 [ENOTSOCK] = TARGET_ENOTSOCK, 817 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 818 [EMSGSIZE] = TARGET_EMSGSIZE, 819 [EPROTOTYPE] = TARGET_EPROTOTYPE, 820 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 821 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 822 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 823 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 824 
[EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 825 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 826 [EADDRINUSE] = TARGET_EADDRINUSE, 827 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 828 [ENETDOWN] = TARGET_ENETDOWN, 829 [ENETUNREACH] = TARGET_ENETUNREACH, 830 [ENETRESET] = TARGET_ENETRESET, 831 [ECONNABORTED] = TARGET_ECONNABORTED, 832 [ECONNRESET] = TARGET_ECONNRESET, 833 [ENOBUFS] = TARGET_ENOBUFS, 834 [EISCONN] = TARGET_EISCONN, 835 [ENOTCONN] = TARGET_ENOTCONN, 836 [EUCLEAN] = TARGET_EUCLEAN, 837 [ENOTNAM] = TARGET_ENOTNAM, 838 [ENAVAIL] = TARGET_ENAVAIL, 839 [EISNAM] = TARGET_EISNAM, 840 [EREMOTEIO] = TARGET_EREMOTEIO, 841 [EDQUOT] = TARGET_EDQUOT, 842 [ESHUTDOWN] = TARGET_ESHUTDOWN, 843 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 844 [ETIMEDOUT] = TARGET_ETIMEDOUT, 845 [ECONNREFUSED] = TARGET_ECONNREFUSED, 846 [EHOSTDOWN] = TARGET_EHOSTDOWN, 847 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 848 [EALREADY] = TARGET_EALREADY, 849 [EINPROGRESS] = TARGET_EINPROGRESS, 850 [ESTALE] = TARGET_ESTALE, 851 [ECANCELED] = TARGET_ECANCELED, 852 [ENOMEDIUM] = TARGET_ENOMEDIUM, 853 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 854 #ifdef ENOKEY 855 [ENOKEY] = TARGET_ENOKEY, 856 #endif 857 #ifdef EKEYEXPIRED 858 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 859 #endif 860 #ifdef EKEYREVOKED 861 [EKEYREVOKED] = TARGET_EKEYREVOKED, 862 #endif 863 #ifdef EKEYREJECTED 864 [EKEYREJECTED] = TARGET_EKEYREJECTED, 865 #endif 866 #ifdef EOWNERDEAD 867 [EOWNERDEAD] = TARGET_EOWNERDEAD, 868 #endif 869 #ifdef ENOTRECOVERABLE 870 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 871 #endif 872 #ifdef ENOMSG 873 [ENOMSG] = TARGET_ENOMSG, 874 #endif 875 #ifdef ERKFILL 876 [ERFKILL] = TARGET_ERFKILL, 877 #endif 878 #ifdef EHWPOISON 879 [EHWPOISON] = TARGET_EHWPOISON, 880 #endif 881 }; 882 883 static inline int host_to_target_errno(int err) 884 { 885 if (err >= 0 && err < ERRNO_TABLE_SIZE && 886 host_to_target_errno_table[err]) { 887 return host_to_target_errno_table[err]; 888 } 889 return err; 890 } 891 892 static inline int target_to_host_errno(int 
err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

/* Fold a host syscall result into the target convention: on failure
 * (host returned -1) return the negated, target-translated errno;
 * otherwise pass the value through unchanged. */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

/* True when RET, viewed unsigned, falls in the negated-errno range
 * (>= (abi_ulong)-4096), i.e. it encodes an error return. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

/* Human-readable string for a target errno value; handles the two
 * QEMU-internal pseudo-errnos specially and returns NULL for values
 * outside the translation table's range. */
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

/* safe_syscallN(type, name, ...): expand to a typed static wrapper
 * safe_<name>() that invokes the raw safe_syscall() primitive with
 * __NR_<name> and the given arguments.  One macro per arity 0..6. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

/* Typed safe_* wrappers for the syscalls the emulation layer issues
 * directly (bypassing libc). */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
/* msgsnd via the multiplexed ipc syscall (version 0 call encoding). */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv via ipc; version 1 encoding passes msgtype in "fifth". */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
/* semtimedop via ipc; the timeout pointer travels in "fifth". */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl.
   Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

/* Translate a host socket type value into the target's encoding.
 * The low nibble is the base type (DGRAM/STREAM mapped explicitly,
 * anything else passed through); the SOCK_CLOEXEC/SOCK_NONBLOCK flag
 * bits are translated separately when the host defines them. */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

/* Guest heap state used by do_brk(): the current break, the break at
 * program start, and the top of the highest page already reserved for
 * the heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the guest's initial program break (host-page aligned). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos.
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* The break can never move below where it started. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

/* Copy a guest fd_set (an array of abi_ulong bit-words, byte-swapped
 * per target endianness) covering N descriptors into a host fd_set.
 * Returns 0, or -TARGET_EFAULT if the guest memory is unmapped. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/* As copy_from_user_fdset(), but a zero (NULL) guest address produces
 * a NULL host pointer via *fds_ptr instead of performing a copy —
 * preserving select()'s "NULL set" semantics. */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
/* Copy a host fd_set covering N descriptors back out to the guest's
 * abi_ulong bit-word representation.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from the host's HZ to the target's. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Write a host struct rusage out to guest memory, byte-swapping every
 * field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

/* Convert a guest rlimit value to the host's rlim_t; any value that is
 * the target infinity marker, or that doesn't survive the round-trip
 * through rlim_t, becomes RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

/* Convert a host rlim_t to the guest representation; values that don't
 * fit the target word become TARGET_RLIM_INFINITY. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

/* Map a target RLIMIT_* resource code to the host's constant; unknown
 * codes are passed through unchanged. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/* Read a guest struct timeval into a host one.  0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/* Write a host struct timeval out to the guest.  0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

/* Read a guest struct timezone into a host one.  0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into a host one.  0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr out to the guest.  0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select's timeval is converted to pselect6's timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the (possibly updated) timeout. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: the five arguments arrive packed in a single guest
 * struct whose address is arg1; unpack and forward to do_select(). */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

/* pipe2() shim: use the host call when configured in, else ENOSYS. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

/* Create a pipe for the guest.  Some targets return the second fd in a
 * CPU register rather than through memory for the original pipe
 * syscall (but not for pipe2), which is handled per-target below. */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/* Read a guest ip_mreq/ip_mreqn into a host ip_mreqn; the optional
 * ifindex field is only present (and swapped) when the guest passed
 * the full target_ip_mreqn size. */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Read a guest sockaddr into a host one, byte-swapping the family and
 * the family-specific fields that need it.  A per-fd translation hook
 * (fd_trans_target_to_host_addr) takes precedence when registered. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/* Write a host sockaddr out to the guest, byte-swapping the family and
 * the family-specific fields; copies only when len is non-zero and the
 * swaps guard against short buffers where a size check is visible. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the buffer is big enough to contain it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* Convert the guest control-message (cmsg) chain attached to a msghdr
 * into the host's layout, translating SCM_RIGHTS fds and
 * SCM_CREDENTIALS; unrecognised payloads are copied verbatim with a
 * warning.  On return msgh->msg_controllen reflects the host space
 * actually used. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* NOTE(review): msg_controllen is signed here; the comparison with
     * an unsigned sizeof converts it — a negative guest value would
     * pass.  Presumably benign since lock_user would then fail, but
     * worth confirming. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each 32-bit fd. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* Convert the host cmsg chain back into the guest's layout, handling
 * per-type payload translation (fds, timestamps, credentials, TTL /
 * hop-limit, extended error reports) and setting MSG_CTRUNC when the
 * guest buffer is too small — mirroring the kernel's put_cmsg(). */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

/* Byte-swap all fields of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}

static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len <
sizeof(struct nlmsghdr) || 2058 nlmsg_len > len) { 2059 break; 2060 } 2061 2062 switch (nlh->nlmsg_type) { 2063 case NLMSG_DONE: 2064 tswap_nlmsghdr(nlh); 2065 return 0; 2066 case NLMSG_NOOP: 2067 break; 2068 case NLMSG_ERROR: 2069 { 2070 struct nlmsgerr *e = NLMSG_DATA(nlh); 2071 e->error = tswap32(e->error); 2072 tswap_nlmsghdr(&e->msg); 2073 tswap_nlmsghdr(nlh); 2074 return 0; 2075 } 2076 default: 2077 ret = host_to_target_nlmsg(nlh); 2078 if (ret < 0) { 2079 tswap_nlmsghdr(nlh); 2080 return ret; 2081 } 2082 break; 2083 } 2084 tswap_nlmsghdr(nlh); 2085 len -= NLMSG_ALIGN(nlmsg_len); 2086 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len)); 2087 } 2088 return 0; 2089 } 2090 2091 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh, 2092 size_t len, 2093 abi_long (*target_to_host_nlmsg) 2094 (struct nlmsghdr *)) 2095 { 2096 int ret; 2097 2098 while (len > sizeof(struct nlmsghdr)) { 2099 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) || 2100 tswap32(nlh->nlmsg_len) > len) { 2101 break; 2102 } 2103 tswap_nlmsghdr(nlh); 2104 switch (nlh->nlmsg_type) { 2105 case NLMSG_DONE: 2106 return 0; 2107 case NLMSG_NOOP: 2108 break; 2109 case NLMSG_ERROR: 2110 { 2111 struct nlmsgerr *e = NLMSG_DATA(nlh); 2112 e->error = tswap32(e->error); 2113 tswap_nlmsghdr(&e->msg); 2114 return 0; 2115 } 2116 default: 2117 ret = target_to_host_nlmsg(nlh); 2118 if (ret < 0) { 2119 return ret; 2120 } 2121 } 2122 len -= NLMSG_ALIGN(nlh->nlmsg_len); 2123 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len)); 2124 } 2125 return 0; 2126 } 2127 2128 #ifdef CONFIG_RTNETLINK 2129 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr, 2130 size_t len, void *context, 2131 abi_long (*host_to_target_nlattr) 2132 (struct nlattr *, 2133 void *context)) 2134 { 2135 unsigned short nla_len; 2136 abi_long ret; 2137 2138 while (len > sizeof(struct nlattr)) { 2139 nla_len = nlattr->nla_len; 2140 if (nla_len < sizeof(struct nlattr) || 2141 nla_len > 
            len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        /* swap the attribute header only after the payload converter has
         * read nla_len/nla_type in host order */
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}

/* Same walk for rtattr-style attributes (RTA_* alignment/macros). */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}

/* Pointer to an nlattr's payload (first byte past the aligned header). */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)

/* Byte-swap the payload of one IFLA_BR_* bridge attribute according to its
 * type's width; single-byte and opaque binary payloads are left untouched. */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
    case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
    case QEMU_IFLA_BR_MCAST_MLD_VERSION:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Byte-swap the payload of one IFLA_BRPORT_* bridge-port attribute. */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case
         QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
    case QEMU_IFLA_BRPORT_MCAST_FLOOD:
    case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
    case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
    case QEMU_IFLA_BRPORT_BCAST_FLOOD:
    case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
        /* single bytes need no swapping */
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
    case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings seen earlier in
 * an IFLA_LINKINFO nest so the nested DATA attributes can be dispatched on
 * the link kind. */
struct linkinfo_context {
    int len;          /* length of name, including NUL padding */
    char *name;       /* link kind string (e.g. "bridge") */
    int slave_len;    /* length of slave_name */
    char *slave_name; /* slave link kind string */
};

/* Convert one attribute inside an IFLA_LINKINFO nest.  KIND attributes are
 * recorded in the context; DATA attributes are recursed into using the
 * converter matching the recorded kind (only "bridge" is supported). */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case QEMU_IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case QEMU_IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case QEMU_IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case QEMU_IFLA_INFO_DATA:
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case QEMU_IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}

/* Convert one AF_INET attribute inside IFLA_AF_SPEC: the CONF payload is an
 * array of uint32_t device-configuration values. */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    case QEMU_IFLA_INET_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}

/* Convert one AF_INET6 attribute inside IFLA_AF_SPEC (flags, conf array,
 * cacheinfo struct, and 64-bit stats arrays). */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}

/* Dispatch an IFLA_AF_SPEC nest by address family to the matching
 * per-family attribute converter. */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Convert one attribute inside an IFLA_XDP nest. */
static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
                                               void *context)
{
    uint32_t *u32;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_XDP_ATTACHED:
        break;
    /* uint32_t */
    case QEMU_IFLA_XDP_PROG_ID:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}

/* Byte-swap the payload of one IFLA_* attribute of an RTM_*LINK message,
 * recursing into the LINKINFO / AF_SPEC / XDP nests. */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    case QEMU_IFLA_XDP:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                                host_to_target_data_xdp_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Byte-swap the payload of one IFA_* attribute of an RTM_*ADDR message. */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Byte-swap the payload of one RTA_* attribute of an RTM_*ROUTE message. */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Thin wrappers binding the generic rtattr walker to the per-message-type
 * attribute converters above. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}

static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}

static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}

/* Convert the fixed payload header (ifinfomsg/ifaddrmsg/rtmsg) of one
 * rtnetlink message from host to target order, then walk its attributes.
 * Returns -TARGET_EINVAL for message types that are not handled. */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* save the host-order length before any field gets swapped */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}

static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}

/* Walk a run of target-order rtattrs, swapping each header to host order
 * before the payload converter runs (lengths are validated via tswap16
 * because the buffer arrives in target order). */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                                   RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}

/* No link attributes are converted in the target -> host direction yet;
 * anything seen here is logged and passed through unchanged. */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Convert one target-supplied IFA_* attribute payload to host order. */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Convert one target-supplied RTA_* attribute payload to host order. */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}

/* Target -> host counterparts of the wrapper trio above. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}

static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}

static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}

/* Convert the fixed payload header of one target-order rtnetlink request
 * to host order (the nlmsghdr itself was already swapped by the caller).
 * GET requests carry no payload header to convert.  Returns
 * -TARGET_EOPNOTSUPP for unhandled message types. */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}

static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
#endif /* CONFIG_RTNETLINK */

/* No audit message payloads are converted host -> target: every type is
 * logged and rejected with -TARGET_EINVAL. */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }
    return 0;
}

static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}

/* Accept user-generated audit message types (their payloads are opaque
 * text and need no conversion); reject everything else. */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}

static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}

/* do_setsockopt() Must return target values and target errnos.
*/ 2945 static abi_long do_setsockopt(int sockfd, int level, int optname, 2946 abi_ulong optval_addr, socklen_t optlen) 2947 { 2948 abi_long ret; 2949 int val; 2950 struct ip_mreqn *ip_mreq; 2951 struct ip_mreq_source *ip_mreq_source; 2952 2953 switch(level) { 2954 case SOL_TCP: 2955 /* TCP options all take an 'int' value. */ 2956 if (optlen < sizeof(uint32_t)) 2957 return -TARGET_EINVAL; 2958 2959 if (get_user_u32(val, optval_addr)) 2960 return -TARGET_EFAULT; 2961 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2962 break; 2963 case SOL_IP: 2964 switch(optname) { 2965 case IP_TOS: 2966 case IP_TTL: 2967 case IP_HDRINCL: 2968 case IP_ROUTER_ALERT: 2969 case IP_RECVOPTS: 2970 case IP_RETOPTS: 2971 case IP_PKTINFO: 2972 case IP_MTU_DISCOVER: 2973 case IP_RECVERR: 2974 case IP_RECVTTL: 2975 case IP_RECVTOS: 2976 #ifdef IP_FREEBIND 2977 case IP_FREEBIND: 2978 #endif 2979 case IP_MULTICAST_TTL: 2980 case IP_MULTICAST_LOOP: 2981 val = 0; 2982 if (optlen >= sizeof(uint32_t)) { 2983 if (get_user_u32(val, optval_addr)) 2984 return -TARGET_EFAULT; 2985 } else if (optlen >= 1) { 2986 if (get_user_u8(val, optval_addr)) 2987 return -TARGET_EFAULT; 2988 } 2989 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2990 break; 2991 case IP_ADD_MEMBERSHIP: 2992 case IP_DROP_MEMBERSHIP: 2993 if (optlen < sizeof (struct target_ip_mreq) || 2994 optlen > sizeof (struct target_ip_mreqn)) 2995 return -TARGET_EINVAL; 2996 2997 ip_mreq = (struct ip_mreqn *) alloca(optlen); 2998 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 2999 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 3000 break; 3001 3002 case IP_BLOCK_SOURCE: 3003 case IP_UNBLOCK_SOURCE: 3004 case IP_ADD_SOURCE_MEMBERSHIP: 3005 case IP_DROP_SOURCE_MEMBERSHIP: 3006 if (optlen != sizeof (struct target_ip_mreq_source)) 3007 return -TARGET_EINVAL; 3008 3009 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 3010 ret = get_errno(setsockopt(sockfd, 
level, optname, ip_mreq_source, optlen)); 3011 unlock_user (ip_mreq_source, optval_addr, 0); 3012 break; 3013 3014 default: 3015 goto unimplemented; 3016 } 3017 break; 3018 case SOL_IPV6: 3019 switch (optname) { 3020 case IPV6_MTU_DISCOVER: 3021 case IPV6_MTU: 3022 case IPV6_V6ONLY: 3023 case IPV6_RECVPKTINFO: 3024 case IPV6_UNICAST_HOPS: 3025 case IPV6_RECVERR: 3026 case IPV6_RECVHOPLIMIT: 3027 case IPV6_2292HOPLIMIT: 3028 case IPV6_CHECKSUM: 3029 val = 0; 3030 if (optlen < sizeof(uint32_t)) { 3031 return -TARGET_EINVAL; 3032 } 3033 if (get_user_u32(val, optval_addr)) { 3034 return -TARGET_EFAULT; 3035 } 3036 ret = get_errno(setsockopt(sockfd, level, optname, 3037 &val, sizeof(val))); 3038 break; 3039 case IPV6_PKTINFO: 3040 { 3041 struct in6_pktinfo pki; 3042 3043 if (optlen < sizeof(pki)) { 3044 return -TARGET_EINVAL; 3045 } 3046 3047 if (copy_from_user(&pki, optval_addr, sizeof(pki))) { 3048 return -TARGET_EFAULT; 3049 } 3050 3051 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex); 3052 3053 ret = get_errno(setsockopt(sockfd, level, optname, 3054 &pki, sizeof(pki))); 3055 break; 3056 } 3057 default: 3058 goto unimplemented; 3059 } 3060 break; 3061 case SOL_ICMPV6: 3062 switch (optname) { 3063 case ICMPV6_FILTER: 3064 { 3065 struct icmp6_filter icmp6f; 3066 3067 if (optlen > sizeof(icmp6f)) { 3068 optlen = sizeof(icmp6f); 3069 } 3070 3071 if (copy_from_user(&icmp6f, optval_addr, optlen)) { 3072 return -TARGET_EFAULT; 3073 } 3074 3075 for (val = 0; val < 8; val++) { 3076 icmp6f.data[val] = tswap32(icmp6f.data[val]); 3077 } 3078 3079 ret = get_errno(setsockopt(sockfd, level, optname, 3080 &icmp6f, optlen)); 3081 break; 3082 } 3083 default: 3084 goto unimplemented; 3085 } 3086 break; 3087 case SOL_RAW: 3088 switch (optname) { 3089 case ICMP_FILTER: 3090 case IPV6_CHECKSUM: 3091 /* those take an u32 value */ 3092 if (optlen < sizeof(uint32_t)) { 3093 return -TARGET_EINVAL; 3094 } 3095 3096 if (get_user_u32(val, optval_addr)) { 3097 return -TARGET_EFAULT; 3098 } 3099 ret = 
get_errno(setsockopt(sockfd, level, optname, 3100 &val, sizeof(val))); 3101 break; 3102 3103 default: 3104 goto unimplemented; 3105 } 3106 break; 3107 case TARGET_SOL_SOCKET: 3108 switch (optname) { 3109 case TARGET_SO_RCVTIMEO: 3110 { 3111 struct timeval tv; 3112 3113 optname = SO_RCVTIMEO; 3114 3115 set_timeout: 3116 if (optlen != sizeof(struct target_timeval)) { 3117 return -TARGET_EINVAL; 3118 } 3119 3120 if (copy_from_user_timeval(&tv, optval_addr)) { 3121 return -TARGET_EFAULT; 3122 } 3123 3124 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 3125 &tv, sizeof(tv))); 3126 return ret; 3127 } 3128 case TARGET_SO_SNDTIMEO: 3129 optname = SO_SNDTIMEO; 3130 goto set_timeout; 3131 case TARGET_SO_ATTACH_FILTER: 3132 { 3133 struct target_sock_fprog *tfprog; 3134 struct target_sock_filter *tfilter; 3135 struct sock_fprog fprog; 3136 struct sock_filter *filter; 3137 int i; 3138 3139 if (optlen != sizeof(*tfprog)) { 3140 return -TARGET_EINVAL; 3141 } 3142 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 3143 return -TARGET_EFAULT; 3144 } 3145 if (!lock_user_struct(VERIFY_READ, tfilter, 3146 tswapal(tfprog->filter), 0)) { 3147 unlock_user_struct(tfprog, optval_addr, 1); 3148 return -TARGET_EFAULT; 3149 } 3150 3151 fprog.len = tswap16(tfprog->len); 3152 filter = g_try_new(struct sock_filter, fprog.len); 3153 if (filter == NULL) { 3154 unlock_user_struct(tfilter, tfprog->filter, 1); 3155 unlock_user_struct(tfprog, optval_addr, 1); 3156 return -TARGET_ENOMEM; 3157 } 3158 for (i = 0; i < fprog.len; i++) { 3159 filter[i].code = tswap16(tfilter[i].code); 3160 filter[i].jt = tfilter[i].jt; 3161 filter[i].jf = tfilter[i].jf; 3162 filter[i].k = tswap32(tfilter[i].k); 3163 } 3164 fprog.filter = filter; 3165 3166 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 3167 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 3168 g_free(filter); 3169 3170 unlock_user_struct(tfilter, tfprog->filter, 1); 3171 unlock_user_struct(tfprog, optval_addr, 1); 3172 return ret; 3173 } 3174 
case TARGET_SO_BINDTODEVICE: 3175 { 3176 char *dev_ifname, *addr_ifname; 3177 3178 if (optlen > IFNAMSIZ - 1) { 3179 optlen = IFNAMSIZ - 1; 3180 } 3181 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1); 3182 if (!dev_ifname) { 3183 return -TARGET_EFAULT; 3184 } 3185 optname = SO_BINDTODEVICE; 3186 addr_ifname = alloca(IFNAMSIZ); 3187 memcpy(addr_ifname, dev_ifname, optlen); 3188 addr_ifname[optlen] = 0; 3189 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 3190 addr_ifname, optlen)); 3191 unlock_user (dev_ifname, optval_addr, 0); 3192 return ret; 3193 } 3194 /* Options with 'int' argument. */ 3195 case TARGET_SO_DEBUG: 3196 optname = SO_DEBUG; 3197 break; 3198 case TARGET_SO_REUSEADDR: 3199 optname = SO_REUSEADDR; 3200 break; 3201 case TARGET_SO_TYPE: 3202 optname = SO_TYPE; 3203 break; 3204 case TARGET_SO_ERROR: 3205 optname = SO_ERROR; 3206 break; 3207 case TARGET_SO_DONTROUTE: 3208 optname = SO_DONTROUTE; 3209 break; 3210 case TARGET_SO_BROADCAST: 3211 optname = SO_BROADCAST; 3212 break; 3213 case TARGET_SO_SNDBUF: 3214 optname = SO_SNDBUF; 3215 break; 3216 case TARGET_SO_SNDBUFFORCE: 3217 optname = SO_SNDBUFFORCE; 3218 break; 3219 case TARGET_SO_RCVBUF: 3220 optname = SO_RCVBUF; 3221 break; 3222 case TARGET_SO_RCVBUFFORCE: 3223 optname = SO_RCVBUFFORCE; 3224 break; 3225 case TARGET_SO_KEEPALIVE: 3226 optname = SO_KEEPALIVE; 3227 break; 3228 case TARGET_SO_OOBINLINE: 3229 optname = SO_OOBINLINE; 3230 break; 3231 case TARGET_SO_NO_CHECK: 3232 optname = SO_NO_CHECK; 3233 break; 3234 case TARGET_SO_PRIORITY: 3235 optname = SO_PRIORITY; 3236 break; 3237 #ifdef SO_BSDCOMPAT 3238 case TARGET_SO_BSDCOMPAT: 3239 optname = SO_BSDCOMPAT; 3240 break; 3241 #endif 3242 case TARGET_SO_PASSCRED: 3243 optname = SO_PASSCRED; 3244 break; 3245 case TARGET_SO_PASSSEC: 3246 optname = SO_PASSSEC; 3247 break; 3248 case TARGET_SO_TIMESTAMP: 3249 optname = SO_TIMESTAMP; 3250 break; 3251 case TARGET_SO_RCVLOWAT: 3252 optname = SO_RCVLOWAT; 3253 break; 3254 default: 3255 
            goto unimplemented;
        }
        /* Common epilogue for the SOL_SOCKET options that take a plain
         * 'int' argument: fetch the 32-bit value from guest memory and
         * pass it through to the host setsockopt().
         */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos.
 * Translates the target (level, optname) pair to the host's numbering,
 * performs the host getsockopt(), and copies the result (and the updated
 * option length) back to guest memory.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* Returns a struct ucred; convert field-by-field into the
             * target's layout via __put_user.
             */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Never copy out more than the caller asked for, nor more
             * than the host produced.
             */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* NOTE(review): unknown target optnames are passed straight
             * through to the host untranslated — relies on target and host
             * numbering agreeing for anything not listed above.
             */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* socklen_t is 4 bytes, matching the int the host writes */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back either a full 32-bit value or a single byte,
         * depending on the length the caller supplied.
         */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small byte-sized reads are allowed for values that fit */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* Convert target low/high pair representing file offset into the host
 * low/high pair.
This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* Each shift is done in two half-width steps so the total shift
     * equals the register width without ever shifting by >= the width
     * of the type in one operation (which would be undefined).
     */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}

/* Lock a guest iovec array into host memory and build the corresponding
 * host struct iovec array.  On failure returns NULL with errno set
 * (caller converts to a target errno); on success the caller must
 * release everything with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths.
             */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one address is bad, all following entries are zeroed
             * so only the leading good prefix is transferred.
             */
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every guest buffer locked so far (entries with a
     * zero length were never locked).
     */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}

/* Release an iovec produced by lock_iovec().  If 'copy' is set the
 * locked buffers are written back to guest memory (the read path).
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}

/* Translate a target socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host's encoding in place.
 * Returns 0, or -TARGET_EINVAL if a requested flag cannot be emulated
 * on this host.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* No SOCK_NONBLOCK and no O_NONBLOCK fallback either */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    /* Host lacks SOCK_NONBLOCK: emulate it with fcntl after the fact */
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* Convert a guest sockaddr for an (obsolete) SOCK_PACKET socket into a
 * host sockaddr: only the family field is byte-swapped, the rest is
 * copied verbatim.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}

static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};

#ifdef CONFIG_RTNETLINK
/* Byte-swap NETLINK_ROUTE message payloads between guest and host.
 * On success both helpers report the full buffer length back, since the
 * conversion happens in place.
 */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */

/* Same in-place conversion scheme for NETLINK_AUDIT messages */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};

/* do_socket() Must return target values and target errnos.
*/ 3722 static abi_long do_socket(int domain, int type, int protocol) 3723 { 3724 int target_type = type; 3725 int ret; 3726 3727 ret = target_to_host_sock_type(&type); 3728 if (ret) { 3729 return ret; 3730 } 3731 3732 if (domain == PF_NETLINK && !( 3733 #ifdef CONFIG_RTNETLINK 3734 protocol == NETLINK_ROUTE || 3735 #endif 3736 protocol == NETLINK_KOBJECT_UEVENT || 3737 protocol == NETLINK_AUDIT)) { 3738 return -EPFNOSUPPORT; 3739 } 3740 3741 if (domain == AF_PACKET || 3742 (domain == AF_INET && type == SOCK_PACKET)) { 3743 protocol = tswap16(protocol); 3744 } 3745 3746 ret = get_errno(socket(domain, type, protocol)); 3747 if (ret >= 0) { 3748 ret = sock_flags_fixup(ret, target_type); 3749 if (type == SOCK_PACKET) { 3750 /* Manage an obsolete case : 3751 * if socket type is SOCK_PACKET, bind by name 3752 */ 3753 fd_trans_register(ret, &target_packet_trans); 3754 } else if (domain == PF_NETLINK) { 3755 switch (protocol) { 3756 #ifdef CONFIG_RTNETLINK 3757 case NETLINK_ROUTE: 3758 fd_trans_register(ret, &target_netlink_route_trans); 3759 break; 3760 #endif 3761 case NETLINK_KOBJECT_UEVENT: 3762 /* nothing to do: messages are strings */ 3763 break; 3764 case NETLINK_AUDIT: 3765 fd_trans_register(ret, &target_netlink_audit_trans); 3766 break; 3767 default: 3768 g_assert_not_reached(); 3769 } 3770 } 3771 } 3772 return ret; 3773 } 3774 3775 /* do_bind() Must return target values and target errnos. */ 3776 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3777 socklen_t addrlen) 3778 { 3779 void *addr; 3780 abi_long ret; 3781 3782 if ((int)addrlen < 0) { 3783 return -TARGET_EINVAL; 3784 } 3785 3786 addr = alloca(addrlen+1); 3787 3788 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3789 if (ret) 3790 return ret; 3791 3792 return get_errno(bind(sockfd, addr, addrlen)); 3793 } 3794 3795 /* do_connect() Must return target values and target errnos. 
 */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 leaves room for a trailing NUL added during conversion */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg_locked() Must return target values and target errnos.
 * Core of sendmsg/recvmsg emulation: converts a (already locked) guest
 * msghdr into a host one, performs the syscall, and on receive converts
 * the results back.  'send' selects the direction.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* The factor of 2 oversizes the host control buffer relative to the
     * guest's — presumably because host cmsg headers/alignment can be
     * larger than the target's; TODO confirm.
     */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* An fd data translator is installed (e.g. netlink): run it
             * on a private copy of the first iovec buffer only.
             */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count across the conversion calls */
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 is the bad-name sentinel set above */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

/* Lock the guest msghdr and dispatch to do_sendrecvmsg_locked().
 * The struct is copied in for sends and copied back out for receives.
 */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked().
 * Returns the number of datagrams processed if any succeeded, otherwise
 * the first error.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the i entries actually filled in are written back */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}

/* do_accept4() Must return target values and target errnos.
*/ 3984 static abi_long do_accept4(int fd, abi_ulong target_addr, 3985 abi_ulong target_addrlen_addr, int flags) 3986 { 3987 socklen_t addrlen; 3988 void *addr; 3989 abi_long ret; 3990 int host_flags; 3991 3992 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3993 3994 if (target_addr == 0) { 3995 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3996 } 3997 3998 /* linux returns EINVAL if addrlen pointer is invalid */ 3999 if (get_user_u32(addrlen, target_addrlen_addr)) 4000 return -TARGET_EINVAL; 4001 4002 if ((int)addrlen < 0) { 4003 return -TARGET_EINVAL; 4004 } 4005 4006 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 4007 return -TARGET_EINVAL; 4008 4009 addr = alloca(addrlen); 4010 4011 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags)); 4012 if (!is_error(ret)) { 4013 host_to_target_sockaddr(target_addr, addr, addrlen); 4014 if (put_user_u32(addrlen, target_addrlen_addr)) 4015 ret = -TARGET_EFAULT; 4016 } 4017 return ret; 4018 } 4019 4020 /* do_getpeername() Must return target values and target errnos. */ 4021 static abi_long do_getpeername(int fd, abi_ulong target_addr, 4022 abi_ulong target_addrlen_addr) 4023 { 4024 socklen_t addrlen; 4025 void *addr; 4026 abi_long ret; 4027 4028 if (get_user_u32(addrlen, target_addrlen_addr)) 4029 return -TARGET_EFAULT; 4030 4031 if ((int)addrlen < 0) { 4032 return -TARGET_EINVAL; 4033 } 4034 4035 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 4036 return -TARGET_EFAULT; 4037 4038 addr = alloca(addrlen); 4039 4040 ret = get_errno(getpeername(fd, addr, &addrlen)); 4041 if (!is_error(ret)) { 4042 host_to_target_sockaddr(target_addr, addr, addrlen); 4043 if (put_user_u32(addrlen, target_addrlen_addr)) 4044 ret = -TARGET_EFAULT; 4045 } 4046 return ret; 4047 } 4048 4049 /* do_getsockname() Must return target values and target errnos. 
*/ 4050 static abi_long do_getsockname(int fd, abi_ulong target_addr, 4051 abi_ulong target_addrlen_addr) 4052 { 4053 socklen_t addrlen; 4054 void *addr; 4055 abi_long ret; 4056 4057 if (get_user_u32(addrlen, target_addrlen_addr)) 4058 return -TARGET_EFAULT; 4059 4060 if ((int)addrlen < 0) { 4061 return -TARGET_EINVAL; 4062 } 4063 4064 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 4065 return -TARGET_EFAULT; 4066 4067 addr = alloca(addrlen); 4068 4069 ret = get_errno(getsockname(fd, addr, &addrlen)); 4070 if (!is_error(ret)) { 4071 host_to_target_sockaddr(target_addr, addr, addrlen); 4072 if (put_user_u32(addrlen, target_addrlen_addr)) 4073 ret = -TARGET_EFAULT; 4074 } 4075 return ret; 4076 } 4077 4078 /* do_socketpair() Must return target values and target errnos. */ 4079 static abi_long do_socketpair(int domain, int type, int protocol, 4080 abi_ulong target_tab_addr) 4081 { 4082 int tab[2]; 4083 abi_long ret; 4084 4085 target_to_host_sock_type(&type); 4086 4087 ret = get_errno(socketpair(domain, type, protocol, tab)); 4088 if (!is_error(ret)) { 4089 if (put_user_s32(tab[0], target_tab_addr) 4090 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 4091 ret = -TARGET_EFAULT; 4092 } 4093 return ret; 4094 } 4095 4096 /* do_sendto() Must return target values and target errnos. 
*/ 4097 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 4098 abi_ulong target_addr, socklen_t addrlen) 4099 { 4100 void *addr; 4101 void *host_msg; 4102 void *copy_msg = NULL; 4103 abi_long ret; 4104 4105 if ((int)addrlen < 0) { 4106 return -TARGET_EINVAL; 4107 } 4108 4109 host_msg = lock_user(VERIFY_READ, msg, len, 1); 4110 if (!host_msg) 4111 return -TARGET_EFAULT; 4112 if (fd_trans_target_to_host_data(fd)) { 4113 copy_msg = host_msg; 4114 host_msg = g_malloc(len); 4115 memcpy(host_msg, copy_msg, len); 4116 ret = fd_trans_target_to_host_data(fd)(host_msg, len); 4117 if (ret < 0) { 4118 goto fail; 4119 } 4120 } 4121 if (target_addr) { 4122 addr = alloca(addrlen+1); 4123 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 4124 if (ret) { 4125 goto fail; 4126 } 4127 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen)); 4128 } else { 4129 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0)); 4130 } 4131 fail: 4132 if (copy_msg) { 4133 g_free(host_msg); 4134 host_msg = copy_msg; 4135 } 4136 unlock_user(host_msg, msg, 0); 4137 return ret; 4138 } 4139 4140 /* do_recvfrom() Must return target values and target errnos. */ 4141 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 4142 abi_ulong target_addr, 4143 abi_ulong target_addrlen) 4144 { 4145 socklen_t addrlen; 4146 void *addr; 4147 void *host_msg; 4148 abi_long ret; 4149 4150 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 4151 if (!host_msg) 4152 return -TARGET_EFAULT; 4153 if (target_addr) { 4154 if (get_user_u32(addrlen, target_addrlen)) { 4155 ret = -TARGET_EFAULT; 4156 goto fail; 4157 } 4158 if ((int)addrlen < 0) { 4159 ret = -TARGET_EINVAL; 4160 goto fail; 4161 } 4162 addr = alloca(addrlen); 4163 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, 4164 addr, &addrlen)); 4165 } else { 4166 addr = NULL; /* To keep compiler quiet. 
*/ 4167 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0)); 4168 } 4169 if (!is_error(ret)) { 4170 if (fd_trans_host_to_target_data(fd)) { 4171 ret = fd_trans_host_to_target_data(fd)(host_msg, ret); 4172 } 4173 if (target_addr) { 4174 host_to_target_sockaddr(target_addr, addr, addrlen); 4175 if (put_user_u32(addrlen, target_addrlen)) { 4176 ret = -TARGET_EFAULT; 4177 goto fail; 4178 } 4179 } 4180 unlock_user(host_msg, msg, len); 4181 } else { 4182 fail: 4183 unlock_user(host_msg, msg, 0); 4184 } 4185 return ret; 4186 } 4187 4188 #ifdef TARGET_NR_socketcall 4189 /* do_socketcall() must return target values and target errnos. */ 4190 static abi_long do_socketcall(int num, abi_ulong vptr) 4191 { 4192 static const unsigned nargs[] = { /* number of arguments per operation */ 4193 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */ 4194 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */ 4195 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */ 4196 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */ 4197 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */ 4198 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */ 4199 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */ 4200 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */ 4201 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */ 4202 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */ 4203 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */ 4204 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */ 4205 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */ 4206 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 4207 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 4208 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */ 4209 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */ 4210 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */ 4211 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */ 4212 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */ 4213 }; 
4214 abi_long a[6]; /* max 6 args */ 4215 unsigned i; 4216 4217 /* check the range of the first argument num */ 4218 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */ 4219 if (num < 1 || num > TARGET_SYS_SENDMMSG) { 4220 return -TARGET_EINVAL; 4221 } 4222 /* ensure we have space for args */ 4223 if (nargs[num] > ARRAY_SIZE(a)) { 4224 return -TARGET_EINVAL; 4225 } 4226 /* collect the arguments in a[] according to nargs[] */ 4227 for (i = 0; i < nargs[num]; ++i) { 4228 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 4229 return -TARGET_EFAULT; 4230 } 4231 } 4232 /* now when we have the args, invoke the appropriate underlying function */ 4233 switch (num) { 4234 case TARGET_SYS_SOCKET: /* domain, type, protocol */ 4235 return do_socket(a[0], a[1], a[2]); 4236 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */ 4237 return do_bind(a[0], a[1], a[2]); 4238 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */ 4239 return do_connect(a[0], a[1], a[2]); 4240 case TARGET_SYS_LISTEN: /* sockfd, backlog */ 4241 return get_errno(listen(a[0], a[1])); 4242 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */ 4243 return do_accept4(a[0], a[1], a[2], 0); 4244 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */ 4245 return do_getsockname(a[0], a[1], a[2]); 4246 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */ 4247 return do_getpeername(a[0], a[1], a[2]); 4248 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */ 4249 return do_socketpair(a[0], a[1], a[2], a[3]); 4250 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */ 4251 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 4252 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */ 4253 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 4254 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */ 4255 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 4256 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */ 4257 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], 
a[5]); 4258 case TARGET_SYS_SHUTDOWN: /* sockfd, how */ 4259 return get_errno(shutdown(a[0], a[1])); 4260 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 4261 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 4262 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 4263 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 4264 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */ 4265 return do_sendrecvmsg(a[0], a[1], a[2], 1); 4266 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */ 4267 return do_sendrecvmsg(a[0], a[1], a[2], 0); 4268 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */ 4269 return do_accept4(a[0], a[1], a[2], a[3]); 4270 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */ 4271 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0); 4272 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */ 4273 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1); 4274 default: 4275 gemu_log("Unsupported socketcall: %d\n", num); 4276 return -TARGET_EINVAL; 4277 } 4278 } 4279 #endif 4280 4281 #define N_SHM_REGIONS 32 4282 4283 static struct shm_region { 4284 abi_ulong start; 4285 abi_ulong size; 4286 bool in_use; 4287 } shm_regions[N_SHM_REGIONS]; 4288 4289 #ifndef TARGET_SEMID64_DS 4290 /* asm-generic version of this struct */ 4291 struct target_semid64_ds 4292 { 4293 struct target_ipc_perm sem_perm; 4294 abi_ulong sem_otime; 4295 #if TARGET_ABI_BITS == 32 4296 abi_ulong __unused1; 4297 #endif 4298 abi_ulong sem_ctime; 4299 #if TARGET_ABI_BITS == 32 4300 abi_ulong __unused2; 4301 #endif 4302 abi_ulong sem_nsems; 4303 abi_ulong __unused3; 4304 abi_ulong __unused4; 4305 }; 4306 #endif 4307 4308 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 4309 abi_ulong target_addr) 4310 { 4311 struct target_ipc_perm *target_ip; 4312 struct target_semid64_ds *target_sd; 4313 4314 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4315 return -TARGET_EFAULT; 4316 target_ip = 
&(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_ipc_perm(): write host *host_ip back into the
 * guest semid64_ds at target_addr.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Convert the guest semid64_ds at target_addr into host *host_sd
 * (permissions via the helper above, plus times and sem_nsems).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse: write host *host_sd back to the guest semid64_ds. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest layout of struct seminfo (returned by semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host struct seminfo out to the guest, field by field. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
unlock_user_struct(target_seminfo, target_addr, 1); 4426 return 0; 4427 } 4428 4429 union semun { 4430 int val; 4431 struct semid_ds *buf; 4432 unsigned short *array; 4433 struct seminfo *__buf; 4434 }; 4435 4436 union target_semun { 4437 int val; 4438 abi_ulong buf; 4439 abi_ulong array; 4440 abi_ulong __buf; 4441 }; 4442 4443 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 4444 abi_ulong target_addr) 4445 { 4446 int nsems; 4447 unsigned short *array; 4448 union semun semun; 4449 struct semid_ds semid_ds; 4450 int i, ret; 4451 4452 semun.buf = &semid_ds; 4453 4454 ret = semctl(semid, 0, IPC_STAT, semun); 4455 if (ret == -1) 4456 return get_errno(ret); 4457 4458 nsems = semid_ds.sem_nsems; 4459 4460 *host_array = g_try_new(unsigned short, nsems); 4461 if (!*host_array) { 4462 return -TARGET_ENOMEM; 4463 } 4464 array = lock_user(VERIFY_READ, target_addr, 4465 nsems*sizeof(unsigned short), 1); 4466 if (!array) { 4467 g_free(*host_array); 4468 return -TARGET_EFAULT; 4469 } 4470 4471 for(i=0; i<nsems; i++) { 4472 __get_user((*host_array)[i], &array[i]); 4473 } 4474 unlock_user(array, target_addr, 0); 4475 4476 return 0; 4477 } 4478 4479 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 4480 unsigned short **host_array) 4481 { 4482 int nsems; 4483 unsigned short *array; 4484 union semun semun; 4485 struct semid_ds semid_ds; 4486 int i, ret; 4487 4488 semun.buf = &semid_ds; 4489 4490 ret = semctl(semid, 0, IPC_STAT, semun); 4491 if (ret == -1) 4492 return get_errno(ret); 4493 4494 nsems = semid_ds.sem_nsems; 4495 4496 array = lock_user(VERIFY_WRITE, target_addr, 4497 nsems*sizeof(unsigned short), 0); 4498 if (!array) 4499 return -TARGET_EFAULT; 4500 4501 for(i=0; i<nsems; i++) { 4502 __put_user((*host_array)[i], &array[i]); 4503 } 4504 g_free(*host_array); 4505 unlock_user(array, target_addr, 1); 4506 4507 return 0; 4508 } 4509 4510 static inline abi_long do_semctl(int semid, int semnum, int cmd, 4511 
                                 abi_ulong target_arg)
{
    /* Emulate semctl(2): decode the guest semun (target_arg is the raw
     * value/guest address), convert per command, invoke the host semctl,
     * and convert results back.  Returns the host result or -TARGET_Exxx. */
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* marshal the whole value array in, call, then copy it back out */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* these take no pointer argument */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}

struct
target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

/* Copy an array of nsops guest sembufs at target_addr into host_sembuf
 * (caller-provided).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

/* Emulate semop(2) via the interruptible semtimedop wrapper. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}

/* Guest layout of msqid64_ds (asm-generic style, with padding on 32-bit). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Convert a guest msqid_ds at target_addr into host *host_md.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Inverse: write host *host_md back to the guest msqid_ds. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

/* Guest layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host struct msginfo out to the guest, field by field. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

/* Emulate msgctl(2): convert per command, call host msgctl, convert back.
 * ptr is the guest address of the command-specific structure. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* kernel returns a struct msginfo through the msqid_ds pointer */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}

/* Guest layout of struct msgbuf: mtype followed by the message text. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

/* Emulate msgsnd(2): copy the guest message (mtype byteswapped, text
 * verbatim) into a host msgbuf and send it via the interruptible wrapper.
 * Returns the host result, -TARGET_EINVAL, -TARGET_EFAULT or -TARGET_ENOMEM. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* room for the host 'long mtype' plus msgsz bytes of text */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret =
get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

/* Emulate msgrcv(2): receive into a host buffer, then copy the text and
 * byteswapped mtype back to the guest msgbuf at msgp.
 * Returns bytes received, or -TARGET_Exxx. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext starts right after the abi_ulong mtype in the guest struct */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always valid here (checked above); guard is defensive */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}

/* Convert a guest shmid_ds at target_addr into host *host_sd.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse: write host *host_sd back to the guest shmid_ds. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest layout of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/* Copy a host struct shminfo out to the guest, field by field. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

/* Guest layout of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/* Copy a host struct shm_info out to the guest, field by field. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

/* Emulate shmctl(2): convert per command, call host shmctl, convert back.
 * buf is the guest address of the command-specific structure. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* kernel returns a struct shminfo through the shmid_ds pointer */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

/* Emulate shmat(2): attach the segment at the guest-requested address (or
 * find a free range), update guest page flags, and record the attachment in
 * shm_regions[] for later shmdt.  Returns the guest attach address or
 * -TARGET_Exxx. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* enforce SHMLBA alignment, rounding down only when SHM_RND is set */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma may have left a reservation mapped */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return
get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* mark the attached range as valid guest memory */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* remember the attachment so do_shmdt() can undo the page flags;
     * if the table is full the attach still succeeds, untracked */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}

/* Emulate shmdt(2): clear the guest page flags recorded at attach time,
 * then detach the host mapping. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}

#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* the multiplexer encodes an ABI version in the high 16 bits */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old ABI: msgp/msgtyp are passed indirectly in a kludge
                 * structure pointed to by ptr */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                /* NOTE(review): raddr already holds a -TARGET_xxx value at
                 * this point; routing it through get_errno() again looks
                 * suspect -- confirm against do_shmat()'s error returns */
                return get_errno(raddr);
            /* deliver the attach address through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

#define STRUCT(name, ...) \
STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
/* X-macro trick: including syscall_types.h with the STRUCT macros defined
 * differently first builds an enum of struct ids, then (below) a thunk
 * descriptor table struct_<name>_def[] for each. */
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;            /* guest ioctl number */
    unsigned int host_cmd;     /* corresponding host ioctl number */
    const char *name;
    int access;                /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;     /* custom handler, NULL for generic thunk */
    const argtype arg_type[5]; /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* clamp guest-controlled extent count so outbufsz can't overflow */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif

/* Handler for SIOCGIFCONF-style ioctls: the guest passes a struct ifconf
 * whose ifc_buf points at an array of target ifreqs; convert in both
 * directions around the host ioctl. */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp,
target_ifc_buf, target_ifc_len); 5376 } 5377 5378 if (free_buf) { 5379 free(host_ifconf); 5380 } 5381 5382 return ret; 5383 } 5384 5385 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5386 int cmd, abi_long arg) 5387 { 5388 void *argptr; 5389 struct dm_ioctl *host_dm; 5390 abi_long guest_data; 5391 uint32_t guest_data_size; 5392 int target_size; 5393 const argtype *arg_type = ie->arg_type; 5394 abi_long ret; 5395 void *big_buf = NULL; 5396 char *host_data; 5397 5398 arg_type++; 5399 target_size = thunk_type_size(arg_type, 0); 5400 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5401 if (!argptr) { 5402 ret = -TARGET_EFAULT; 5403 goto out; 5404 } 5405 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5406 unlock_user(argptr, arg, 0); 5407 5408 /* buf_temp is too small, so fetch things into a bigger buffer */ 5409 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 5410 memcpy(big_buf, buf_temp, target_size); 5411 buf_temp = big_buf; 5412 host_dm = big_buf; 5413 5414 guest_data = arg + host_dm->data_start; 5415 if ((guest_data - arg) < 0) { 5416 ret = -TARGET_EINVAL; 5417 goto out; 5418 } 5419 guest_data_size = host_dm->data_size - host_dm->data_start; 5420 host_data = (char*)host_dm + host_dm->data_start; 5421 5422 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 5423 if (!argptr) { 5424 ret = -TARGET_EFAULT; 5425 goto out; 5426 } 5427 5428 switch (ie->host_cmd) { 5429 case DM_REMOVE_ALL: 5430 case DM_LIST_DEVICES: 5431 case DM_DEV_CREATE: 5432 case DM_DEV_REMOVE: 5433 case DM_DEV_SUSPEND: 5434 case DM_DEV_STATUS: 5435 case DM_DEV_WAIT: 5436 case DM_TABLE_STATUS: 5437 case DM_TABLE_CLEAR: 5438 case DM_TABLE_DEPS: 5439 case DM_LIST_VERSIONS: 5440 /* no input data */ 5441 break; 5442 case DM_DEV_RENAME: 5443 case DM_DEV_SET_GEOMETRY: 5444 /* data contains only strings */ 5445 memcpy(host_data, argptr, guest_data_size); 5446 break; 5447 case DM_TARGET_MSG: 5448 memcpy(host_data, argptr, 
               guest_data_size);
        /* First 8 bytes of a target msg are the sector number (u64). */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        /* Repack each target_spec + its parameter string into host
         * layout; 'next' offsets are recomputed for the host packing.
         */
        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): argptr is not NULL-checked here; an unmapped
         * guest data area would be dereferenced below — verify.
         */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the command's output payload back to target format. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    /* Recompute 'next' for the target-side packing. */
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                /* 'next' is an offset from the start of the data area. */
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally write back the (possibly updated) dm_ioctl header. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}

/* Emulate BLKPG: the guest's blkpg_ioctl_arg carries a pointer to a
 * struct blkpg_partition, which must be fetched and converted separately
 * before the host ioctl is issued with a swizzled data pointer.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret =
-TARGET_EFAULT; 5655 goto out; 5656 } 5657 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5658 unlock_user(argptr, arg, 0); 5659 5660 /* Swizzle the data pointer to our local copy and call! */ 5661 host_blkpg->data = &host_part; 5662 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5663 5664 out: 5665 return ret; 5666 } 5667 5668 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5669 int fd, int cmd, abi_long arg) 5670 { 5671 const argtype *arg_type = ie->arg_type; 5672 const StructEntry *se; 5673 const argtype *field_types; 5674 const int *dst_offsets, *src_offsets; 5675 int target_size; 5676 void *argptr; 5677 abi_ulong *target_rt_dev_ptr; 5678 unsigned long *host_rt_dev_ptr; 5679 abi_long ret; 5680 int i; 5681 5682 assert(ie->access == IOC_W); 5683 assert(*arg_type == TYPE_PTR); 5684 arg_type++; 5685 assert(*arg_type == TYPE_STRUCT); 5686 target_size = thunk_type_size(arg_type, 0); 5687 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5688 if (!argptr) { 5689 return -TARGET_EFAULT; 5690 } 5691 arg_type++; 5692 assert(*arg_type == (int)STRUCT_rtentry); 5693 se = struct_entries + *arg_type++; 5694 assert(se->convert[0] == NULL); 5695 /* convert struct here to be able to catch rt_dev string */ 5696 field_types = se->field_types; 5697 dst_offsets = se->field_offsets[THUNK_HOST]; 5698 src_offsets = se->field_offsets[THUNK_TARGET]; 5699 for (i = 0; i < se->nb_fields; i++) { 5700 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 5701 assert(*field_types == TYPE_PTRVOID); 5702 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 5703 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 5704 if (*target_rt_dev_ptr != 0) { 5705 *host_rt_dev_ptr = (unsigned long)lock_user_string( 5706 tswapal(*target_rt_dev_ptr)); 5707 if (!*host_rt_dev_ptr) { 5708 unlock_user(argptr, arg, 0); 5709 return -TARGET_EFAULT; 5710 } 5711 } else { 5712 *host_rt_dev_ptr = 0; 5713 } 5714 field_types++; 5715 continue; 5716 } 
5717 field_types = thunk_convert(buf_temp + dst_offsets[i], 5718 argptr + src_offsets[i], 5719 field_types, THUNK_HOST); 5720 } 5721 unlock_user(argptr, arg, 0); 5722 5723 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5724 if (*host_rt_dev_ptr != 0) { 5725 unlock_user((void *)*host_rt_dev_ptr, 5726 *target_rt_dev_ptr, 0); 5727 } 5728 return ret; 5729 } 5730 5731 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 5732 int fd, int cmd, abi_long arg) 5733 { 5734 int sig = target_to_host_signal(arg); 5735 return get_errno(safe_ioctl(fd, ie->host_cmd, sig)); 5736 } 5737 5738 #ifdef TIOCGPTPEER 5739 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp, 5740 int fd, int cmd, abi_long arg) 5741 { 5742 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl); 5743 return get_errno(safe_ioctl(fd, ie->host_cmd, flags)); 5744 } 5745 #endif 5746 5747 static IOCTLEntry ioctl_entries[] = { 5748 #define IOCTL(cmd, access, ...) \ 5749 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 5750 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 5751 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 5752 #define IOCTL_IGNORE(cmd) \ 5753 { TARGET_ ## cmd, 0, #cmd }, 5754 #include "ioctls.h" 5755 { 0, 0, }, 5756 }; 5757 5758 /* ??? Implement proper locking for ioctls. */ 5759 /* do_ioctl() Must return target values and target errnos. 
 */
/* Generic ioctl dispatcher: look the command up in ioctl_entries, then
 * either delegate to a special handler or perform the standard
 * read/convert/ioctl/convert-back dance according to the entry's
 * IOC_R/IOC_W/IOC_RW access mode.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table is terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* pass the value straight through */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* host fills buf_temp; convert result back to the guest */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* convert guest data into buf_temp before the call */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, call, convert back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

/* target<->host translation table for termios c_iflag bits */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};

/* target<->host translation table for termios c_oflag bits */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    {
TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 5879 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 5880 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 5881 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 5882 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 5883 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 5884 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 5885 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 5886 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 5887 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 5888 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 5889 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 5890 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 5891 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 5892 { 0, 0, 0, 0 } 5893 }; 5894 5895 static const bitmask_transtbl cflag_tbl[] = { 5896 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 5897 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 5898 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 5899 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 5900 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 5901 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 5902 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 5903 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 5904 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 5905 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 5906 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 5907 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 5908 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 5909 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 5910 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 5911 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 5912 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 5913 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 5914 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 5915 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 5916 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 5917 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 5918 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 5919 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 5920 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, 
CSTOPB }, 5921 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 5922 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 5923 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 5924 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 5925 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 5926 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 5927 { 0, 0, 0, 0 } 5928 }; 5929 5930 static const bitmask_transtbl lflag_tbl[] = { 5931 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 5932 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 5933 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 5934 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 5935 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 5936 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 5937 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 5938 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 5939 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 5940 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 5941 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 5942 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 5943 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 5944 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 5945 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 5946 { 0, 0, 0, 0 } 5947 }; 5948 5949 static void target_to_host_termios (void *dst, const void *src) 5950 { 5951 struct host_termios *host = dst; 5952 const struct target_termios *target = src; 5953 5954 host->c_iflag = 5955 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 5956 host->c_oflag = 5957 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 5958 host->c_cflag = 5959 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 5960 host->c_lflag = 5961 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 5962 host->c_line = target->c_line; 5963 5964 memset(host->c_cc, 0, sizeof(host->c_cc)); 5965 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 5966 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 5967 host->c_cc[VERASE] = 
target->c_cc[TARGET_VERASE]; 5968 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 5969 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 5970 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 5971 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 5972 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 5973 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 5974 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 5975 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 5976 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 5977 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 5978 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 5979 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 5980 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 5981 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 5982 } 5983 5984 static void host_to_target_termios (void *dst, const void *src) 5985 { 5986 struct target_termios *target = dst; 5987 const struct host_termios *host = src; 5988 5989 target->c_iflag = 5990 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 5991 target->c_oflag = 5992 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 5993 target->c_cflag = 5994 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 5995 target->c_lflag = 5996 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 5997 target->c_line = host->c_line; 5998 5999 memset(target->c_cc, 0, sizeof(target->c_cc)); 6000 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 6001 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 6002 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 6003 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 6004 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 6005 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 6006 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 6007 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 6008 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 6009 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 6010 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 6011 
target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 6012 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 6013 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 6014 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 6015 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 6016 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 6017 } 6018 6019 static const StructEntry struct_termios_def = { 6020 .convert = { host_to_target_termios, target_to_host_termios }, 6021 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 6022 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 6023 }; 6024 6025 static bitmask_transtbl mmap_flags_tbl[] = { 6026 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 6027 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 6028 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 6029 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, 6030 MAP_ANONYMOUS, MAP_ANONYMOUS }, 6031 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, 6032 MAP_GROWSDOWN, MAP_GROWSDOWN }, 6033 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, 6034 MAP_DENYWRITE, MAP_DENYWRITE }, 6035 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, 6036 MAP_EXECUTABLE, MAP_EXECUTABLE }, 6037 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 6038 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, 6039 MAP_NORESERVE, MAP_NORESERVE }, 6040 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB }, 6041 /* MAP_STACK had been ignored by the kernel for quite some time. 6042 Recognize it for the target insofar as we do not want to pass 6043 it through to the host. 
*/ 6044 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 }, 6045 { 0, 0, 0, 0 } 6046 }; 6047 6048 #if defined(TARGET_I386) 6049 6050 /* NOTE: there is really one LDT for all the threads */ 6051 static uint8_t *ldt_table; 6052 6053 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 6054 { 6055 int size; 6056 void *p; 6057 6058 if (!ldt_table) 6059 return 0; 6060 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 6061 if (size > bytecount) 6062 size = bytecount; 6063 p = lock_user(VERIFY_WRITE, ptr, size, 0); 6064 if (!p) 6065 return -TARGET_EFAULT; 6066 /* ??? Should this by byteswapped? */ 6067 memcpy(p, ldt_table, size); 6068 unlock_user(p, ptr, size); 6069 return size; 6070 } 6071 6072 /* XXX: add locking support */ 6073 static abi_long write_ldt(CPUX86State *env, 6074 abi_ulong ptr, unsigned long bytecount, int oldmode) 6075 { 6076 struct target_modify_ldt_ldt_s ldt_info; 6077 struct target_modify_ldt_ldt_s *target_ldt_info; 6078 int seg_32bit, contents, read_exec_only, limit_in_pages; 6079 int seg_not_present, useable, lm; 6080 uint32_t *lp, entry_1, entry_2; 6081 6082 if (bytecount != sizeof(ldt_info)) 6083 return -TARGET_EINVAL; 6084 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 6085 return -TARGET_EFAULT; 6086 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6087 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6088 ldt_info.limit = tswap32(target_ldt_info->limit); 6089 ldt_info.flags = tswap32(target_ldt_info->flags); 6090 unlock_user_struct(target_ldt_info, ptr, 0); 6091 6092 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 6093 return -TARGET_EINVAL; 6094 seg_32bit = ldt_info.flags & 1; 6095 contents = (ldt_info.flags >> 1) & 3; 6096 read_exec_only = (ldt_info.flags >> 3) & 1; 6097 limit_in_pages = (ldt_info.flags >> 4) & 1; 6098 seg_not_present = (ldt_info.flags >> 5) & 1; 6099 useable = (ldt_info.flags >> 6) & 1; 6100 #ifdef TARGET_ABI32 6101 lm = 0; 6102 #else 6103 lm = (ldt_info.flags >> 7) & 1; 6104 
#endif 6105 if (contents == 3) { 6106 if (oldmode) 6107 return -TARGET_EINVAL; 6108 if (seg_not_present == 0) 6109 return -TARGET_EINVAL; 6110 } 6111 /* allocate the LDT */ 6112 if (!ldt_table) { 6113 env->ldt.base = target_mmap(0, 6114 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 6115 PROT_READ|PROT_WRITE, 6116 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 6117 if (env->ldt.base == -1) 6118 return -TARGET_ENOMEM; 6119 memset(g2h(env->ldt.base), 0, 6120 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 6121 env->ldt.limit = 0xffff; 6122 ldt_table = g2h(env->ldt.base); 6123 } 6124 6125 /* NOTE: same code as Linux kernel */ 6126 /* Allow LDTs to be cleared by the user. */ 6127 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6128 if (oldmode || 6129 (contents == 0 && 6130 read_exec_only == 1 && 6131 seg_32bit == 0 && 6132 limit_in_pages == 0 && 6133 seg_not_present == 1 && 6134 useable == 0 )) { 6135 entry_1 = 0; 6136 entry_2 = 0; 6137 goto install; 6138 } 6139 } 6140 6141 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6142 (ldt_info.limit & 0x0ffff); 6143 entry_2 = (ldt_info.base_addr & 0xff000000) | 6144 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6145 (ldt_info.limit & 0xf0000) | 6146 ((read_exec_only ^ 1) << 9) | 6147 (contents << 10) | 6148 ((seg_not_present ^ 1) << 15) | 6149 (seg_32bit << 22) | 6150 (limit_in_pages << 23) | 6151 (lm << 21) | 6152 0x7000; 6153 if (!oldmode) 6154 entry_2 |= (useable << 20); 6155 6156 /* Install the new entry ... 
*/ 6157 install: 6158 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 6159 lp[0] = tswap32(entry_1); 6160 lp[1] = tswap32(entry_2); 6161 return 0; 6162 } 6163 6164 /* specific and weird i386 syscalls */ 6165 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 6166 unsigned long bytecount) 6167 { 6168 abi_long ret; 6169 6170 switch (func) { 6171 case 0: 6172 ret = read_ldt(ptr, bytecount); 6173 break; 6174 case 1: 6175 ret = write_ldt(env, ptr, bytecount, 1); 6176 break; 6177 case 0x11: 6178 ret = write_ldt(env, ptr, bytecount, 0); 6179 break; 6180 default: 6181 ret = -TARGET_ENOSYS; 6182 break; 6183 } 6184 return ret; 6185 } 6186 6187 #if defined(TARGET_I386) && defined(TARGET_ABI32) 6188 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 6189 { 6190 uint64_t *gdt_table = g2h(env->gdt.base); 6191 struct target_modify_ldt_ldt_s ldt_info; 6192 struct target_modify_ldt_ldt_s *target_ldt_info; 6193 int seg_32bit, contents, read_exec_only, limit_in_pages; 6194 int seg_not_present, useable, lm; 6195 uint32_t *lp, entry_1, entry_2; 6196 int i; 6197 6198 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6199 if (!target_ldt_info) 6200 return -TARGET_EFAULT; 6201 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6202 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6203 ldt_info.limit = tswap32(target_ldt_info->limit); 6204 ldt_info.flags = tswap32(target_ldt_info->flags); 6205 if (ldt_info.entry_number == -1) { 6206 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 6207 if (gdt_table[i] == 0) { 6208 ldt_info.entry_number = i; 6209 target_ldt_info->entry_number = tswap32(i); 6210 break; 6211 } 6212 } 6213 } 6214 unlock_user_struct(target_ldt_info, ptr, 1); 6215 6216 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 6217 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 6218 return -TARGET_EINVAL; 6219 seg_32bit = ldt_info.flags & 1; 6220 contents = (ldt_info.flags >> 1) & 3; 
6221 read_exec_only = (ldt_info.flags >> 3) & 1; 6222 limit_in_pages = (ldt_info.flags >> 4) & 1; 6223 seg_not_present = (ldt_info.flags >> 5) & 1; 6224 useable = (ldt_info.flags >> 6) & 1; 6225 #ifdef TARGET_ABI32 6226 lm = 0; 6227 #else 6228 lm = (ldt_info.flags >> 7) & 1; 6229 #endif 6230 6231 if (contents == 3) { 6232 if (seg_not_present == 0) 6233 return -TARGET_EINVAL; 6234 } 6235 6236 /* NOTE: same code as Linux kernel */ 6237 /* Allow LDTs to be cleared by the user. */ 6238 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6239 if ((contents == 0 && 6240 read_exec_only == 1 && 6241 seg_32bit == 0 && 6242 limit_in_pages == 0 && 6243 seg_not_present == 1 && 6244 useable == 0 )) { 6245 entry_1 = 0; 6246 entry_2 = 0; 6247 goto install; 6248 } 6249 } 6250 6251 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6252 (ldt_info.limit & 0x0ffff); 6253 entry_2 = (ldt_info.base_addr & 0xff000000) | 6254 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6255 (ldt_info.limit & 0xf0000) | 6256 ((read_exec_only ^ 1) << 9) | 6257 (contents << 10) | 6258 ((seg_not_present ^ 1) << 15) | 6259 (seg_32bit << 22) | 6260 (limit_in_pages << 23) | 6261 (useable << 20) | 6262 (lm << 21) | 6263 0x7000; 6264 6265 /* Install the new entry ... 
*/ 6266 install: 6267 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 6268 lp[0] = tswap32(entry_1); 6269 lp[1] = tswap32(entry_2); 6270 return 0; 6271 } 6272 6273 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 6274 { 6275 struct target_modify_ldt_ldt_s *target_ldt_info; 6276 uint64_t *gdt_table = g2h(env->gdt.base); 6277 uint32_t base_addr, limit, flags; 6278 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 6279 int seg_not_present, useable, lm; 6280 uint32_t *lp, entry_1, entry_2; 6281 6282 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6283 if (!target_ldt_info) 6284 return -TARGET_EFAULT; 6285 idx = tswap32(target_ldt_info->entry_number); 6286 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 6287 idx > TARGET_GDT_ENTRY_TLS_MAX) { 6288 unlock_user_struct(target_ldt_info, ptr, 1); 6289 return -TARGET_EINVAL; 6290 } 6291 lp = (uint32_t *)(gdt_table + idx); 6292 entry_1 = tswap32(lp[0]); 6293 entry_2 = tswap32(lp[1]); 6294 6295 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 6296 contents = (entry_2 >> 10) & 3; 6297 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 6298 seg_32bit = (entry_2 >> 22) & 1; 6299 limit_in_pages = (entry_2 >> 23) & 1; 6300 useable = (entry_2 >> 20) & 1; 6301 #ifdef TARGET_ABI32 6302 lm = 0; 6303 #else 6304 lm = (entry_2 >> 21) & 1; 6305 #endif 6306 flags = (seg_32bit << 0) | (contents << 1) | 6307 (read_exec_only << 3) | (limit_in_pages << 4) | 6308 (seg_not_present << 5) | (useable << 6) | (lm << 7); 6309 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 6310 base_addr = (entry_1 >> 16) | 6311 (entry_2 & 0xff000000) | 6312 ((entry_2 & 0xff) << 16); 6313 target_ldt_info->base_addr = tswapal(base_addr); 6314 target_ldt_info->limit = tswap32(limit); 6315 target_ldt_info->flags = tswap32(flags); 6316 unlock_user_struct(target_ldt_info, ptr, 1); 6317 return 0; 6318 } 6319 #endif /* TARGET_I386 && TARGET_ABI32 */ 6320 6321 #ifndef TARGET_ABI32 6322 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 
6323 { 6324 abi_long ret = 0; 6325 abi_ulong val; 6326 int idx; 6327 6328 switch(code) { 6329 case TARGET_ARCH_SET_GS: 6330 case TARGET_ARCH_SET_FS: 6331 if (code == TARGET_ARCH_SET_GS) 6332 idx = R_GS; 6333 else 6334 idx = R_FS; 6335 cpu_x86_load_seg(env, idx, 0); 6336 env->segs[idx].base = addr; 6337 break; 6338 case TARGET_ARCH_GET_GS: 6339 case TARGET_ARCH_GET_FS: 6340 if (code == TARGET_ARCH_GET_GS) 6341 idx = R_GS; 6342 else 6343 idx = R_FS; 6344 val = env->segs[idx].base; 6345 if (put_user(val, addr, abi_ulong)) 6346 ret = -TARGET_EFAULT; 6347 break; 6348 default: 6349 ret = -TARGET_EINVAL; 6350 break; 6351 } 6352 return ret; 6353 } 6354 #endif 6355 6356 #endif /* defined(TARGET_I386) */ 6357 6358 #define NEW_STACK_SIZE 0x40000 6359 6360 6361 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 6362 typedef struct { 6363 CPUArchState *env; 6364 pthread_mutex_t mutex; 6365 pthread_cond_t cond; 6366 pthread_t thread; 6367 uint32_t tid; 6368 abi_ulong child_tidptr; 6369 abi_ulong parent_tidptr; 6370 sigset_t sigmask; 6371 } new_thread_info; 6372 6373 static void *clone_func(void *arg) 6374 { 6375 new_thread_info *info = arg; 6376 CPUArchState *env; 6377 CPUState *cpu; 6378 TaskState *ts; 6379 6380 rcu_register_thread(); 6381 tcg_register_thread(); 6382 env = info->env; 6383 cpu = ENV_GET_CPU(env); 6384 thread_cpu = cpu; 6385 ts = (TaskState *)cpu->opaque; 6386 info->tid = gettid(); 6387 task_settid(ts); 6388 if (info->child_tidptr) 6389 put_user_u32(info->tid, info->child_tidptr); 6390 if (info->parent_tidptr) 6391 put_user_u32(info->tid, info->parent_tidptr); 6392 /* Enable signals. */ 6393 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 6394 /* Signal to the parent that we're ready. */ 6395 pthread_mutex_lock(&info->mutex); 6396 pthread_cond_broadcast(&info->cond); 6397 pthread_mutex_unlock(&info->mutex); 6398 /* Wait until the parent has finished initializing the tls state. 
 */
    /* The parent still holds clone_lock; acquiring it here blocks until the
     * parent's setup is complete, then we immediately release it. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* Thread-style clone: back the guest thread with a host pthread. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Only flag combinations that look like pthread_create() are
         * supported; anything else cannot be emulated safely. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.
 */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the attr-setup return codes below are overwritten
         * without being checked; only pthread_create's result is used. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.
 */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}

/* warning : doesn't handle linux specific flags...
 */
/* Translate a target fcntl(2) command number to the host's.  Lock commands
 * are always mapped to the host's 64-bit (F_*LK64) variants so large file
 * offsets survive the round trip.  Returns -TARGET_EINVAL for commands we
 * do not support.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Unreachable: the default case above already returns. */
    return -TARGET_EINVAL;
}

/* Expansion body shared by the two flock-type translators below; the
 * direction of each conversion is chosen by how TRANSTBL_CONVERT is
 * defined at the point of expansion.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }

/* Map a target flock l_type value to the host's; unknown values yield
 * -TARGET_EINVAL. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

/* Map a host flock l_type value back to the target's. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6628 FLOCK_TRANSTBL 6629 #undef TRANSTBL_CONVERT 6630 /* if we don't know how to convert the value coming 6631 * from the host we copy to the target field as-is 6632 */ 6633 return type; 6634 } 6635 6636 static inline abi_long copy_from_user_flock(struct flock64 *fl, 6637 abi_ulong target_flock_addr) 6638 { 6639 struct target_flock *target_fl; 6640 int l_type; 6641 6642 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6643 return -TARGET_EFAULT; 6644 } 6645 6646 __get_user(l_type, &target_fl->l_type); 6647 l_type = target_to_host_flock(l_type); 6648 if (l_type < 0) { 6649 return l_type; 6650 } 6651 fl->l_type = l_type; 6652 __get_user(fl->l_whence, &target_fl->l_whence); 6653 __get_user(fl->l_start, &target_fl->l_start); 6654 __get_user(fl->l_len, &target_fl->l_len); 6655 __get_user(fl->l_pid, &target_fl->l_pid); 6656 unlock_user_struct(target_fl, target_flock_addr, 0); 6657 return 0; 6658 } 6659 6660 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, 6661 const struct flock64 *fl) 6662 { 6663 struct target_flock *target_fl; 6664 short l_type; 6665 6666 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6667 return -TARGET_EFAULT; 6668 } 6669 6670 l_type = host_to_target_flock(fl->l_type); 6671 __put_user(l_type, &target_fl->l_type); 6672 __put_user(fl->l_whence, &target_fl->l_whence); 6673 __put_user(fl->l_start, &target_fl->l_start); 6674 __put_user(fl->l_len, &target_fl->l_len); 6675 __put_user(fl->l_pid, &target_fl->l_pid); 6676 unlock_user_struct(target_fl, target_flock_addr, 1); 6677 return 0; 6678 } 6679 6680 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); 6681 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); 6682 6683 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 6684 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, 6685 abi_ulong target_flock_addr) 6686 { 6687 struct target_oabi_flock64 *target_fl; 6688 int 
l_type; 6689 6690 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6691 return -TARGET_EFAULT; 6692 } 6693 6694 __get_user(l_type, &target_fl->l_type); 6695 l_type = target_to_host_flock(l_type); 6696 if (l_type < 0) { 6697 return l_type; 6698 } 6699 fl->l_type = l_type; 6700 __get_user(fl->l_whence, &target_fl->l_whence); 6701 __get_user(fl->l_start, &target_fl->l_start); 6702 __get_user(fl->l_len, &target_fl->l_len); 6703 __get_user(fl->l_pid, &target_fl->l_pid); 6704 unlock_user_struct(target_fl, target_flock_addr, 0); 6705 return 0; 6706 } 6707 6708 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr, 6709 const struct flock64 *fl) 6710 { 6711 struct target_oabi_flock64 *target_fl; 6712 short l_type; 6713 6714 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6715 return -TARGET_EFAULT; 6716 } 6717 6718 l_type = host_to_target_flock(fl->l_type); 6719 __put_user(l_type, &target_fl->l_type); 6720 __put_user(fl->l_whence, &target_fl->l_whence); 6721 __put_user(fl->l_start, &target_fl->l_start); 6722 __put_user(fl->l_len, &target_fl->l_len); 6723 __put_user(fl->l_pid, &target_fl->l_pid); 6724 unlock_user_struct(target_fl, target_flock_addr, 1); 6725 return 0; 6726 } 6727 #endif 6728 6729 static inline abi_long copy_from_user_flock64(struct flock64 *fl, 6730 abi_ulong target_flock_addr) 6731 { 6732 struct target_flock64 *target_fl; 6733 int l_type; 6734 6735 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6736 return -TARGET_EFAULT; 6737 } 6738 6739 __get_user(l_type, &target_fl->l_type); 6740 l_type = target_to_host_flock(l_type); 6741 if (l_type < 0) { 6742 return l_type; 6743 } 6744 fl->l_type = l_type; 6745 __get_user(fl->l_whence, &target_fl->l_whence); 6746 __get_user(fl->l_start, &target_fl->l_start); 6747 __get_user(fl->l_len, &target_fl->l_len); 6748 __get_user(fl->l_pid, &target_fl->l_pid); 6749 unlock_user_struct(target_fl, target_flock_addr, 0); 6750 return 0; 
6751 } 6752 6753 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, 6754 const struct flock64 *fl) 6755 { 6756 struct target_flock64 *target_fl; 6757 short l_type; 6758 6759 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6760 return -TARGET_EFAULT; 6761 } 6762 6763 l_type = host_to_target_flock(fl->l_type); 6764 __put_user(l_type, &target_fl->l_type); 6765 __put_user(fl->l_whence, &target_fl->l_whence); 6766 __put_user(fl->l_start, &target_fl->l_start); 6767 __put_user(fl->l_len, &target_fl->l_len); 6768 __put_user(fl->l_pid, &target_fl->l_pid); 6769 unlock_user_struct(target_fl, target_flock_addr, 1); 6770 return 0; 6771 } 6772 6773 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 6774 { 6775 struct flock64 fl64; 6776 #ifdef F_GETOWN_EX 6777 struct f_owner_ex fox; 6778 struct target_f_owner_ex *target_fox; 6779 #endif 6780 abi_long ret; 6781 int host_cmd = target_to_host_fcntl_cmd(cmd); 6782 6783 if (host_cmd == -TARGET_EINVAL) 6784 return host_cmd; 6785 6786 switch(cmd) { 6787 case TARGET_F_GETLK: 6788 ret = copy_from_user_flock(&fl64, arg); 6789 if (ret) { 6790 return ret; 6791 } 6792 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6793 if (ret == 0) { 6794 ret = copy_to_user_flock(arg, &fl64); 6795 } 6796 break; 6797 6798 case TARGET_F_SETLK: 6799 case TARGET_F_SETLKW: 6800 ret = copy_from_user_flock(&fl64, arg); 6801 if (ret) { 6802 return ret; 6803 } 6804 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6805 break; 6806 6807 case TARGET_F_GETLK64: 6808 ret = copy_from_user_flock64(&fl64, arg); 6809 if (ret) { 6810 return ret; 6811 } 6812 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6813 if (ret == 0) { 6814 ret = copy_to_user_flock64(arg, &fl64); 6815 } 6816 break; 6817 case TARGET_F_SETLK64: 6818 case TARGET_F_SETLKW64: 6819 ret = copy_from_user_flock64(&fl64, arg); 6820 if (ret) { 6821 return ret; 6822 } 6823 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6824 break; 6825 6826 case 
TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Convert the host O_* flag bits back to target encoding. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Integer-argument commands need no conversion beyond the command. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Pass unknown commands straight through with the target number. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

/* 16-bit UID ABI helpers: clamp 32-bit host ids into the 16-bit range the
 * guest ABI can express (overflow maps to the conventional 65534), and
 * widen 16-bit guest ids back, preserving the -1 "no change" sentinel.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
/* Byte-swap an id at the width the guest ABI uses (16 bits here). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread syscall wrappers (bypass libc's set*id() broadcasting). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)

/* One-time initialization of the syscall layer: registers thunk struct
 * descriptors, builds the reverse errno table, and patches variable-size
 * ioctl command numbers with their real argument size.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary.
 We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#if TARGET_ABI_BITS == 32
/* Combine a register pair carrying a 64-bit value on a 32-bit ABI; word
 * order depends on the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the value already fits in one register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
/* Emulate truncate64(2); some ABIs align 64-bit register pairs, shifting
 * the argument slots by one. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64(2); same register-pair alignment handling as above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
abi_long arg3, 7067 abi_long arg4) 7068 { 7069 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) { 7070 arg2 = arg3; 7071 arg3 = arg4; 7072 } 7073 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 7074 } 7075 #endif 7076 7077 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 7078 abi_ulong target_addr) 7079 { 7080 struct target_timespec *target_ts; 7081 7082 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 7083 return -TARGET_EFAULT; 7084 __get_user(host_ts->tv_sec, &target_ts->tv_sec); 7085 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec); 7086 unlock_user_struct(target_ts, target_addr, 0); 7087 return 0; 7088 } 7089 7090 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 7091 struct timespec *host_ts) 7092 { 7093 struct target_timespec *target_ts; 7094 7095 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 7096 return -TARGET_EFAULT; 7097 __put_user(host_ts->tv_sec, &target_ts->tv_sec); 7098 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec); 7099 unlock_user_struct(target_ts, target_addr, 1); 7100 return 0; 7101 } 7102 7103 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 7104 abi_ulong target_addr) 7105 { 7106 struct target_itimerspec *target_itspec; 7107 7108 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 7109 return -TARGET_EFAULT; 7110 } 7111 7112 host_itspec->it_interval.tv_sec = 7113 tswapal(target_itspec->it_interval.tv_sec); 7114 host_itspec->it_interval.tv_nsec = 7115 tswapal(target_itspec->it_interval.tv_nsec); 7116 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 7117 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 7118 7119 unlock_user_struct(target_itspec, target_addr, 1); 7120 return 0; 7121 } 7122 7123 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 7124 struct itimerspec *host_its) 7125 { 7126 struct target_itimerspec *target_itspec; 
7127 7128 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 7129 return -TARGET_EFAULT; 7130 } 7131 7132 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 7133 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 7134 7135 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 7136 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 7137 7138 unlock_user_struct(target_itspec, target_addr, 0); 7139 return 0; 7140 } 7141 7142 static inline abi_long target_to_host_timex(struct timex *host_tx, 7143 abi_long target_addr) 7144 { 7145 struct target_timex *target_tx; 7146 7147 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7148 return -TARGET_EFAULT; 7149 } 7150 7151 __get_user(host_tx->modes, &target_tx->modes); 7152 __get_user(host_tx->offset, &target_tx->offset); 7153 __get_user(host_tx->freq, &target_tx->freq); 7154 __get_user(host_tx->maxerror, &target_tx->maxerror); 7155 __get_user(host_tx->esterror, &target_tx->esterror); 7156 __get_user(host_tx->status, &target_tx->status); 7157 __get_user(host_tx->constant, &target_tx->constant); 7158 __get_user(host_tx->precision, &target_tx->precision); 7159 __get_user(host_tx->tolerance, &target_tx->tolerance); 7160 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7161 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7162 __get_user(host_tx->tick, &target_tx->tick); 7163 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7164 __get_user(host_tx->jitter, &target_tx->jitter); 7165 __get_user(host_tx->shift, &target_tx->shift); 7166 __get_user(host_tx->stabil, &target_tx->stabil); 7167 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7168 __get_user(host_tx->calcnt, &target_tx->calcnt); 7169 __get_user(host_tx->errcnt, &target_tx->errcnt); 7170 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7171 __get_user(host_tx->tai, &target_tx->tai); 7172 7173 unlock_user_struct(target_tx, target_addr, 
0);
    return 0;
}

/* Write *host_tx out as a target 'struct timex' in guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}


/* Read a target sigevent from guest memory into *host_sevp, converting the
 * signal number to the host's numbering.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in
 signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
/* Translate the target's mlockall(2) flag bits to the host's. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif

/* Write a host 'struct stat' out as the target's stat64 layout at
 * target_addr, selecting between the ARM EABI layout, target_stat64,
 * or plain target_stat depending on the target configuration.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks,
                   &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.
 */
/* Emulate futex(2) on the guest address 'uaddr' by forwarding to the host
 * futex on the same (g2h-translated) memory.  Only the listed base ops are
 * supported; others return -TARGET_ENOSYS.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /* NOTE(review): the target_to_host_timespec result is not
             * checked here; a faulting timeout pointer would be silently
             * ignored — confirm whether EFAULT should be returned. */
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* 'val' lives in guest memory byte order at uaddr, hence tswap32. */
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): resolve 'pathname' into an opaque file
 * handle written back into guest memory at 'handle', plus the mount id at
 * 'mount_id'.  Returns 0 or a negative target errno.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): 'size' is guest-controlled and unchecked here; a huge
     * value would overflow total_size — confirm the kernel's MAX_HANDLE_SZ
     * clamp makes this harmless, or add a bound. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif

#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): reconstruct the host file_handle from the
 * guest's copy and open it.  Returns the new fd or a negative target errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size =
sizeof(struct file_handle) + size; 7436 target_fh = lock_user(VERIFY_READ, handle, total_size, 1); 7437 if (!target_fh) { 7438 return -TARGET_EFAULT; 7439 } 7440 7441 fh = g_memdup(target_fh, total_size); 7442 fh->handle_bytes = size; 7443 fh->handle_type = tswap32(target_fh->handle_type); 7444 7445 ret = get_errno(open_by_handle_at(mount_fd, fh, 7446 target_to_host_bitmask(flags, fcntl_flags_tbl))); 7447 7448 g_free(fh); 7449 7450 unlock_user(target_fh, handle, total_size); 7451 7452 return ret; 7453 } 7454 #endif 7455 7456 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4) 7457 7458 /* signalfd siginfo conversion */ 7459 7460 static void 7461 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo, 7462 const struct signalfd_siginfo *info) 7463 { 7464 int sig = host_to_target_signal(info->ssi_signo); 7465 7466 /* linux/signalfd.h defines a ssi_addr_lsb 7467 * not defined in sys/signalfd.h but used by some kernels 7468 */ 7469 7470 #ifdef BUS_MCEERR_AO 7471 if (tinfo->ssi_signo == SIGBUS && 7472 (tinfo->ssi_code == BUS_MCEERR_AR || 7473 tinfo->ssi_code == BUS_MCEERR_AO)) { 7474 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1); 7475 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1); 7476 *tssi_addr_lsb = tswap16(*ssi_addr_lsb); 7477 } 7478 #endif 7479 7480 tinfo->ssi_signo = tswap32(sig); 7481 tinfo->ssi_errno = tswap32(tinfo->ssi_errno); 7482 tinfo->ssi_code = tswap32(info->ssi_code); 7483 tinfo->ssi_pid = tswap32(info->ssi_pid); 7484 tinfo->ssi_uid = tswap32(info->ssi_uid); 7485 tinfo->ssi_fd = tswap32(info->ssi_fd); 7486 tinfo->ssi_tid = tswap32(info->ssi_tid); 7487 tinfo->ssi_band = tswap32(info->ssi_band); 7488 tinfo->ssi_overrun = tswap32(info->ssi_overrun); 7489 tinfo->ssi_trapno = tswap32(info->ssi_trapno); 7490 tinfo->ssi_status = tswap32(info->ssi_status); 7491 tinfo->ssi_int = tswap32(info->ssi_int); 7492 tinfo->ssi_ptr = tswap64(info->ssi_ptr); 7493 tinfo->ssi_utime = tswap64(info->ssi_utime); 7494 
tinfo->ssi_stime = tswap64(info->ssi_stime); 7495 tinfo->ssi_addr = tswap64(info->ssi_addr); 7496 } 7497 7498 static abi_long host_to_target_data_signalfd(void *buf, size_t len) 7499 { 7500 int i; 7501 7502 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) { 7503 host_to_target_signalfd_siginfo(buf + i, buf + i); 7504 } 7505 7506 return len; 7507 } 7508 7509 static TargetFdTrans target_signalfd_trans = { 7510 .host_to_target_data = host_to_target_data_signalfd, 7511 }; 7512 7513 static abi_long do_signalfd4(int fd, abi_long mask, int flags) 7514 { 7515 int host_flags; 7516 target_sigset_t *target_mask; 7517 sigset_t host_mask; 7518 abi_long ret; 7519 7520 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) { 7521 return -TARGET_EINVAL; 7522 } 7523 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) { 7524 return -TARGET_EFAULT; 7525 } 7526 7527 target_to_host_sigset(&host_mask, target_mask); 7528 7529 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 7530 7531 ret = get_errno(signalfd(fd, &host_mask, host_flags)); 7532 if (ret >= 0) { 7533 fd_trans_register(ret, &target_signalfd_trans); 7534 } 7535 7536 unlock_user_struct(target_mask, mask, 0); 7537 7538 return ret; 7539 } 7540 #endif 7541 7542 /* Map host to target signal numbers for the wait family of syscalls. 7543 Assume all other status bits are the same. 
*/ 7544 int host_to_target_waitstatus(int status) 7545 { 7546 if (WIFSIGNALED(status)) { 7547 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 7548 } 7549 if (WIFSTOPPED(status)) { 7550 return (host_to_target_signal(WSTOPSIG(status)) << 8) 7551 | (status & 0xff); 7552 } 7553 return status; 7554 } 7555 7556 static int open_self_cmdline(void *cpu_env, int fd) 7557 { 7558 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7559 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 7560 int i; 7561 7562 for (i = 0; i < bprm->argc; i++) { 7563 size_t len = strlen(bprm->argv[i]) + 1; 7564 7565 if (write(fd, bprm->argv[i], len) != len) { 7566 return -1; 7567 } 7568 } 7569 7570 return 0; 7571 } 7572 7573 static int open_self_maps(void *cpu_env, int fd) 7574 { 7575 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7576 TaskState *ts = cpu->opaque; 7577 FILE *fp; 7578 char *line = NULL; 7579 size_t len = 0; 7580 ssize_t read; 7581 7582 fp = fopen("/proc/self/maps", "r"); 7583 if (fp == NULL) { 7584 return -1; 7585 } 7586 7587 while ((read = getline(&line, &len, fp)) != -1) { 7588 int fields, dev_maj, dev_min, inode; 7589 uint64_t min, max, offset; 7590 char flag_r, flag_w, flag_x, flag_p; 7591 char path[512] = ""; 7592 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 7593 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 7594 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 7595 7596 if ((fields < 10) || (fields > 11)) { 7597 continue; 7598 } 7599 if (h2g_valid(min)) { 7600 int flags = page_get_flags(h2g(min)); 7601 max = h2g_valid(max - 1) ? 
max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1; 7602 if (page_check_range(h2g(min), max - min, flags) == -1) { 7603 continue; 7604 } 7605 if (h2g(min) == ts->info->stack_limit) { 7606 pstrcpy(path, sizeof(path), " [stack]"); 7607 } 7608 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 7609 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 7610 h2g(min), h2g(max - 1) + 1, flag_r, flag_w, 7611 flag_x, flag_p, offset, dev_maj, dev_min, inode, 7612 path[0] ? " " : "", path); 7613 } 7614 } 7615 7616 free(line); 7617 fclose(fp); 7618 7619 return 0; 7620 } 7621 7622 static int open_self_stat(void *cpu_env, int fd) 7623 { 7624 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7625 TaskState *ts = cpu->opaque; 7626 abi_ulong start_stack = ts->info->start_stack; 7627 int i; 7628 7629 for (i = 0; i < 44; i++) { 7630 char buf[128]; 7631 int len; 7632 uint64_t val = 0; 7633 7634 if (i == 0) { 7635 /* pid */ 7636 val = getpid(); 7637 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 7638 } else if (i == 1) { 7639 /* app name */ 7640 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 7641 } else if (i == 27) { 7642 /* stack bottom */ 7643 val = start_stack; 7644 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 7645 } else { 7646 /* for the rest, there is MasterCard */ 7647 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 7648 } 7649 7650 len = strlen(buf); 7651 if (write(fd, buf, len) != len) { 7652 return -1; 7653 } 7654 } 7655 7656 return 0; 7657 } 7658 7659 static int open_self_auxv(void *cpu_env, int fd) 7660 { 7661 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 7662 TaskState *ts = cpu->opaque; 7663 abi_ulong auxv = ts->info->saved_auxv; 7664 abi_ulong len = ts->info->auxv_len; 7665 char *ptr; 7666 7667 /* 7668 * Auxiliary vector is stored in target process stack. 
7669 * read in whole auxv vector and copy it to file 7670 */ 7671 ptr = lock_user(VERIFY_READ, auxv, len, 0); 7672 if (ptr != NULL) { 7673 while (len > 0) { 7674 ssize_t r; 7675 r = write(fd, ptr, len); 7676 if (r <= 0) { 7677 break; 7678 } 7679 len -= r; 7680 ptr += r; 7681 } 7682 lseek(fd, 0, SEEK_SET); 7683 unlock_user(ptr, auxv, len); 7684 } 7685 7686 return 0; 7687 } 7688 7689 static int is_proc_myself(const char *filename, const char *entry) 7690 { 7691 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 7692 filename += strlen("/proc/"); 7693 if (!strncmp(filename, "self/", strlen("self/"))) { 7694 filename += strlen("self/"); 7695 } else if (*filename >= '1' && *filename <= '9') { 7696 char myself[80]; 7697 snprintf(myself, sizeof(myself), "%d/", getpid()); 7698 if (!strncmp(filename, myself, strlen(myself))) { 7699 filename += strlen(myself); 7700 } else { 7701 return 0; 7702 } 7703 } else { 7704 return 0; 7705 } 7706 if (!strcmp(filename, entry)) { 7707 return 1; 7708 } 7709 } 7710 return 0; 7711 } 7712 7713 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 7714 static int is_proc(const char *filename, const char *entry) 7715 { 7716 return strcmp(filename, entry) == 0; 7717 } 7718 7719 static int open_net_route(void *cpu_env, int fd) 7720 { 7721 FILE *fp; 7722 char *line = NULL; 7723 size_t len = 0; 7724 ssize_t read; 7725 7726 fp = fopen("/proc/net/route", "r"); 7727 if (fp == NULL) { 7728 return -1; 7729 } 7730 7731 /* read header */ 7732 7733 read = getline(&line, &len, fp); 7734 dprintf(fd, "%s", line); 7735 7736 /* read routes */ 7737 7738 while ((read = getline(&line, &len, fp)) != -1) { 7739 char iface[16]; 7740 uint32_t dest, gw, mask; 7741 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 7742 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7743 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 7744 &mask, &mtu, &window, &irtt); 7745 dprintf(fd, 
"%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7746 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 7747 metric, tswap32(mask), mtu, window, irtt); 7748 } 7749 7750 free(line); 7751 fclose(fp); 7752 7753 return 0; 7754 } 7755 #endif 7756 7757 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) 7758 { 7759 struct fake_open { 7760 const char *filename; 7761 int (*fill)(void *cpu_env, int fd); 7762 int (*cmp)(const char *s1, const char *s2); 7763 }; 7764 const struct fake_open *fake_open; 7765 static const struct fake_open fakes[] = { 7766 { "maps", open_self_maps, is_proc_myself }, 7767 { "stat", open_self_stat, is_proc_myself }, 7768 { "auxv", open_self_auxv, is_proc_myself }, 7769 { "cmdline", open_self_cmdline, is_proc_myself }, 7770 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 7771 { "/proc/net/route", open_net_route, is_proc }, 7772 #endif 7773 { NULL, NULL, NULL } 7774 }; 7775 7776 if (is_proc_myself(pathname, "exe")) { 7777 int execfd = qemu_getauxval(AT_EXECFD); 7778 return execfd ? 
execfd : safe_openat(dirfd, exec_path, flags, mode); 7779 } 7780 7781 for (fake_open = fakes; fake_open->filename; fake_open++) { 7782 if (fake_open->cmp(pathname, fake_open->filename)) { 7783 break; 7784 } 7785 } 7786 7787 if (fake_open->filename) { 7788 const char *tmpdir; 7789 char filename[PATH_MAX]; 7790 int fd, r; 7791 7792 /* create temporary file to map stat to */ 7793 tmpdir = getenv("TMPDIR"); 7794 if (!tmpdir) 7795 tmpdir = "/tmp"; 7796 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 7797 fd = mkstemp(filename); 7798 if (fd < 0) { 7799 return fd; 7800 } 7801 unlink(filename); 7802 7803 if ((r = fake_open->fill(cpu_env, fd))) { 7804 int e = errno; 7805 close(fd); 7806 errno = e; 7807 return r; 7808 } 7809 lseek(fd, 0, SEEK_SET); 7810 7811 return fd; 7812 } 7813 7814 return safe_openat(dirfd, path(pathname), flags, mode); 7815 } 7816 7817 #define TIMER_MAGIC 0x0caf0000 7818 #define TIMER_MAGIC_MASK 0xffff0000 7819 7820 /* Convert QEMU provided timer ID back to internal 16bit index format */ 7821 static target_timer_t get_timer_id(abi_long arg) 7822 { 7823 target_timer_t timerid = arg; 7824 7825 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 7826 return -TARGET_EINVAL; 7827 } 7828 7829 timerid &= 0xffff; 7830 7831 if (timerid >= ARRAY_SIZE(g_posix_timers)) { 7832 return -TARGET_EINVAL; 7833 } 7834 7835 return timerid; 7836 } 7837 7838 static abi_long swap_data_eventfd(void *buf, size_t len) 7839 { 7840 uint64_t *counter = buf; 7841 int i; 7842 7843 if (len < sizeof(uint64_t)) { 7844 return -EINVAL; 7845 } 7846 7847 for (i = 0; i < len; i += sizeof(uint64_t)) { 7848 *counter = tswap64(*counter); 7849 counter++; 7850 } 7851 7852 return len; 7853 } 7854 7855 static TargetFdTrans target_eventfd_trans = { 7856 .host_to_target_data = swap_data_eventfd, 7857 .target_to_host_data = swap_data_eventfd, 7858 }; 7859 7860 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \ 7861 (defined(CONFIG_INOTIFY1) && 
defined(TARGET_NR_inotify_init1) && \ 7862 defined(__NR_inotify_init1)) 7863 static abi_long host_to_target_data_inotify(void *buf, size_t len) 7864 { 7865 struct inotify_event *ev; 7866 int i; 7867 uint32_t name_len; 7868 7869 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) { 7870 ev = (struct inotify_event *)((char *)buf + i); 7871 name_len = ev->len; 7872 7873 ev->wd = tswap32(ev->wd); 7874 ev->mask = tswap32(ev->mask); 7875 ev->cookie = tswap32(ev->cookie); 7876 ev->len = tswap32(name_len); 7877 } 7878 7879 return len; 7880 } 7881 7882 static TargetFdTrans target_inotify_trans = { 7883 .host_to_target_data = host_to_target_data_inotify, 7884 }; 7885 #endif 7886 7887 static int target_to_host_cpu_mask(unsigned long *host_mask, 7888 size_t host_size, 7889 abi_ulong target_addr, 7890 size_t target_size) 7891 { 7892 unsigned target_bits = sizeof(abi_ulong) * 8; 7893 unsigned host_bits = sizeof(*host_mask) * 8; 7894 abi_ulong *target_mask; 7895 unsigned i, j; 7896 7897 assert(host_size >= target_size); 7898 7899 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1); 7900 if (!target_mask) { 7901 return -TARGET_EFAULT; 7902 } 7903 memset(host_mask, 0, host_size); 7904 7905 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 7906 unsigned bit = i * target_bits; 7907 abi_ulong val; 7908 7909 __get_user(val, &target_mask[i]); 7910 for (j = 0; j < target_bits; j++, bit++) { 7911 if (val & (1UL << j)) { 7912 host_mask[bit / host_bits] |= 1UL << (bit % host_bits); 7913 } 7914 } 7915 } 7916 7917 unlock_user(target_mask, target_addr, 0); 7918 return 0; 7919 } 7920 7921 static int host_to_target_cpu_mask(const unsigned long *host_mask, 7922 size_t host_size, 7923 abi_ulong target_addr, 7924 size_t target_size) 7925 { 7926 unsigned target_bits = sizeof(abi_ulong) * 8; 7927 unsigned host_bits = sizeof(*host_mask) * 8; 7928 abi_ulong *target_mask; 7929 unsigned i, j; 7930 7931 assert(host_size >= target_size); 7932 7933 target_mask = 
lock_user(VERIFY_WRITE, target_addr, target_size, 0); 7934 if (!target_mask) { 7935 return -TARGET_EFAULT; 7936 } 7937 7938 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 7939 unsigned bit = i * target_bits; 7940 abi_ulong val = 0; 7941 7942 for (j = 0; j < target_bits; j++, bit++) { 7943 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) { 7944 val |= 1UL << j; 7945 } 7946 } 7947 __put_user(val, &target_mask[i]); 7948 } 7949 7950 unlock_user(target_mask, target_addr, target_size); 7951 return 0; 7952 } 7953 7954 /* do_syscall() should always have a single exit point at the end so 7955 that actions, such as logging of syscall results, can be performed. 7956 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 7957 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 7958 abi_long arg2, abi_long arg3, abi_long arg4, 7959 abi_long arg5, abi_long arg6, abi_long arg7, 7960 abi_long arg8) 7961 { 7962 CPUState *cpu = ENV_GET_CPU(cpu_env); 7963 abi_long ret; 7964 struct stat st; 7965 struct statfs stfs; 7966 void *p; 7967 7968 #if defined(DEBUG_ERESTARTSYS) 7969 /* Debug-only code for exercising the syscall-restart code paths 7970 * in the per-architecture cpu main loops: restart every syscall 7971 * the guest makes once before letting it through. 7972 */ 7973 { 7974 static int flag; 7975 7976 flag = !flag; 7977 if (flag) { 7978 return -TARGET_ERESTARTSYS; 7979 } 7980 } 7981 #endif 7982 7983 #ifdef DEBUG 7984 gemu_log("syscall %d", num); 7985 #endif 7986 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); 7987 if(do_strace) 7988 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 7989 7990 switch(num) { 7991 case TARGET_NR_exit: 7992 /* In old applications this may be used to implement _exit(2). 7993 However in threaded applictions it is used for thread termination, 7994 and _exit_group is used for application termination. 7995 Do thread termination if we have more then one thread. 
*/ 7996 7997 if (block_signals()) { 7998 ret = -TARGET_ERESTARTSYS; 7999 break; 8000 } 8001 8002 cpu_list_lock(); 8003 8004 if (CPU_NEXT(first_cpu)) { 8005 TaskState *ts; 8006 8007 /* Remove the CPU from the list. */ 8008 QTAILQ_REMOVE(&cpus, cpu, node); 8009 8010 cpu_list_unlock(); 8011 8012 ts = cpu->opaque; 8013 if (ts->child_tidptr) { 8014 put_user_u32(0, ts->child_tidptr); 8015 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 8016 NULL, NULL, 0); 8017 } 8018 thread_cpu = NULL; 8019 object_unref(OBJECT(cpu)); 8020 g_free(ts); 8021 rcu_unregister_thread(); 8022 pthread_exit(NULL); 8023 } 8024 8025 cpu_list_unlock(); 8026 #ifdef TARGET_GPROF 8027 _mcleanup(); 8028 #endif 8029 gdb_exit(cpu_env, arg1); 8030 _exit(arg1); 8031 ret = 0; /* avoid warning */ 8032 break; 8033 case TARGET_NR_read: 8034 if (arg3 == 0) 8035 ret = 0; 8036 else { 8037 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 8038 goto efault; 8039 ret = get_errno(safe_read(arg1, p, arg3)); 8040 if (ret >= 0 && 8041 fd_trans_host_to_target_data(arg1)) { 8042 ret = fd_trans_host_to_target_data(arg1)(p, ret); 8043 } 8044 unlock_user(p, arg2, ret); 8045 } 8046 break; 8047 case TARGET_NR_write: 8048 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 8049 goto efault; 8050 if (fd_trans_target_to_host_data(arg1)) { 8051 void *copy = g_malloc(arg3); 8052 memcpy(copy, p, arg3); 8053 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 8054 if (ret >= 0) { 8055 ret = get_errno(safe_write(arg1, copy, ret)); 8056 } 8057 g_free(copy); 8058 } else { 8059 ret = get_errno(safe_write(arg1, p, arg3)); 8060 } 8061 unlock_user(p, arg2, 0); 8062 break; 8063 #ifdef TARGET_NR_open 8064 case TARGET_NR_open: 8065 if (!(p = lock_user_string(arg1))) 8066 goto efault; 8067 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 8068 target_to_host_bitmask(arg2, fcntl_flags_tbl), 8069 arg3)); 8070 fd_trans_unregister(ret); 8071 unlock_user(p, arg1, 0); 8072 break; 8073 #endif 8074 case TARGET_NR_openat: 8075 if (!(p = 
lock_user_string(arg2))) 8076 goto efault; 8077 ret = get_errno(do_openat(cpu_env, arg1, p, 8078 target_to_host_bitmask(arg3, fcntl_flags_tbl), 8079 arg4)); 8080 fd_trans_unregister(ret); 8081 unlock_user(p, arg2, 0); 8082 break; 8083 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8084 case TARGET_NR_name_to_handle_at: 8085 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 8086 break; 8087 #endif 8088 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8089 case TARGET_NR_open_by_handle_at: 8090 ret = do_open_by_handle_at(arg1, arg2, arg3); 8091 fd_trans_unregister(ret); 8092 break; 8093 #endif 8094 case TARGET_NR_close: 8095 fd_trans_unregister(arg1); 8096 ret = get_errno(close(arg1)); 8097 break; 8098 case TARGET_NR_brk: 8099 ret = do_brk(arg1); 8100 break; 8101 #ifdef TARGET_NR_fork 8102 case TARGET_NR_fork: 8103 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 8104 break; 8105 #endif 8106 #ifdef TARGET_NR_waitpid 8107 case TARGET_NR_waitpid: 8108 { 8109 int status; 8110 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 8111 if (!is_error(ret) && arg2 && ret 8112 && put_user_s32(host_to_target_waitstatus(status), arg2)) 8113 goto efault; 8114 } 8115 break; 8116 #endif 8117 #ifdef TARGET_NR_waitid 8118 case TARGET_NR_waitid: 8119 { 8120 siginfo_t info; 8121 info.si_pid = 0; 8122 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 8123 if (!is_error(ret) && arg3 && info.si_pid != 0) { 8124 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 8125 goto efault; 8126 host_to_target_siginfo(p, &info); 8127 unlock_user(p, arg3, sizeof(target_siginfo_t)); 8128 } 8129 } 8130 break; 8131 #endif 8132 #ifdef TARGET_NR_creat /* not on alpha */ 8133 case TARGET_NR_creat: 8134 if (!(p = lock_user_string(arg1))) 8135 goto efault; 8136 ret = get_errno(creat(p, arg2)); 8137 fd_trans_unregister(ret); 8138 unlock_user(p, arg1, 0); 8139 break; 8140 #endif 8141 #ifdef TARGET_NR_link 
8142 case TARGET_NR_link: 8143 { 8144 void * p2; 8145 p = lock_user_string(arg1); 8146 p2 = lock_user_string(arg2); 8147 if (!p || !p2) 8148 ret = -TARGET_EFAULT; 8149 else 8150 ret = get_errno(link(p, p2)); 8151 unlock_user(p2, arg2, 0); 8152 unlock_user(p, arg1, 0); 8153 } 8154 break; 8155 #endif 8156 #if defined(TARGET_NR_linkat) 8157 case TARGET_NR_linkat: 8158 { 8159 void * p2 = NULL; 8160 if (!arg2 || !arg4) 8161 goto efault; 8162 p = lock_user_string(arg2); 8163 p2 = lock_user_string(arg4); 8164 if (!p || !p2) 8165 ret = -TARGET_EFAULT; 8166 else 8167 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8168 unlock_user(p, arg2, 0); 8169 unlock_user(p2, arg4, 0); 8170 } 8171 break; 8172 #endif 8173 #ifdef TARGET_NR_unlink 8174 case TARGET_NR_unlink: 8175 if (!(p = lock_user_string(arg1))) 8176 goto efault; 8177 ret = get_errno(unlink(p)); 8178 unlock_user(p, arg1, 0); 8179 break; 8180 #endif 8181 #if defined(TARGET_NR_unlinkat) 8182 case TARGET_NR_unlinkat: 8183 if (!(p = lock_user_string(arg2))) 8184 goto efault; 8185 ret = get_errno(unlinkat(arg1, p, arg3)); 8186 unlock_user(p, arg2, 0); 8187 break; 8188 #endif 8189 case TARGET_NR_execve: 8190 { 8191 char **argp, **envp; 8192 int argc, envc; 8193 abi_ulong gp; 8194 abi_ulong guest_argp; 8195 abi_ulong guest_envp; 8196 abi_ulong addr; 8197 char **q; 8198 int total_size = 0; 8199 8200 argc = 0; 8201 guest_argp = arg2; 8202 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 8203 if (get_user_ual(addr, gp)) 8204 goto efault; 8205 if (!addr) 8206 break; 8207 argc++; 8208 } 8209 envc = 0; 8210 guest_envp = arg3; 8211 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 8212 if (get_user_ual(addr, gp)) 8213 goto efault; 8214 if (!addr) 8215 break; 8216 envc++; 8217 } 8218 8219 argp = g_new0(char *, argc + 1); 8220 envp = g_new0(char *, envc + 1); 8221 8222 for (gp = guest_argp, q = argp; gp; 8223 gp += sizeof(abi_ulong), q++) { 8224 if (get_user_ual(addr, gp)) 8225 goto execve_efault; 8226 if (!addr) 8227 break; 
8228 if (!(*q = lock_user_string(addr))) 8229 goto execve_efault; 8230 total_size += strlen(*q) + 1; 8231 } 8232 *q = NULL; 8233 8234 for (gp = guest_envp, q = envp; gp; 8235 gp += sizeof(abi_ulong), q++) { 8236 if (get_user_ual(addr, gp)) 8237 goto execve_efault; 8238 if (!addr) 8239 break; 8240 if (!(*q = lock_user_string(addr))) 8241 goto execve_efault; 8242 total_size += strlen(*q) + 1; 8243 } 8244 *q = NULL; 8245 8246 if (!(p = lock_user_string(arg1))) 8247 goto execve_efault; 8248 /* Although execve() is not an interruptible syscall it is 8249 * a special case where we must use the safe_syscall wrapper: 8250 * if we allow a signal to happen before we make the host 8251 * syscall then we will 'lose' it, because at the point of 8252 * execve the process leaves QEMU's control. So we use the 8253 * safe syscall wrapper to ensure that we either take the 8254 * signal as a guest signal, or else it does not happen 8255 * before the execve completes and makes it the other 8256 * program's problem. 
8257 */ 8258 ret = get_errno(safe_execve(p, argp, envp)); 8259 unlock_user(p, arg1, 0); 8260 8261 goto execve_end; 8262 8263 execve_efault: 8264 ret = -TARGET_EFAULT; 8265 8266 execve_end: 8267 for (gp = guest_argp, q = argp; *q; 8268 gp += sizeof(abi_ulong), q++) { 8269 if (get_user_ual(addr, gp) 8270 || !addr) 8271 break; 8272 unlock_user(*q, addr, 0); 8273 } 8274 for (gp = guest_envp, q = envp; *q; 8275 gp += sizeof(abi_ulong), q++) { 8276 if (get_user_ual(addr, gp) 8277 || !addr) 8278 break; 8279 unlock_user(*q, addr, 0); 8280 } 8281 8282 g_free(argp); 8283 g_free(envp); 8284 } 8285 break; 8286 case TARGET_NR_chdir: 8287 if (!(p = lock_user_string(arg1))) 8288 goto efault; 8289 ret = get_errno(chdir(p)); 8290 unlock_user(p, arg1, 0); 8291 break; 8292 #ifdef TARGET_NR_time 8293 case TARGET_NR_time: 8294 { 8295 time_t host_time; 8296 ret = get_errno(time(&host_time)); 8297 if (!is_error(ret) 8298 && arg1 8299 && put_user_sal(host_time, arg1)) 8300 goto efault; 8301 } 8302 break; 8303 #endif 8304 #ifdef TARGET_NR_mknod 8305 case TARGET_NR_mknod: 8306 if (!(p = lock_user_string(arg1))) 8307 goto efault; 8308 ret = get_errno(mknod(p, arg2, arg3)); 8309 unlock_user(p, arg1, 0); 8310 break; 8311 #endif 8312 #if defined(TARGET_NR_mknodat) 8313 case TARGET_NR_mknodat: 8314 if (!(p = lock_user_string(arg2))) 8315 goto efault; 8316 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8317 unlock_user(p, arg2, 0); 8318 break; 8319 #endif 8320 #ifdef TARGET_NR_chmod 8321 case TARGET_NR_chmod: 8322 if (!(p = lock_user_string(arg1))) 8323 goto efault; 8324 ret = get_errno(chmod(p, arg2)); 8325 unlock_user(p, arg1, 0); 8326 break; 8327 #endif 8328 #ifdef TARGET_NR_break 8329 case TARGET_NR_break: 8330 goto unimplemented; 8331 #endif 8332 #ifdef TARGET_NR_oldstat 8333 case TARGET_NR_oldstat: 8334 goto unimplemented; 8335 #endif 8336 case TARGET_NR_lseek: 8337 ret = get_errno(lseek(arg1, arg2, arg3)); 8338 break; 8339 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8340 /* 
Alpha specific */ 8341 case TARGET_NR_getxpid: 8342 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 8343 ret = get_errno(getpid()); 8344 break; 8345 #endif 8346 #ifdef TARGET_NR_getpid 8347 case TARGET_NR_getpid: 8348 ret = get_errno(getpid()); 8349 break; 8350 #endif 8351 case TARGET_NR_mount: 8352 { 8353 /* need to look at the data field */ 8354 void *p2, *p3; 8355 8356 if (arg1) { 8357 p = lock_user_string(arg1); 8358 if (!p) { 8359 goto efault; 8360 } 8361 } else { 8362 p = NULL; 8363 } 8364 8365 p2 = lock_user_string(arg2); 8366 if (!p2) { 8367 if (arg1) { 8368 unlock_user(p, arg1, 0); 8369 } 8370 goto efault; 8371 } 8372 8373 if (arg3) { 8374 p3 = lock_user_string(arg3); 8375 if (!p3) { 8376 if (arg1) { 8377 unlock_user(p, arg1, 0); 8378 } 8379 unlock_user(p2, arg2, 0); 8380 goto efault; 8381 } 8382 } else { 8383 p3 = NULL; 8384 } 8385 8386 /* FIXME - arg5 should be locked, but it isn't clear how to 8387 * do that since it's not guaranteed to be a NULL-terminated 8388 * string. 
8389 */ 8390 if (!arg5) { 8391 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 8392 } else { 8393 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 8394 } 8395 ret = get_errno(ret); 8396 8397 if (arg1) { 8398 unlock_user(p, arg1, 0); 8399 } 8400 unlock_user(p2, arg2, 0); 8401 if (arg3) { 8402 unlock_user(p3, arg3, 0); 8403 } 8404 } 8405 break; 8406 #ifdef TARGET_NR_umount 8407 case TARGET_NR_umount: 8408 if (!(p = lock_user_string(arg1))) 8409 goto efault; 8410 ret = get_errno(umount(p)); 8411 unlock_user(p, arg1, 0); 8412 break; 8413 #endif 8414 #ifdef TARGET_NR_stime /* not on alpha */ 8415 case TARGET_NR_stime: 8416 { 8417 time_t host_time; 8418 if (get_user_sal(host_time, arg1)) 8419 goto efault; 8420 ret = get_errno(stime(&host_time)); 8421 } 8422 break; 8423 #endif 8424 case TARGET_NR_ptrace: 8425 goto unimplemented; 8426 #ifdef TARGET_NR_alarm /* not on alpha */ 8427 case TARGET_NR_alarm: 8428 ret = alarm(arg1); 8429 break; 8430 #endif 8431 #ifdef TARGET_NR_oldfstat 8432 case TARGET_NR_oldfstat: 8433 goto unimplemented; 8434 #endif 8435 #ifdef TARGET_NR_pause /* not on alpha */ 8436 case TARGET_NR_pause: 8437 if (!block_signals()) { 8438 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 8439 } 8440 ret = -TARGET_EINTR; 8441 break; 8442 #endif 8443 #ifdef TARGET_NR_utime 8444 case TARGET_NR_utime: 8445 { 8446 struct utimbuf tbuf, *host_tbuf; 8447 struct target_utimbuf *target_tbuf; 8448 if (arg2) { 8449 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 8450 goto efault; 8451 tbuf.actime = tswapal(target_tbuf->actime); 8452 tbuf.modtime = tswapal(target_tbuf->modtime); 8453 unlock_user_struct(target_tbuf, arg2, 0); 8454 host_tbuf = &tbuf; 8455 } else { 8456 host_tbuf = NULL; 8457 } 8458 if (!(p = lock_user_string(arg1))) 8459 goto efault; 8460 ret = get_errno(utime(p, host_tbuf)); 8461 unlock_user(p, arg1, 0); 8462 } 8463 break; 8464 #endif 8465 #ifdef TARGET_NR_utimes 8466 case TARGET_NR_utimes: 8467 { 8468 struct timeval *tvp, tv[2]; 
8469 if (arg2) { 8470 if (copy_from_user_timeval(&tv[0], arg2) 8471 || copy_from_user_timeval(&tv[1], 8472 arg2 + sizeof(struct target_timeval))) 8473 goto efault; 8474 tvp = tv; 8475 } else { 8476 tvp = NULL; 8477 } 8478 if (!(p = lock_user_string(arg1))) 8479 goto efault; 8480 ret = get_errno(utimes(p, tvp)); 8481 unlock_user(p, arg1, 0); 8482 } 8483 break; 8484 #endif 8485 #if defined(TARGET_NR_futimesat) 8486 case TARGET_NR_futimesat: 8487 { 8488 struct timeval *tvp, tv[2]; 8489 if (arg3) { 8490 if (copy_from_user_timeval(&tv[0], arg3) 8491 || copy_from_user_timeval(&tv[1], 8492 arg3 + sizeof(struct target_timeval))) 8493 goto efault; 8494 tvp = tv; 8495 } else { 8496 tvp = NULL; 8497 } 8498 if (!(p = lock_user_string(arg2))) 8499 goto efault; 8500 ret = get_errno(futimesat(arg1, path(p), tvp)); 8501 unlock_user(p, arg2, 0); 8502 } 8503 break; 8504 #endif 8505 #ifdef TARGET_NR_stty 8506 case TARGET_NR_stty: 8507 goto unimplemented; 8508 #endif 8509 #ifdef TARGET_NR_gtty 8510 case TARGET_NR_gtty: 8511 goto unimplemented; 8512 #endif 8513 #ifdef TARGET_NR_access 8514 case TARGET_NR_access: 8515 if (!(p = lock_user_string(arg1))) 8516 goto efault; 8517 ret = get_errno(access(path(p), arg2)); 8518 unlock_user(p, arg1, 0); 8519 break; 8520 #endif 8521 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 8522 case TARGET_NR_faccessat: 8523 if (!(p = lock_user_string(arg2))) 8524 goto efault; 8525 ret = get_errno(faccessat(arg1, p, arg3, 0)); 8526 unlock_user(p, arg2, 0); 8527 break; 8528 #endif 8529 #ifdef TARGET_NR_nice /* not on alpha */ 8530 case TARGET_NR_nice: 8531 ret = get_errno(nice(arg1)); 8532 break; 8533 #endif 8534 #ifdef TARGET_NR_ftime 8535 case TARGET_NR_ftime: 8536 goto unimplemented; 8537 #endif 8538 case TARGET_NR_sync: 8539 sync(); 8540 ret = 0; 8541 break; 8542 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 8543 case TARGET_NR_syncfs: 8544 ret = get_errno(syncfs(arg1)); 8545 break; 8546 #endif 8547 case TARGET_NR_kill: 8548 ret = 
get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 8549 break; 8550 #ifdef TARGET_NR_rename 8551 case TARGET_NR_rename: 8552 { 8553 void *p2; 8554 p = lock_user_string(arg1); 8555 p2 = lock_user_string(arg2); 8556 if (!p || !p2) 8557 ret = -TARGET_EFAULT; 8558 else 8559 ret = get_errno(rename(p, p2)); 8560 unlock_user(p2, arg2, 0); 8561 unlock_user(p, arg1, 0); 8562 } 8563 break; 8564 #endif 8565 #if defined(TARGET_NR_renameat) 8566 case TARGET_NR_renameat: 8567 { 8568 void *p2; 8569 p = lock_user_string(arg2); 8570 p2 = lock_user_string(arg4); 8571 if (!p || !p2) 8572 ret = -TARGET_EFAULT; 8573 else 8574 ret = get_errno(renameat(arg1, p, arg3, p2)); 8575 unlock_user(p2, arg4, 0); 8576 unlock_user(p, arg2, 0); 8577 } 8578 break; 8579 #endif 8580 #if defined(TARGET_NR_renameat2) 8581 case TARGET_NR_renameat2: 8582 { 8583 void *p2; 8584 p = lock_user_string(arg2); 8585 p2 = lock_user_string(arg4); 8586 if (!p || !p2) { 8587 ret = -TARGET_EFAULT; 8588 } else { 8589 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 8590 } 8591 unlock_user(p2, arg4, 0); 8592 unlock_user(p, arg2, 0); 8593 } 8594 break; 8595 #endif 8596 #ifdef TARGET_NR_mkdir 8597 case TARGET_NR_mkdir: 8598 if (!(p = lock_user_string(arg1))) 8599 goto efault; 8600 ret = get_errno(mkdir(p, arg2)); 8601 unlock_user(p, arg1, 0); 8602 break; 8603 #endif 8604 #if defined(TARGET_NR_mkdirat) 8605 case TARGET_NR_mkdirat: 8606 if (!(p = lock_user_string(arg2))) 8607 goto efault; 8608 ret = get_errno(mkdirat(arg1, p, arg3)); 8609 unlock_user(p, arg2, 0); 8610 break; 8611 #endif 8612 #ifdef TARGET_NR_rmdir 8613 case TARGET_NR_rmdir: 8614 if (!(p = lock_user_string(arg1))) 8615 goto efault; 8616 ret = get_errno(rmdir(p)); 8617 unlock_user(p, arg1, 0); 8618 break; 8619 #endif 8620 case TARGET_NR_dup: 8621 ret = get_errno(dup(arg1)); 8622 if (ret >= 0) { 8623 fd_trans_dup(arg1, ret); 8624 } 8625 break; 8626 #ifdef TARGET_NR_pipe 8627 case TARGET_NR_pipe: 8628 ret = do_pipe(cpu_env, arg1, 0, 0); 8629 break; 
8630 #endif 8631 #ifdef TARGET_NR_pipe2 8632 case TARGET_NR_pipe2: 8633 ret = do_pipe(cpu_env, arg1, 8634 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 8635 break; 8636 #endif 8637 case TARGET_NR_times: 8638 { 8639 struct target_tms *tmsp; 8640 struct tms tms; 8641 ret = get_errno(times(&tms)); 8642 if (arg1) { 8643 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 8644 if (!tmsp) 8645 goto efault; 8646 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 8647 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 8648 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 8649 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 8650 } 8651 if (!is_error(ret)) 8652 ret = host_to_target_clock_t(ret); 8653 } 8654 break; 8655 #ifdef TARGET_NR_prof 8656 case TARGET_NR_prof: 8657 goto unimplemented; 8658 #endif 8659 #ifdef TARGET_NR_signal 8660 case TARGET_NR_signal: 8661 goto unimplemented; 8662 #endif 8663 case TARGET_NR_acct: 8664 if (arg1 == 0) { 8665 ret = get_errno(acct(NULL)); 8666 } else { 8667 if (!(p = lock_user_string(arg1))) 8668 goto efault; 8669 ret = get_errno(acct(path(p))); 8670 unlock_user(p, arg1, 0); 8671 } 8672 break; 8673 #ifdef TARGET_NR_umount2 8674 case TARGET_NR_umount2: 8675 if (!(p = lock_user_string(arg1))) 8676 goto efault; 8677 ret = get_errno(umount2(p, arg2)); 8678 unlock_user(p, arg1, 0); 8679 break; 8680 #endif 8681 #ifdef TARGET_NR_lock 8682 case TARGET_NR_lock: 8683 goto unimplemented; 8684 #endif 8685 case TARGET_NR_ioctl: 8686 ret = do_ioctl(arg1, arg2, arg3); 8687 break; 8688 #ifdef TARGET_NR_fcntl 8689 case TARGET_NR_fcntl: 8690 ret = do_fcntl(arg1, arg2, arg3); 8691 break; 8692 #endif 8693 #ifdef TARGET_NR_mpx 8694 case TARGET_NR_mpx: 8695 goto unimplemented; 8696 #endif 8697 case TARGET_NR_setpgid: 8698 ret = get_errno(setpgid(arg1, arg2)); 8699 break; 8700 #ifdef TARGET_NR_ulimit 8701 case TARGET_NR_ulimit: 8702 goto unimplemented; 8703 #endif 8704 
#ifdef TARGET_NR_oldolduname 8705 case TARGET_NR_oldolduname: 8706 goto unimplemented; 8707 #endif 8708 case TARGET_NR_umask: 8709 ret = get_errno(umask(arg1)); 8710 break; 8711 case TARGET_NR_chroot: 8712 if (!(p = lock_user_string(arg1))) 8713 goto efault; 8714 ret = get_errno(chroot(p)); 8715 unlock_user(p, arg1, 0); 8716 break; 8717 #ifdef TARGET_NR_ustat 8718 case TARGET_NR_ustat: 8719 goto unimplemented; 8720 #endif 8721 #ifdef TARGET_NR_dup2 8722 case TARGET_NR_dup2: 8723 ret = get_errno(dup2(arg1, arg2)); 8724 if (ret >= 0) { 8725 fd_trans_dup(arg1, arg2); 8726 } 8727 break; 8728 #endif 8729 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 8730 case TARGET_NR_dup3: 8731 { 8732 int host_flags; 8733 8734 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 8735 return -EINVAL; 8736 } 8737 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 8738 ret = get_errno(dup3(arg1, arg2, host_flags)); 8739 if (ret >= 0) { 8740 fd_trans_dup(arg1, arg2); 8741 } 8742 break; 8743 } 8744 #endif 8745 #ifdef TARGET_NR_getppid /* not on alpha */ 8746 case TARGET_NR_getppid: 8747 ret = get_errno(getppid()); 8748 break; 8749 #endif 8750 #ifdef TARGET_NR_getpgrp 8751 case TARGET_NR_getpgrp: 8752 ret = get_errno(getpgrp()); 8753 break; 8754 #endif 8755 case TARGET_NR_setsid: 8756 ret = get_errno(setsid()); 8757 break; 8758 #ifdef TARGET_NR_sigaction 8759 case TARGET_NR_sigaction: 8760 { 8761 #if defined(TARGET_ALPHA) 8762 struct target_sigaction act, oact, *pact = 0; 8763 struct target_old_sigaction *old_act; 8764 if (arg2) { 8765 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8766 goto efault; 8767 act._sa_handler = old_act->_sa_handler; 8768 target_siginitset(&act.sa_mask, old_act->sa_mask); 8769 act.sa_flags = old_act->sa_flags; 8770 act.sa_restorer = 0; 8771 unlock_user_struct(old_act, arg2, 0); 8772 pact = &act; 8773 } 8774 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8775 if (!is_error(ret) && arg3) { 8776 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8777 
goto efault; 8778 old_act->_sa_handler = oact._sa_handler; 8779 old_act->sa_mask = oact.sa_mask.sig[0]; 8780 old_act->sa_flags = oact.sa_flags; 8781 unlock_user_struct(old_act, arg3, 1); 8782 } 8783 #elif defined(TARGET_MIPS) 8784 struct target_sigaction act, oact, *pact, *old_act; 8785 8786 if (arg2) { 8787 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8788 goto efault; 8789 act._sa_handler = old_act->_sa_handler; 8790 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 8791 act.sa_flags = old_act->sa_flags; 8792 unlock_user_struct(old_act, arg2, 0); 8793 pact = &act; 8794 } else { 8795 pact = NULL; 8796 } 8797 8798 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8799 8800 if (!is_error(ret) && arg3) { 8801 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8802 goto efault; 8803 old_act->_sa_handler = oact._sa_handler; 8804 old_act->sa_flags = oact.sa_flags; 8805 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 8806 old_act->sa_mask.sig[1] = 0; 8807 old_act->sa_mask.sig[2] = 0; 8808 old_act->sa_mask.sig[3] = 0; 8809 unlock_user_struct(old_act, arg3, 1); 8810 } 8811 #else 8812 struct target_old_sigaction *old_act; 8813 struct target_sigaction act, oact, *pact; 8814 if (arg2) { 8815 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8816 goto efault; 8817 act._sa_handler = old_act->_sa_handler; 8818 target_siginitset(&act.sa_mask, old_act->sa_mask); 8819 act.sa_flags = old_act->sa_flags; 8820 act.sa_restorer = old_act->sa_restorer; 8821 #ifdef TARGET_ARCH_HAS_KA_RESTORER 8822 act.ka_restorer = 0; 8823 #endif 8824 unlock_user_struct(old_act, arg2, 0); 8825 pact = &act; 8826 } else { 8827 pact = NULL; 8828 } 8829 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8830 if (!is_error(ret) && arg3) { 8831 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8832 goto efault; 8833 old_act->_sa_handler = oact._sa_handler; 8834 old_act->sa_mask = oact.sa_mask.sig[0]; 8835 old_act->sa_flags = oact.sa_flags; 8836 old_act->sa_restorer = oact.sa_restorer; 
8837 unlock_user_struct(old_act, arg3, 1); 8838 } 8839 #endif 8840 } 8841 break; 8842 #endif 8843 case TARGET_NR_rt_sigaction: 8844 { 8845 #if defined(TARGET_ALPHA) 8846 /* For Alpha and SPARC this is a 5 argument syscall, with 8847 * a 'restorer' parameter which must be copied into the 8848 * sa_restorer field of the sigaction struct. 8849 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 8850 * and arg5 is the sigsetsize. 8851 * Alpha also has a separate rt_sigaction struct that it uses 8852 * here; SPARC uses the usual sigaction struct. 8853 */ 8854 struct target_rt_sigaction *rt_act; 8855 struct target_sigaction act, oact, *pact = 0; 8856 8857 if (arg4 != sizeof(target_sigset_t)) { 8858 ret = -TARGET_EINVAL; 8859 break; 8860 } 8861 if (arg2) { 8862 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 8863 goto efault; 8864 act._sa_handler = rt_act->_sa_handler; 8865 act.sa_mask = rt_act->sa_mask; 8866 act.sa_flags = rt_act->sa_flags; 8867 act.sa_restorer = arg5; 8868 unlock_user_struct(rt_act, arg2, 0); 8869 pact = &act; 8870 } 8871 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8872 if (!is_error(ret) && arg3) { 8873 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 8874 goto efault; 8875 rt_act->_sa_handler = oact._sa_handler; 8876 rt_act->sa_mask = oact.sa_mask; 8877 rt_act->sa_flags = oact.sa_flags; 8878 unlock_user_struct(rt_act, arg3, 1); 8879 } 8880 #else 8881 #ifdef TARGET_SPARC 8882 target_ulong restorer = arg4; 8883 target_ulong sigsetsize = arg5; 8884 #else 8885 target_ulong sigsetsize = arg4; 8886 #endif 8887 struct target_sigaction *act; 8888 struct target_sigaction *oact; 8889 8890 if (sigsetsize != sizeof(target_sigset_t)) { 8891 ret = -TARGET_EINVAL; 8892 break; 8893 } 8894 if (arg2) { 8895 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) { 8896 goto efault; 8897 } 8898 #ifdef TARGET_ARCH_HAS_KA_RESTORER 8899 act->ka_restorer = restorer; 8900 #endif 8901 } else { 8902 act = NULL; 8903 } 8904 if (arg3) { 8905 if 
(!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 8906 ret = -TARGET_EFAULT; 8907 goto rt_sigaction_fail; 8908 } 8909 } else 8910 oact = NULL; 8911 ret = get_errno(do_sigaction(arg1, act, oact)); 8912 rt_sigaction_fail: 8913 if (act) 8914 unlock_user_struct(act, arg2, 0); 8915 if (oact) 8916 unlock_user_struct(oact, arg3, 1); 8917 #endif 8918 } 8919 break; 8920 #ifdef TARGET_NR_sgetmask /* not on alpha */ 8921 case TARGET_NR_sgetmask: 8922 { 8923 sigset_t cur_set; 8924 abi_ulong target_set; 8925 ret = do_sigprocmask(0, NULL, &cur_set); 8926 if (!ret) { 8927 host_to_target_old_sigset(&target_set, &cur_set); 8928 ret = target_set; 8929 } 8930 } 8931 break; 8932 #endif 8933 #ifdef TARGET_NR_ssetmask /* not on alpha */ 8934 case TARGET_NR_ssetmask: 8935 { 8936 sigset_t set, oset; 8937 abi_ulong target_set = arg1; 8938 target_to_host_old_sigset(&set, &target_set); 8939 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 8940 if (!ret) { 8941 host_to_target_old_sigset(&target_set, &oset); 8942 ret = target_set; 8943 } 8944 } 8945 break; 8946 #endif 8947 #ifdef TARGET_NR_sigprocmask 8948 case TARGET_NR_sigprocmask: 8949 { 8950 #if defined(TARGET_ALPHA) 8951 sigset_t set, oldset; 8952 abi_ulong mask; 8953 int how; 8954 8955 switch (arg1) { 8956 case TARGET_SIG_BLOCK: 8957 how = SIG_BLOCK; 8958 break; 8959 case TARGET_SIG_UNBLOCK: 8960 how = SIG_UNBLOCK; 8961 break; 8962 case TARGET_SIG_SETMASK: 8963 how = SIG_SETMASK; 8964 break; 8965 default: 8966 ret = -TARGET_EINVAL; 8967 goto fail; 8968 } 8969 mask = arg2; 8970 target_to_host_old_sigset(&set, &mask); 8971 8972 ret = do_sigprocmask(how, &set, &oldset); 8973 if (!is_error(ret)) { 8974 host_to_target_old_sigset(&mask, &oldset); 8975 ret = mask; 8976 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 8977 } 8978 #else 8979 sigset_t set, oldset, *set_ptr; 8980 int how; 8981 8982 if (arg2) { 8983 switch (arg1) { 8984 case TARGET_SIG_BLOCK: 8985 how = SIG_BLOCK; 8986 break; 8987 case TARGET_SIG_UNBLOCK: 8988 how = 
SIG_UNBLOCK; 8989 break; 8990 case TARGET_SIG_SETMASK: 8991 how = SIG_SETMASK; 8992 break; 8993 default: 8994 ret = -TARGET_EINVAL; 8995 goto fail; 8996 } 8997 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 8998 goto efault; 8999 target_to_host_old_sigset(&set, p); 9000 unlock_user(p, arg2, 0); 9001 set_ptr = &set; 9002 } else { 9003 how = 0; 9004 set_ptr = NULL; 9005 } 9006 ret = do_sigprocmask(how, set_ptr, &oldset); 9007 if (!is_error(ret) && arg3) { 9008 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9009 goto efault; 9010 host_to_target_old_sigset(p, &oldset); 9011 unlock_user(p, arg3, sizeof(target_sigset_t)); 9012 } 9013 #endif 9014 } 9015 break; 9016 #endif 9017 case TARGET_NR_rt_sigprocmask: 9018 { 9019 int how = arg1; 9020 sigset_t set, oldset, *set_ptr; 9021 9022 if (arg4 != sizeof(target_sigset_t)) { 9023 ret = -TARGET_EINVAL; 9024 break; 9025 } 9026 9027 if (arg2) { 9028 switch(how) { 9029 case TARGET_SIG_BLOCK: 9030 how = SIG_BLOCK; 9031 break; 9032 case TARGET_SIG_UNBLOCK: 9033 how = SIG_UNBLOCK; 9034 break; 9035 case TARGET_SIG_SETMASK: 9036 how = SIG_SETMASK; 9037 break; 9038 default: 9039 ret = -TARGET_EINVAL; 9040 goto fail; 9041 } 9042 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 9043 goto efault; 9044 target_to_host_sigset(&set, p); 9045 unlock_user(p, arg2, 0); 9046 set_ptr = &set; 9047 } else { 9048 how = 0; 9049 set_ptr = NULL; 9050 } 9051 ret = do_sigprocmask(how, set_ptr, &oldset); 9052 if (!is_error(ret) && arg3) { 9053 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9054 goto efault; 9055 host_to_target_sigset(p, &oldset); 9056 unlock_user(p, arg3, sizeof(target_sigset_t)); 9057 } 9058 } 9059 break; 9060 #ifdef TARGET_NR_sigpending 9061 case TARGET_NR_sigpending: 9062 { 9063 sigset_t set; 9064 ret = get_errno(sigpending(&set)); 9065 if (!is_error(ret)) { 9066 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9067 goto efault; 
9068 host_to_target_old_sigset(p, &set); 9069 unlock_user(p, arg1, sizeof(target_sigset_t)); 9070 } 9071 } 9072 break; 9073 #endif 9074 case TARGET_NR_rt_sigpending: 9075 { 9076 sigset_t set; 9077 9078 /* Yes, this check is >, not != like most. We follow the kernel's 9079 * logic and it does it like this because it implements 9080 * NR_sigpending through the same code path, and in that case 9081 * the old_sigset_t is smaller in size. 9082 */ 9083 if (arg2 > sizeof(target_sigset_t)) { 9084 ret = -TARGET_EINVAL; 9085 break; 9086 } 9087 9088 ret = get_errno(sigpending(&set)); 9089 if (!is_error(ret)) { 9090 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9091 goto efault; 9092 host_to_target_sigset(p, &set); 9093 unlock_user(p, arg1, sizeof(target_sigset_t)); 9094 } 9095 } 9096 break; 9097 #ifdef TARGET_NR_sigsuspend 9098 case TARGET_NR_sigsuspend: 9099 { 9100 TaskState *ts = cpu->opaque; 9101 #if defined(TARGET_ALPHA) 9102 abi_ulong mask = arg1; 9103 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); 9104 #else 9105 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9106 goto efault; 9107 target_to_host_old_sigset(&ts->sigsuspend_mask, p); 9108 unlock_user(p, arg1, 0); 9109 #endif 9110 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 9111 SIGSET_T_SIZE)); 9112 if (ret != -TARGET_ERESTARTSYS) { 9113 ts->in_sigsuspend = 1; 9114 } 9115 } 9116 break; 9117 #endif 9118 case TARGET_NR_rt_sigsuspend: 9119 { 9120 TaskState *ts = cpu->opaque; 9121 9122 if (arg2 != sizeof(target_sigset_t)) { 9123 ret = -TARGET_EINVAL; 9124 break; 9125 } 9126 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9127 goto efault; 9128 target_to_host_sigset(&ts->sigsuspend_mask, p); 9129 unlock_user(p, arg1, 0); 9130 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 9131 SIGSET_T_SIZE)); 9132 if (ret != -TARGET_ERESTARTSYS) { 9133 ts->in_sigsuspend = 1; 9134 } 9135 } 9136 break; 9137 case TARGET_NR_rt_sigtimedwait: 9138 { 
9139 sigset_t set; 9140 struct timespec uts, *puts; 9141 siginfo_t uinfo; 9142 9143 if (arg4 != sizeof(target_sigset_t)) { 9144 ret = -TARGET_EINVAL; 9145 break; 9146 } 9147 9148 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9149 goto efault; 9150 target_to_host_sigset(&set, p); 9151 unlock_user(p, arg1, 0); 9152 if (arg3) { 9153 puts = &uts; 9154 target_to_host_timespec(puts, arg3); 9155 } else { 9156 puts = NULL; 9157 } 9158 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9159 SIGSET_T_SIZE)); 9160 if (!is_error(ret)) { 9161 if (arg2) { 9162 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9163 0); 9164 if (!p) { 9165 goto efault; 9166 } 9167 host_to_target_siginfo(p, &uinfo); 9168 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9169 } 9170 ret = host_to_target_signal(ret); 9171 } 9172 } 9173 break; 9174 case TARGET_NR_rt_sigqueueinfo: 9175 { 9176 siginfo_t uinfo; 9177 9178 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9179 if (!p) { 9180 goto efault; 9181 } 9182 target_to_host_siginfo(&uinfo, p); 9183 unlock_user(p, arg3, 0); 9184 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 9185 } 9186 break; 9187 case TARGET_NR_rt_tgsigqueueinfo: 9188 { 9189 siginfo_t uinfo; 9190 9191 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 9192 if (!p) { 9193 goto efault; 9194 } 9195 target_to_host_siginfo(&uinfo, p); 9196 unlock_user(p, arg4, 0); 9197 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo)); 9198 } 9199 break; 9200 #ifdef TARGET_NR_sigreturn 9201 case TARGET_NR_sigreturn: 9202 if (block_signals()) { 9203 ret = -TARGET_ERESTARTSYS; 9204 } else { 9205 ret = do_sigreturn(cpu_env); 9206 } 9207 break; 9208 #endif 9209 case TARGET_NR_rt_sigreturn: 9210 if (block_signals()) { 9211 ret = -TARGET_ERESTARTSYS; 9212 } else { 9213 ret = do_rt_sigreturn(cpu_env); 9214 } 9215 break; 9216 case TARGET_NR_sethostname: 9217 if (!(p = lock_user_string(arg1))) 9218 goto efault; 9219 ret = 
get_errno(sethostname(p, arg2)); 9220 unlock_user(p, arg1, 0); 9221 break; 9222 case TARGET_NR_setrlimit: 9223 { 9224 int resource = target_to_host_resource(arg1); 9225 struct target_rlimit *target_rlim; 9226 struct rlimit rlim; 9227 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9228 goto efault; 9229 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9230 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 9231 unlock_user_struct(target_rlim, arg2, 0); 9232 ret = get_errno(setrlimit(resource, &rlim)); 9233 } 9234 break; 9235 case TARGET_NR_getrlimit: 9236 { 9237 int resource = target_to_host_resource(arg1); 9238 struct target_rlimit *target_rlim; 9239 struct rlimit rlim; 9240 9241 ret = get_errno(getrlimit(resource, &rlim)); 9242 if (!is_error(ret)) { 9243 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9244 goto efault; 9245 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9246 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9247 unlock_user_struct(target_rlim, arg2, 1); 9248 } 9249 } 9250 break; 9251 case TARGET_NR_getrusage: 9252 { 9253 struct rusage rusage; 9254 ret = get_errno(getrusage(arg1, &rusage)); 9255 if (!is_error(ret)) { 9256 ret = host_to_target_rusage(arg2, &rusage); 9257 } 9258 } 9259 break; 9260 case TARGET_NR_gettimeofday: 9261 { 9262 struct timeval tv; 9263 ret = get_errno(gettimeofday(&tv, NULL)); 9264 if (!is_error(ret)) { 9265 if (copy_to_user_timeval(arg1, &tv)) 9266 goto efault; 9267 } 9268 } 9269 break; 9270 case TARGET_NR_settimeofday: 9271 { 9272 struct timeval tv, *ptv = NULL; 9273 struct timezone tz, *ptz = NULL; 9274 9275 if (arg1) { 9276 if (copy_from_user_timeval(&tv, arg1)) { 9277 goto efault; 9278 } 9279 ptv = &tv; 9280 } 9281 9282 if (arg2) { 9283 if (copy_from_user_timezone(&tz, arg2)) { 9284 goto efault; 9285 } 9286 ptz = &tz; 9287 } 9288 9289 ret = get_errno(settimeofday(ptv, ptz)); 9290 } 9291 break; 9292 #if defined(TARGET_NR_select) 9293 case TARGET_NR_select: 
9294 #if defined(TARGET_WANT_NI_OLD_SELECT) 9295 /* some architectures used to have old_select here 9296 * but now ENOSYS it. 9297 */ 9298 ret = -TARGET_ENOSYS; 9299 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9300 ret = do_old_select(arg1); 9301 #else 9302 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9303 #endif 9304 break; 9305 #endif 9306 #ifdef TARGET_NR_pselect6 9307 case TARGET_NR_pselect6: 9308 { 9309 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 9310 fd_set rfds, wfds, efds; 9311 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 9312 struct timespec ts, *ts_ptr; 9313 9314 /* 9315 * The 6th arg is actually two args smashed together, 9316 * so we cannot use the C library. 9317 */ 9318 sigset_t set; 9319 struct { 9320 sigset_t *set; 9321 size_t size; 9322 } sig, *sig_ptr; 9323 9324 abi_ulong arg_sigset, arg_sigsize, *arg7; 9325 target_sigset_t *target_sigset; 9326 9327 n = arg1; 9328 rfd_addr = arg2; 9329 wfd_addr = arg3; 9330 efd_addr = arg4; 9331 ts_addr = arg5; 9332 9333 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 9334 if (ret) { 9335 goto fail; 9336 } 9337 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 9338 if (ret) { 9339 goto fail; 9340 } 9341 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 9342 if (ret) { 9343 goto fail; 9344 } 9345 9346 /* 9347 * This takes a timespec, and not a timeval, so we cannot 9348 * use the do_select() helper ... 
9349 */ 9350 if (ts_addr) { 9351 if (target_to_host_timespec(&ts, ts_addr)) { 9352 goto efault; 9353 } 9354 ts_ptr = &ts; 9355 } else { 9356 ts_ptr = NULL; 9357 } 9358 9359 /* Extract the two packed args for the sigset */ 9360 if (arg6) { 9361 sig_ptr = &sig; 9362 sig.size = SIGSET_T_SIZE; 9363 9364 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 9365 if (!arg7) { 9366 goto efault; 9367 } 9368 arg_sigset = tswapal(arg7[0]); 9369 arg_sigsize = tswapal(arg7[1]); 9370 unlock_user(arg7, arg6, 0); 9371 9372 if (arg_sigset) { 9373 sig.set = &set; 9374 if (arg_sigsize != sizeof(*target_sigset)) { 9375 /* Like the kernel, we enforce correct size sigsets */ 9376 ret = -TARGET_EINVAL; 9377 goto fail; 9378 } 9379 target_sigset = lock_user(VERIFY_READ, arg_sigset, 9380 sizeof(*target_sigset), 1); 9381 if (!target_sigset) { 9382 goto efault; 9383 } 9384 target_to_host_sigset(&set, target_sigset); 9385 unlock_user(target_sigset, arg_sigset, 0); 9386 } else { 9387 sig.set = NULL; 9388 } 9389 } else { 9390 sig_ptr = NULL; 9391 } 9392 9393 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 9394 ts_ptr, sig_ptr)); 9395 9396 if (!is_error(ret)) { 9397 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 9398 goto efault; 9399 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 9400 goto efault; 9401 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 9402 goto efault; 9403 9404 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 9405 goto efault; 9406 } 9407 } 9408 break; 9409 #endif 9410 #ifdef TARGET_NR_symlink 9411 case TARGET_NR_symlink: 9412 { 9413 void *p2; 9414 p = lock_user_string(arg1); 9415 p2 = lock_user_string(arg2); 9416 if (!p || !p2) 9417 ret = -TARGET_EFAULT; 9418 else 9419 ret = get_errno(symlink(p, p2)); 9420 unlock_user(p2, arg2, 0); 9421 unlock_user(p, arg1, 0); 9422 } 9423 break; 9424 #endif 9425 #if defined(TARGET_NR_symlinkat) 9426 case TARGET_NR_symlinkat: 9427 { 9428 void *p2; 9429 p = lock_user_string(arg1); 9430 p2 = 
lock_user_string(arg3); 9431 if (!p || !p2) 9432 ret = -TARGET_EFAULT; 9433 else 9434 ret = get_errno(symlinkat(p, arg2, p2)); 9435 unlock_user(p2, arg3, 0); 9436 unlock_user(p, arg1, 0); 9437 } 9438 break; 9439 #endif 9440 #ifdef TARGET_NR_oldlstat 9441 case TARGET_NR_oldlstat: 9442 goto unimplemented; 9443 #endif 9444 #ifdef TARGET_NR_readlink 9445 case TARGET_NR_readlink: 9446 { 9447 void *p2; 9448 p = lock_user_string(arg1); 9449 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9450 if (!p || !p2) { 9451 ret = -TARGET_EFAULT; 9452 } else if (!arg3) { 9453 /* Short circuit this for the magic exe check. */ 9454 ret = -TARGET_EINVAL; 9455 } else if (is_proc_myself((const char *)p, "exe")) { 9456 char real[PATH_MAX], *temp; 9457 temp = realpath(exec_path, real); 9458 /* Return value is # of bytes that we wrote to the buffer. */ 9459 if (temp == NULL) { 9460 ret = get_errno(-1); 9461 } else { 9462 /* Don't worry about sign mismatch as earlier mapping 9463 * logic would have thrown a bad address error. */ 9464 ret = MIN(strlen(real), arg3); 9465 /* We cannot NUL terminate the string. */ 9466 memcpy(p2, real, ret); 9467 } 9468 } else { 9469 ret = get_errno(readlink(path(p), p2, arg3)); 9470 } 9471 unlock_user(p2, arg2, ret); 9472 unlock_user(p, arg1, 0); 9473 } 9474 break; 9475 #endif 9476 #if defined(TARGET_NR_readlinkat) 9477 case TARGET_NR_readlinkat: 9478 { 9479 void *p2; 9480 p = lock_user_string(arg2); 9481 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9482 if (!p || !p2) { 9483 ret = -TARGET_EFAULT; 9484 } else if (is_proc_myself((const char *)p, "exe")) { 9485 char real[PATH_MAX], *temp; 9486 temp = realpath(exec_path, real); 9487 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 9488 snprintf((char *)p2, arg4, "%s", real); 9489 } else { 9490 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 9491 } 9492 unlock_user(p2, arg3, ret); 9493 unlock_user(p, arg2, 0); 9494 } 9495 break; 9496 #endif 9497 #ifdef TARGET_NR_uselib 9498 case TARGET_NR_uselib: 9499 goto unimplemented; 9500 #endif 9501 #ifdef TARGET_NR_swapon 9502 case TARGET_NR_swapon: 9503 if (!(p = lock_user_string(arg1))) 9504 goto efault; 9505 ret = get_errno(swapon(p, arg2)); 9506 unlock_user(p, arg1, 0); 9507 break; 9508 #endif 9509 case TARGET_NR_reboot: 9510 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 9511 /* arg4 must be ignored in all other cases */ 9512 p = lock_user_string(arg4); 9513 if (!p) { 9514 goto efault; 9515 } 9516 ret = get_errno(reboot(arg1, arg2, arg3, p)); 9517 unlock_user(p, arg4, 0); 9518 } else { 9519 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 9520 } 9521 break; 9522 #ifdef TARGET_NR_readdir 9523 case TARGET_NR_readdir: 9524 goto unimplemented; 9525 #endif 9526 #ifdef TARGET_NR_mmap 9527 case TARGET_NR_mmap: 9528 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 9529 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 9530 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 9531 || defined(TARGET_S390X) 9532 { 9533 abi_ulong *v; 9534 abi_ulong v1, v2, v3, v4, v5, v6; 9535 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 9536 goto efault; 9537 v1 = tswapal(v[0]); 9538 v2 = tswapal(v[1]); 9539 v3 = tswapal(v[2]); 9540 v4 = tswapal(v[3]); 9541 v5 = tswapal(v[4]); 9542 v6 = tswapal(v[5]); 9543 unlock_user(v, arg1, 0); 9544 ret = get_errno(target_mmap(v1, v2, v3, 9545 target_to_host_bitmask(v4, mmap_flags_tbl), 9546 v5, v6)); 9547 } 9548 #else 9549 ret = get_errno(target_mmap(arg1, arg2, arg3, 9550 target_to_host_bitmask(arg4, mmap_flags_tbl), 9551 arg5, 9552 arg6)); 9553 #endif 9554 break; 9555 #endif 9556 #ifdef TARGET_NR_mmap2 9557 case TARGET_NR_mmap2: 9558 #ifndef 
MMAP_SHIFT 9559 #define MMAP_SHIFT 12 9560 #endif 9561 ret = get_errno(target_mmap(arg1, arg2, arg3, 9562 target_to_host_bitmask(arg4, mmap_flags_tbl), 9563 arg5, 9564 arg6 << MMAP_SHIFT)); 9565 break; 9566 #endif 9567 case TARGET_NR_munmap: 9568 ret = get_errno(target_munmap(arg1, arg2)); 9569 break; 9570 case TARGET_NR_mprotect: 9571 { 9572 TaskState *ts = cpu->opaque; 9573 /* Special hack to detect libc making the stack executable. */ 9574 if ((arg3 & PROT_GROWSDOWN) 9575 && arg1 >= ts->info->stack_limit 9576 && arg1 <= ts->info->start_stack) { 9577 arg3 &= ~PROT_GROWSDOWN; 9578 arg2 = arg2 + arg1 - ts->info->stack_limit; 9579 arg1 = ts->info->stack_limit; 9580 } 9581 } 9582 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 9583 break; 9584 #ifdef TARGET_NR_mremap 9585 case TARGET_NR_mremap: 9586 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 9587 break; 9588 #endif 9589 /* ??? msync/mlock/munlock are broken for softmmu. */ 9590 #ifdef TARGET_NR_msync 9591 case TARGET_NR_msync: 9592 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 9593 break; 9594 #endif 9595 #ifdef TARGET_NR_mlock 9596 case TARGET_NR_mlock: 9597 ret = get_errno(mlock(g2h(arg1), arg2)); 9598 break; 9599 #endif 9600 #ifdef TARGET_NR_munlock 9601 case TARGET_NR_munlock: 9602 ret = get_errno(munlock(g2h(arg1), arg2)); 9603 break; 9604 #endif 9605 #ifdef TARGET_NR_mlockall 9606 case TARGET_NR_mlockall: 9607 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 9608 break; 9609 #endif 9610 #ifdef TARGET_NR_munlockall 9611 case TARGET_NR_munlockall: 9612 ret = get_errno(munlockall()); 9613 break; 9614 #endif 9615 case TARGET_NR_truncate: 9616 if (!(p = lock_user_string(arg1))) 9617 goto efault; 9618 ret = get_errno(truncate(p, arg2)); 9619 unlock_user(p, arg1, 0); 9620 break; 9621 case TARGET_NR_ftruncate: 9622 ret = get_errno(ftruncate(arg1, arg2)); 9623 break; 9624 case TARGET_NR_fchmod: 9625 ret = get_errno(fchmod(arg1, arg2)); 9626 break; 9627 #if defined(TARGET_NR_fchmodat) 
9628 case TARGET_NR_fchmodat: 9629 if (!(p = lock_user_string(arg2))) 9630 goto efault; 9631 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 9632 unlock_user(p, arg2, 0); 9633 break; 9634 #endif 9635 case TARGET_NR_getpriority: 9636 /* Note that negative values are valid for getpriority, so we must 9637 differentiate based on errno settings. */ 9638 errno = 0; 9639 ret = getpriority(arg1, arg2); 9640 if (ret == -1 && errno != 0) { 9641 ret = -host_to_target_errno(errno); 9642 break; 9643 } 9644 #ifdef TARGET_ALPHA 9645 /* Return value is the unbiased priority. Signal no error. */ 9646 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 9647 #else 9648 /* Return value is a biased priority to avoid negative numbers. */ 9649 ret = 20 - ret; 9650 #endif 9651 break; 9652 case TARGET_NR_setpriority: 9653 ret = get_errno(setpriority(arg1, arg2, arg3)); 9654 break; 9655 #ifdef TARGET_NR_profil 9656 case TARGET_NR_profil: 9657 goto unimplemented; 9658 #endif 9659 case TARGET_NR_statfs: 9660 if (!(p = lock_user_string(arg1))) 9661 goto efault; 9662 ret = get_errno(statfs(path(p), &stfs)); 9663 unlock_user(p, arg1, 0); 9664 convert_statfs: 9665 if (!is_error(ret)) { 9666 struct target_statfs *target_stfs; 9667 9668 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 9669 goto efault; 9670 __put_user(stfs.f_type, &target_stfs->f_type); 9671 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9672 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9673 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9674 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9675 __put_user(stfs.f_files, &target_stfs->f_files); 9676 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9677 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9678 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9679 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9680 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9681 #ifdef _STATFS_F_FLAGS 9682 __put_user(stfs.f_flags, 
&target_stfs->f_flags); 9683 #else 9684 __put_user(0, &target_stfs->f_flags); 9685 #endif 9686 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9687 unlock_user_struct(target_stfs, arg2, 1); 9688 } 9689 break; 9690 case TARGET_NR_fstatfs: 9691 ret = get_errno(fstatfs(arg1, &stfs)); 9692 goto convert_statfs; 9693 #ifdef TARGET_NR_statfs64 9694 case TARGET_NR_statfs64: 9695 if (!(p = lock_user_string(arg1))) 9696 goto efault; 9697 ret = get_errno(statfs(path(p), &stfs)); 9698 unlock_user(p, arg1, 0); 9699 convert_statfs64: 9700 if (!is_error(ret)) { 9701 struct target_statfs64 *target_stfs; 9702 9703 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 9704 goto efault; 9705 __put_user(stfs.f_type, &target_stfs->f_type); 9706 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9707 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9708 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9709 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9710 __put_user(stfs.f_files, &target_stfs->f_files); 9711 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9712 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9713 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9714 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9715 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9716 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9717 unlock_user_struct(target_stfs, arg3, 1); 9718 } 9719 break; 9720 case TARGET_NR_fstatfs64: 9721 ret = get_errno(fstatfs(arg1, &stfs)); 9722 goto convert_statfs64; 9723 #endif 9724 #ifdef TARGET_NR_ioperm 9725 case TARGET_NR_ioperm: 9726 goto unimplemented; 9727 #endif 9728 #ifdef TARGET_NR_socketcall 9729 case TARGET_NR_socketcall: 9730 ret = do_socketcall(arg1, arg2); 9731 break; 9732 #endif 9733 #ifdef TARGET_NR_accept 9734 case TARGET_NR_accept: 9735 ret = do_accept4(arg1, arg2, arg3, 0); 9736 break; 9737 #endif 9738 #ifdef TARGET_NR_accept4 9739 case TARGET_NR_accept4: 9740 ret = 
do_accept4(arg1, arg2, arg3, arg4); 9741 break; 9742 #endif 9743 #ifdef TARGET_NR_bind 9744 case TARGET_NR_bind: 9745 ret = do_bind(arg1, arg2, arg3); 9746 break; 9747 #endif 9748 #ifdef TARGET_NR_connect 9749 case TARGET_NR_connect: 9750 ret = do_connect(arg1, arg2, arg3); 9751 break; 9752 #endif 9753 #ifdef TARGET_NR_getpeername 9754 case TARGET_NR_getpeername: 9755 ret = do_getpeername(arg1, arg2, arg3); 9756 break; 9757 #endif 9758 #ifdef TARGET_NR_getsockname 9759 case TARGET_NR_getsockname: 9760 ret = do_getsockname(arg1, arg2, arg3); 9761 break; 9762 #endif 9763 #ifdef TARGET_NR_getsockopt 9764 case TARGET_NR_getsockopt: 9765 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 9766 break; 9767 #endif 9768 #ifdef TARGET_NR_listen 9769 case TARGET_NR_listen: 9770 ret = get_errno(listen(arg1, arg2)); 9771 break; 9772 #endif 9773 #ifdef TARGET_NR_recv 9774 case TARGET_NR_recv: 9775 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 9776 break; 9777 #endif 9778 #ifdef TARGET_NR_recvfrom 9779 case TARGET_NR_recvfrom: 9780 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 9781 break; 9782 #endif 9783 #ifdef TARGET_NR_recvmsg 9784 case TARGET_NR_recvmsg: 9785 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 9786 break; 9787 #endif 9788 #ifdef TARGET_NR_send 9789 case TARGET_NR_send: 9790 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 9791 break; 9792 #endif 9793 #ifdef TARGET_NR_sendmsg 9794 case TARGET_NR_sendmsg: 9795 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 9796 break; 9797 #endif 9798 #ifdef TARGET_NR_sendmmsg 9799 case TARGET_NR_sendmmsg: 9800 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 9801 break; 9802 case TARGET_NR_recvmmsg: 9803 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 9804 break; 9805 #endif 9806 #ifdef TARGET_NR_sendto 9807 case TARGET_NR_sendto: 9808 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 9809 break; 9810 #endif 9811 #ifdef TARGET_NR_shutdown 9812 case TARGET_NR_shutdown: 9813 ret = get_errno(shutdown(arg1, arg2)); 9814 break; 
9815 #endif 9816 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 9817 case TARGET_NR_getrandom: 9818 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9819 if (!p) { 9820 goto efault; 9821 } 9822 ret = get_errno(getrandom(p, arg2, arg3)); 9823 unlock_user(p, arg1, ret); 9824 break; 9825 #endif 9826 #ifdef TARGET_NR_socket 9827 case TARGET_NR_socket: 9828 ret = do_socket(arg1, arg2, arg3); 9829 break; 9830 #endif 9831 #ifdef TARGET_NR_socketpair 9832 case TARGET_NR_socketpair: 9833 ret = do_socketpair(arg1, arg2, arg3, arg4); 9834 break; 9835 #endif 9836 #ifdef TARGET_NR_setsockopt 9837 case TARGET_NR_setsockopt: 9838 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 9839 break; 9840 #endif 9841 #if defined(TARGET_NR_syslog) 9842 case TARGET_NR_syslog: 9843 { 9844 int len = arg2; 9845 9846 switch (arg1) { 9847 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 9848 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 9849 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 9850 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 9851 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 9852 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 9853 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 9854 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 9855 { 9856 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 9857 } 9858 break; 9859 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 9860 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 9861 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 9862 { 9863 ret = -TARGET_EINVAL; 9864 if (len < 0) { 9865 goto fail; 9866 } 9867 ret = 0; 9868 if (len == 0) { 9869 break; 9870 } 9871 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9872 if (!p) { 9873 ret = -TARGET_EFAULT; 9874 goto fail; 9875 } 9876 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 9877 unlock_user(p, arg2, arg3); 9878 } 9879 break; 9880 default: 9881 ret = 
-EINVAL; 9882 break; 9883 } 9884 } 9885 break; 9886 #endif 9887 case TARGET_NR_setitimer: 9888 { 9889 struct itimerval value, ovalue, *pvalue; 9890 9891 if (arg2) { 9892 pvalue = &value; 9893 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 9894 || copy_from_user_timeval(&pvalue->it_value, 9895 arg2 + sizeof(struct target_timeval))) 9896 goto efault; 9897 } else { 9898 pvalue = NULL; 9899 } 9900 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 9901 if (!is_error(ret) && arg3) { 9902 if (copy_to_user_timeval(arg3, 9903 &ovalue.it_interval) 9904 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 9905 &ovalue.it_value)) 9906 goto efault; 9907 } 9908 } 9909 break; 9910 case TARGET_NR_getitimer: 9911 { 9912 struct itimerval value; 9913 9914 ret = get_errno(getitimer(arg1, &value)); 9915 if (!is_error(ret) && arg2) { 9916 if (copy_to_user_timeval(arg2, 9917 &value.it_interval) 9918 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 9919 &value.it_value)) 9920 goto efault; 9921 } 9922 } 9923 break; 9924 #ifdef TARGET_NR_stat 9925 case TARGET_NR_stat: 9926 if (!(p = lock_user_string(arg1))) 9927 goto efault; 9928 ret = get_errno(stat(path(p), &st)); 9929 unlock_user(p, arg1, 0); 9930 goto do_stat; 9931 #endif 9932 #ifdef TARGET_NR_lstat 9933 case TARGET_NR_lstat: 9934 if (!(p = lock_user_string(arg1))) 9935 goto efault; 9936 ret = get_errno(lstat(path(p), &st)); 9937 unlock_user(p, arg1, 0); 9938 goto do_stat; 9939 #endif 9940 case TARGET_NR_fstat: 9941 { 9942 ret = get_errno(fstat(arg1, &st)); 9943 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 9944 do_stat: 9945 #endif 9946 if (!is_error(ret)) { 9947 struct target_stat *target_st; 9948 9949 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 9950 goto efault; 9951 memset(target_st, 0, sizeof(*target_st)); 9952 __put_user(st.st_dev, &target_st->st_dev); 9953 __put_user(st.st_ino, &target_st->st_ino); 9954 __put_user(st.st_mode, &target_st->st_mode); 9955 __put_user(st.st_uid, 
&target_st->st_uid); 9956 __put_user(st.st_gid, &target_st->st_gid); 9957 __put_user(st.st_nlink, &target_st->st_nlink); 9958 __put_user(st.st_rdev, &target_st->st_rdev); 9959 __put_user(st.st_size, &target_st->st_size); 9960 __put_user(st.st_blksize, &target_st->st_blksize); 9961 __put_user(st.st_blocks, &target_st->st_blocks); 9962 __put_user(st.st_atime, &target_st->target_st_atime); 9963 __put_user(st.st_mtime, &target_st->target_st_mtime); 9964 __put_user(st.st_ctime, &target_st->target_st_ctime); 9965 unlock_user_struct(target_st, arg2, 1); 9966 } 9967 } 9968 break; 9969 #ifdef TARGET_NR_olduname 9970 case TARGET_NR_olduname: 9971 goto unimplemented; 9972 #endif 9973 #ifdef TARGET_NR_iopl 9974 case TARGET_NR_iopl: 9975 goto unimplemented; 9976 #endif 9977 case TARGET_NR_vhangup: 9978 ret = get_errno(vhangup()); 9979 break; 9980 #ifdef TARGET_NR_idle 9981 case TARGET_NR_idle: 9982 goto unimplemented; 9983 #endif 9984 #ifdef TARGET_NR_syscall 9985 case TARGET_NR_syscall: 9986 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 9987 arg6, arg7, arg8, 0); 9988 break; 9989 #endif 9990 case TARGET_NR_wait4: 9991 { 9992 int status; 9993 abi_long status_ptr = arg2; 9994 struct rusage rusage, *rusage_ptr; 9995 abi_ulong target_rusage = arg4; 9996 abi_long rusage_err; 9997 if (target_rusage) 9998 rusage_ptr = &rusage; 9999 else 10000 rusage_ptr = NULL; 10001 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 10002 if (!is_error(ret)) { 10003 if (status_ptr && ret) { 10004 status = host_to_target_waitstatus(status); 10005 if (put_user_s32(status, status_ptr)) 10006 goto efault; 10007 } 10008 if (target_rusage) { 10009 rusage_err = host_to_target_rusage(target_rusage, &rusage); 10010 if (rusage_err) { 10011 ret = rusage_err; 10012 } 10013 } 10014 } 10015 } 10016 break; 10017 #ifdef TARGET_NR_swapoff 10018 case TARGET_NR_swapoff: 10019 if (!(p = lock_user_string(arg1))) 10020 goto efault; 10021 ret = get_errno(swapoff(p)); 10022 unlock_user(p, 
arg1, 0); 10023 break; 10024 #endif 10025 case TARGET_NR_sysinfo: 10026 { 10027 struct target_sysinfo *target_value; 10028 struct sysinfo value; 10029 ret = get_errno(sysinfo(&value)); 10030 if (!is_error(ret) && arg1) 10031 { 10032 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 10033 goto efault; 10034 __put_user(value.uptime, &target_value->uptime); 10035 __put_user(value.loads[0], &target_value->loads[0]); 10036 __put_user(value.loads[1], &target_value->loads[1]); 10037 __put_user(value.loads[2], &target_value->loads[2]); 10038 __put_user(value.totalram, &target_value->totalram); 10039 __put_user(value.freeram, &target_value->freeram); 10040 __put_user(value.sharedram, &target_value->sharedram); 10041 __put_user(value.bufferram, &target_value->bufferram); 10042 __put_user(value.totalswap, &target_value->totalswap); 10043 __put_user(value.freeswap, &target_value->freeswap); 10044 __put_user(value.procs, &target_value->procs); 10045 __put_user(value.totalhigh, &target_value->totalhigh); 10046 __put_user(value.freehigh, &target_value->freehigh); 10047 __put_user(value.mem_unit, &target_value->mem_unit); 10048 unlock_user_struct(target_value, arg1, 1); 10049 } 10050 } 10051 break; 10052 #ifdef TARGET_NR_ipc 10053 case TARGET_NR_ipc: 10054 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 10055 break; 10056 #endif 10057 #ifdef TARGET_NR_semget 10058 case TARGET_NR_semget: 10059 ret = get_errno(semget(arg1, arg2, arg3)); 10060 break; 10061 #endif 10062 #ifdef TARGET_NR_semop 10063 case TARGET_NR_semop: 10064 ret = do_semop(arg1, arg2, arg3); 10065 break; 10066 #endif 10067 #ifdef TARGET_NR_semctl 10068 case TARGET_NR_semctl: 10069 ret = do_semctl(arg1, arg2, arg3, arg4); 10070 break; 10071 #endif 10072 #ifdef TARGET_NR_msgctl 10073 case TARGET_NR_msgctl: 10074 ret = do_msgctl(arg1, arg2, arg3); 10075 break; 10076 #endif 10077 #ifdef TARGET_NR_msgget 10078 case TARGET_NR_msgget: 10079 ret = get_errno(msgget(arg1, arg2)); 10080 break; 10081 #endif 
10082 #ifdef TARGET_NR_msgrcv 10083 case TARGET_NR_msgrcv: 10084 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 10085 break; 10086 #endif 10087 #ifdef TARGET_NR_msgsnd 10088 case TARGET_NR_msgsnd: 10089 ret = do_msgsnd(arg1, arg2, arg3, arg4); 10090 break; 10091 #endif 10092 #ifdef TARGET_NR_shmget 10093 case TARGET_NR_shmget: 10094 ret = get_errno(shmget(arg1, arg2, arg3)); 10095 break; 10096 #endif 10097 #ifdef TARGET_NR_shmctl 10098 case TARGET_NR_shmctl: 10099 ret = do_shmctl(arg1, arg2, arg3); 10100 break; 10101 #endif 10102 #ifdef TARGET_NR_shmat 10103 case TARGET_NR_shmat: 10104 ret = do_shmat(cpu_env, arg1, arg2, arg3); 10105 break; 10106 #endif 10107 #ifdef TARGET_NR_shmdt 10108 case TARGET_NR_shmdt: 10109 ret = do_shmdt(arg1); 10110 break; 10111 #endif 10112 case TARGET_NR_fsync: 10113 ret = get_errno(fsync(arg1)); 10114 break; 10115 case TARGET_NR_clone: 10116 /* Linux manages to have three different orderings for its 10117 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 10118 * match the kernel's CONFIG_CLONE_* settings. 10119 * Microblaze is further special in that it uses a sixth 10120 * implicit argument to clone for the TLS pointer. 
10121 */ 10122 #if defined(TARGET_MICROBLAZE) 10123 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 10124 #elif defined(TARGET_CLONE_BACKWARDS) 10125 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 10126 #elif defined(TARGET_CLONE_BACKWARDS2) 10127 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 10128 #else 10129 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 10130 #endif 10131 break; 10132 #ifdef __NR_exit_group 10133 /* new thread calls */ 10134 case TARGET_NR_exit_group: 10135 #ifdef TARGET_GPROF 10136 _mcleanup(); 10137 #endif 10138 gdb_exit(cpu_env, arg1); 10139 ret = get_errno(exit_group(arg1)); 10140 break; 10141 #endif 10142 case TARGET_NR_setdomainname: 10143 if (!(p = lock_user_string(arg1))) 10144 goto efault; 10145 ret = get_errno(setdomainname(p, arg2)); 10146 unlock_user(p, arg1, 0); 10147 break; 10148 case TARGET_NR_uname: 10149 /* no need to transcode because we use the linux syscall */ 10150 { 10151 struct new_utsname * buf; 10152 10153 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 10154 goto efault; 10155 ret = get_errno(sys_uname(buf)); 10156 if (!is_error(ret)) { 10157 /* Overwrite the native machine name with whatever is being 10158 emulated. */ 10159 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 10160 /* Allow the user to override the reported release. 
*/ 10161 if (qemu_uname_release && *qemu_uname_release) { 10162 g_strlcpy(buf->release, qemu_uname_release, 10163 sizeof(buf->release)); 10164 } 10165 } 10166 unlock_user_struct(buf, arg1, 1); 10167 } 10168 break; 10169 #ifdef TARGET_I386 10170 case TARGET_NR_modify_ldt: 10171 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 10172 break; 10173 #if !defined(TARGET_X86_64) 10174 case TARGET_NR_vm86old: 10175 goto unimplemented; 10176 case TARGET_NR_vm86: 10177 ret = do_vm86(cpu_env, arg1, arg2); 10178 break; 10179 #endif 10180 #endif 10181 case TARGET_NR_adjtimex: 10182 { 10183 struct timex host_buf; 10184 10185 if (target_to_host_timex(&host_buf, arg1) != 0) { 10186 goto efault; 10187 } 10188 ret = get_errno(adjtimex(&host_buf)); 10189 if (!is_error(ret)) { 10190 if (host_to_target_timex(arg1, &host_buf) != 0) { 10191 goto efault; 10192 } 10193 } 10194 } 10195 break; 10196 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10197 case TARGET_NR_clock_adjtime: 10198 { 10199 struct timex htx, *phtx = &htx; 10200 10201 if (target_to_host_timex(phtx, arg2) != 0) { 10202 goto efault; 10203 } 10204 ret = get_errno(clock_adjtime(arg1, phtx)); 10205 if (!is_error(ret) && phtx) { 10206 if (host_to_target_timex(arg2, phtx) != 0) { 10207 goto efault; 10208 } 10209 } 10210 } 10211 break; 10212 #endif 10213 #ifdef TARGET_NR_create_module 10214 case TARGET_NR_create_module: 10215 #endif 10216 case TARGET_NR_init_module: 10217 case TARGET_NR_delete_module: 10218 #ifdef TARGET_NR_get_kernel_syms 10219 case TARGET_NR_get_kernel_syms: 10220 #endif 10221 goto unimplemented; 10222 case TARGET_NR_quotactl: 10223 goto unimplemented; 10224 case TARGET_NR_getpgid: 10225 ret = get_errno(getpgid(arg1)); 10226 break; 10227 case TARGET_NR_fchdir: 10228 ret = get_errno(fchdir(arg1)); 10229 break; 10230 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 10231 case TARGET_NR_bdflush: 10232 goto unimplemented; 10233 #endif 10234 #ifdef TARGET_NR_sysfs 10235 case TARGET_NR_sysfs: 10236 
goto unimplemented; 10237 #endif 10238 case TARGET_NR_personality: 10239 ret = get_errno(personality(arg1)); 10240 break; 10241 #ifdef TARGET_NR_afs_syscall 10242 case TARGET_NR_afs_syscall: 10243 goto unimplemented; 10244 #endif 10245 #ifdef TARGET_NR__llseek /* Not on alpha */ 10246 case TARGET_NR__llseek: 10247 { 10248 int64_t res; 10249 #if !defined(__NR_llseek) 10250 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10251 if (res == -1) { 10252 ret = get_errno(res); 10253 } else { 10254 ret = 0; 10255 } 10256 #else 10257 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10258 #endif 10259 if ((ret == 0) && put_user_s64(res, arg4)) { 10260 goto efault; 10261 } 10262 } 10263 break; 10264 #endif 10265 #ifdef TARGET_NR_getdents 10266 case TARGET_NR_getdents: 10267 #ifdef EMULATE_GETDENTS_WITH_GETDENTS 10268 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 10269 { 10270 struct target_dirent *target_dirp; 10271 struct linux_dirent *dirp; 10272 abi_long count = arg3; 10273 10274 dirp = g_try_malloc(count); 10275 if (!dirp) { 10276 ret = -TARGET_ENOMEM; 10277 goto fail; 10278 } 10279 10280 ret = get_errno(sys_getdents(arg1, dirp, count)); 10281 if (!is_error(ret)) { 10282 struct linux_dirent *de; 10283 struct target_dirent *tde; 10284 int len = ret; 10285 int reclen, treclen; 10286 int count1, tnamelen; 10287 10288 count1 = 0; 10289 de = dirp; 10290 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10291 goto efault; 10292 tde = target_dirp; 10293 while (len > 0) { 10294 reclen = de->d_reclen; 10295 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 10296 assert(tnamelen >= 0); 10297 treclen = tnamelen + offsetof(struct target_dirent, d_name); 10298 assert(count1 + treclen <= count); 10299 tde->d_reclen = tswap16(treclen); 10300 tde->d_ino = tswapal(de->d_ino); 10301 tde->d_off = tswapal(de->d_off); 10302 memcpy(tde->d_name, de->d_name, tnamelen); 10303 de = (struct linux_dirent *)((char *)de + reclen); 10304 len -= reclen; 
10305 tde = (struct target_dirent *)((char *)tde + treclen); 10306 count1 += treclen; 10307 } 10308 ret = count1; 10309 unlock_user(target_dirp, arg2, ret); 10310 } 10311 g_free(dirp); 10312 } 10313 #else 10314 { 10315 struct linux_dirent *dirp; 10316 abi_long count = arg3; 10317 10318 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10319 goto efault; 10320 ret = get_errno(sys_getdents(arg1, dirp, count)); 10321 if (!is_error(ret)) { 10322 struct linux_dirent *de; 10323 int len = ret; 10324 int reclen; 10325 de = dirp; 10326 while (len > 0) { 10327 reclen = de->d_reclen; 10328 if (reclen > len) 10329 break; 10330 de->d_reclen = tswap16(reclen); 10331 tswapls(&de->d_ino); 10332 tswapls(&de->d_off); 10333 de = (struct linux_dirent *)((char *)de + reclen); 10334 len -= reclen; 10335 } 10336 } 10337 unlock_user(dirp, arg2, ret); 10338 } 10339 #endif 10340 #else 10341 /* Implement getdents in terms of getdents64 */ 10342 { 10343 struct linux_dirent64 *dirp; 10344 abi_long count = arg3; 10345 10346 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 10347 if (!dirp) { 10348 goto efault; 10349 } 10350 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10351 if (!is_error(ret)) { 10352 /* Convert the dirent64 structs to target dirent. We do this 10353 * in-place, since we can guarantee that a target_dirent is no 10354 * larger than a dirent64; however this means we have to be 10355 * careful to read everything before writing in the new format. 
10356 */ 10357 struct linux_dirent64 *de; 10358 struct target_dirent *tde; 10359 int len = ret; 10360 int tlen = 0; 10361 10362 de = dirp; 10363 tde = (struct target_dirent *)dirp; 10364 while (len > 0) { 10365 int namelen, treclen; 10366 int reclen = de->d_reclen; 10367 uint64_t ino = de->d_ino; 10368 int64_t off = de->d_off; 10369 uint8_t type = de->d_type; 10370 10371 namelen = strlen(de->d_name); 10372 treclen = offsetof(struct target_dirent, d_name) 10373 + namelen + 2; 10374 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 10375 10376 memmove(tde->d_name, de->d_name, namelen + 1); 10377 tde->d_ino = tswapal(ino); 10378 tde->d_off = tswapal(off); 10379 tde->d_reclen = tswap16(treclen); 10380 /* The target_dirent type is in what was formerly a padding 10381 * byte at the end of the structure: 10382 */ 10383 *(((char *)tde) + treclen - 1) = type; 10384 10385 de = (struct linux_dirent64 *)((char *)de + reclen); 10386 tde = (struct target_dirent *)((char *)tde + treclen); 10387 len -= reclen; 10388 tlen += treclen; 10389 } 10390 ret = tlen; 10391 } 10392 unlock_user(dirp, arg2, ret); 10393 } 10394 #endif 10395 break; 10396 #endif /* TARGET_NR_getdents */ 10397 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10398 case TARGET_NR_getdents64: 10399 { 10400 struct linux_dirent64 *dirp; 10401 abi_long count = arg3; 10402 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10403 goto efault; 10404 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10405 if (!is_error(ret)) { 10406 struct linux_dirent64 *de; 10407 int len = ret; 10408 int reclen; 10409 de = dirp; 10410 while (len > 0) { 10411 reclen = de->d_reclen; 10412 if (reclen > len) 10413 break; 10414 de->d_reclen = tswap16(reclen); 10415 tswap64s((uint64_t *)&de->d_ino); 10416 tswap64s((uint64_t *)&de->d_off); 10417 de = (struct linux_dirent64 *)((char *)de + reclen); 10418 len -= reclen; 10419 } 10420 } 10421 unlock_user(dirp, arg2, ret); 10422 } 10423 break; 10424 #endif /* 
TARGET_NR_getdents64 */ 10425 #if defined(TARGET_NR__newselect) 10426 case TARGET_NR__newselect: 10427 ret = do_select(arg1, arg2, arg3, arg4, arg5); 10428 break; 10429 #endif 10430 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 10431 # ifdef TARGET_NR_poll 10432 case TARGET_NR_poll: 10433 # endif 10434 # ifdef TARGET_NR_ppoll 10435 case TARGET_NR_ppoll: 10436 # endif 10437 { 10438 struct target_pollfd *target_pfd; 10439 unsigned int nfds = arg2; 10440 struct pollfd *pfd; 10441 unsigned int i; 10442 10443 pfd = NULL; 10444 target_pfd = NULL; 10445 if (nfds) { 10446 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) { 10447 ret = -TARGET_EINVAL; 10448 break; 10449 } 10450 10451 target_pfd = lock_user(VERIFY_WRITE, arg1, 10452 sizeof(struct target_pollfd) * nfds, 1); 10453 if (!target_pfd) { 10454 goto efault; 10455 } 10456 10457 pfd = alloca(sizeof(struct pollfd) * nfds); 10458 for (i = 0; i < nfds; i++) { 10459 pfd[i].fd = tswap32(target_pfd[i].fd); 10460 pfd[i].events = tswap16(target_pfd[i].events); 10461 } 10462 } 10463 10464 switch (num) { 10465 # ifdef TARGET_NR_ppoll 10466 case TARGET_NR_ppoll: 10467 { 10468 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 10469 target_sigset_t *target_set; 10470 sigset_t _set, *set = &_set; 10471 10472 if (arg3) { 10473 if (target_to_host_timespec(timeout_ts, arg3)) { 10474 unlock_user(target_pfd, arg1, 0); 10475 goto efault; 10476 } 10477 } else { 10478 timeout_ts = NULL; 10479 } 10480 10481 if (arg4) { 10482 if (arg5 != sizeof(target_sigset_t)) { 10483 unlock_user(target_pfd, arg1, 0); 10484 ret = -TARGET_EINVAL; 10485 break; 10486 } 10487 10488 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 10489 if (!target_set) { 10490 unlock_user(target_pfd, arg1, 0); 10491 goto efault; 10492 } 10493 target_to_host_sigset(set, target_set); 10494 } else { 10495 set = NULL; 10496 } 10497 10498 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts, 10499 set, SIGSET_T_SIZE)); 10500 10501 if 
(!is_error(ret) && arg3) { 10502 host_to_target_timespec(arg3, timeout_ts); 10503 } 10504 if (arg4) { 10505 unlock_user(target_set, arg4, 0); 10506 } 10507 break; 10508 } 10509 # endif 10510 # ifdef TARGET_NR_poll 10511 case TARGET_NR_poll: 10512 { 10513 struct timespec ts, *pts; 10514 10515 if (arg3 >= 0) { 10516 /* Convert ms to secs, ns */ 10517 ts.tv_sec = arg3 / 1000; 10518 ts.tv_nsec = (arg3 % 1000) * 1000000LL; 10519 pts = &ts; 10520 } else { 10521 /* -ve poll() timeout means "infinite" */ 10522 pts = NULL; 10523 } 10524 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0)); 10525 break; 10526 } 10527 # endif 10528 default: 10529 g_assert_not_reached(); 10530 } 10531 10532 if (!is_error(ret)) { 10533 for(i = 0; i < nfds; i++) { 10534 target_pfd[i].revents = tswap16(pfd[i].revents); 10535 } 10536 } 10537 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 10538 } 10539 break; 10540 #endif 10541 case TARGET_NR_flock: 10542 /* NOTE: the flock constant seems to be the same for every 10543 Linux platform */ 10544 ret = get_errno(safe_flock(arg1, arg2)); 10545 break; 10546 case TARGET_NR_readv: 10547 { 10548 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10549 if (vec != NULL) { 10550 ret = get_errno(safe_readv(arg1, vec, arg3)); 10551 unlock_iovec(vec, arg2, arg3, 1); 10552 } else { 10553 ret = -host_to_target_errno(errno); 10554 } 10555 } 10556 break; 10557 case TARGET_NR_writev: 10558 { 10559 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10560 if (vec != NULL) { 10561 ret = get_errno(safe_writev(arg1, vec, arg3)); 10562 unlock_iovec(vec, arg2, arg3, 0); 10563 } else { 10564 ret = -host_to_target_errno(errno); 10565 } 10566 } 10567 break; 10568 #if defined(TARGET_NR_preadv) 10569 case TARGET_NR_preadv: 10570 { 10571 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10572 if (vec != NULL) { 10573 unsigned long low, high; 10574 10575 target_to_host_low_high(arg4, arg5, &low, &high); 10576 ret = 
get_errno(safe_preadv(arg1, vec, arg3, low, high)); 10577 unlock_iovec(vec, arg2, arg3, 1); 10578 } else { 10579 ret = -host_to_target_errno(errno); 10580 } 10581 } 10582 break; 10583 #endif 10584 #if defined(TARGET_NR_pwritev) 10585 case TARGET_NR_pwritev: 10586 { 10587 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10588 if (vec != NULL) { 10589 unsigned long low, high; 10590 10591 target_to_host_low_high(arg4, arg5, &low, &high); 10592 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high)); 10593 unlock_iovec(vec, arg2, arg3, 0); 10594 } else { 10595 ret = -host_to_target_errno(errno); 10596 } 10597 } 10598 break; 10599 #endif 10600 case TARGET_NR_getsid: 10601 ret = get_errno(getsid(arg1)); 10602 break; 10603 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10604 case TARGET_NR_fdatasync: 10605 ret = get_errno(fdatasync(arg1)); 10606 break; 10607 #endif 10608 #ifdef TARGET_NR__sysctl 10609 case TARGET_NR__sysctl: 10610 /* We don't implement this, but ENOTDIR is always a safe 10611 return value. */ 10612 ret = -TARGET_ENOTDIR; 10613 break; 10614 #endif 10615 case TARGET_NR_sched_getaffinity: 10616 { 10617 unsigned int mask_size; 10618 unsigned long *mask; 10619 10620 /* 10621 * sched_getaffinity needs multiples of ulong, so need to take 10622 * care of mismatches between target ulong and host ulong sizes. 10623 */ 10624 if (arg2 & (sizeof(abi_ulong) - 1)) { 10625 ret = -TARGET_EINVAL; 10626 break; 10627 } 10628 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10629 10630 mask = alloca(mask_size); 10631 memset(mask, 0, mask_size); 10632 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10633 10634 if (!is_error(ret)) { 10635 if (ret > arg2) { 10636 /* More data returned than the caller's buffer will fit. 10637 * This only happens if sizeof(abi_long) < sizeof(long) 10638 * and the caller passed us a buffer holding an odd number 10639 * of abi_longs. 
If the host kernel is actually using the 10640 * extra 4 bytes then fail EINVAL; otherwise we can just 10641 * ignore them and only copy the interesting part. 10642 */ 10643 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10644 if (numcpus > arg2 * 8) { 10645 ret = -TARGET_EINVAL; 10646 break; 10647 } 10648 ret = arg2; 10649 } 10650 10651 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 10652 goto efault; 10653 } 10654 } 10655 } 10656 break; 10657 case TARGET_NR_sched_setaffinity: 10658 { 10659 unsigned int mask_size; 10660 unsigned long *mask; 10661 10662 /* 10663 * sched_setaffinity needs multiples of ulong, so need to take 10664 * care of mismatches between target ulong and host ulong sizes. 10665 */ 10666 if (arg2 & (sizeof(abi_ulong) - 1)) { 10667 ret = -TARGET_EINVAL; 10668 break; 10669 } 10670 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10671 mask = alloca(mask_size); 10672 10673 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 10674 if (ret) { 10675 break; 10676 } 10677 10678 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 10679 } 10680 break; 10681 case TARGET_NR_getcpu: 10682 { 10683 unsigned cpu, node; 10684 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 10685 arg2 ? 
&node : NULL, 10686 NULL)); 10687 if (is_error(ret)) { 10688 goto fail; 10689 } 10690 if (arg1 && put_user_u32(cpu, arg1)) { 10691 goto efault; 10692 } 10693 if (arg2 && put_user_u32(node, arg2)) { 10694 goto efault; 10695 } 10696 } 10697 break; 10698 case TARGET_NR_sched_setparam: 10699 { 10700 struct sched_param *target_schp; 10701 struct sched_param schp; 10702 10703 if (arg2 == 0) { 10704 return -TARGET_EINVAL; 10705 } 10706 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 10707 goto efault; 10708 schp.sched_priority = tswap32(target_schp->sched_priority); 10709 unlock_user_struct(target_schp, arg2, 0); 10710 ret = get_errno(sched_setparam(arg1, &schp)); 10711 } 10712 break; 10713 case TARGET_NR_sched_getparam: 10714 { 10715 struct sched_param *target_schp; 10716 struct sched_param schp; 10717 10718 if (arg2 == 0) { 10719 return -TARGET_EINVAL; 10720 } 10721 ret = get_errno(sched_getparam(arg1, &schp)); 10722 if (!is_error(ret)) { 10723 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 10724 goto efault; 10725 target_schp->sched_priority = tswap32(schp.sched_priority); 10726 unlock_user_struct(target_schp, arg2, 1); 10727 } 10728 } 10729 break; 10730 case TARGET_NR_sched_setscheduler: 10731 { 10732 struct sched_param *target_schp; 10733 struct sched_param schp; 10734 if (arg3 == 0) { 10735 return -TARGET_EINVAL; 10736 } 10737 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 10738 goto efault; 10739 schp.sched_priority = tswap32(target_schp->sched_priority); 10740 unlock_user_struct(target_schp, arg3, 0); 10741 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 10742 } 10743 break; 10744 case TARGET_NR_sched_getscheduler: 10745 ret = get_errno(sched_getscheduler(arg1)); 10746 break; 10747 case TARGET_NR_sched_yield: 10748 ret = get_errno(sched_yield()); 10749 break; 10750 case TARGET_NR_sched_get_priority_max: 10751 ret = get_errno(sched_get_priority_max(arg1)); 10752 break; 10753 case TARGET_NR_sched_get_priority_min: 10754 ret = 
get_errno(sched_get_priority_min(arg1)); 10755 break; 10756 case TARGET_NR_sched_rr_get_interval: 10757 { 10758 struct timespec ts; 10759 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10760 if (!is_error(ret)) { 10761 ret = host_to_target_timespec(arg2, &ts); 10762 } 10763 } 10764 break; 10765 case TARGET_NR_nanosleep: 10766 { 10767 struct timespec req, rem; 10768 target_to_host_timespec(&req, arg1); 10769 ret = get_errno(safe_nanosleep(&req, &rem)); 10770 if (is_error(ret) && arg2) { 10771 host_to_target_timespec(arg2, &rem); 10772 } 10773 } 10774 break; 10775 #ifdef TARGET_NR_query_module 10776 case TARGET_NR_query_module: 10777 goto unimplemented; 10778 #endif 10779 #ifdef TARGET_NR_nfsservctl 10780 case TARGET_NR_nfsservctl: 10781 goto unimplemented; 10782 #endif 10783 case TARGET_NR_prctl: 10784 switch (arg1) { 10785 case PR_GET_PDEATHSIG: 10786 { 10787 int deathsig; 10788 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 10789 if (!is_error(ret) && arg2 10790 && put_user_ual(deathsig, arg2)) { 10791 goto efault; 10792 } 10793 break; 10794 } 10795 #ifdef PR_GET_NAME 10796 case PR_GET_NAME: 10797 { 10798 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 10799 if (!name) { 10800 goto efault; 10801 } 10802 ret = get_errno(prctl(arg1, (unsigned long)name, 10803 arg3, arg4, arg5)); 10804 unlock_user(name, arg2, 16); 10805 break; 10806 } 10807 case PR_SET_NAME: 10808 { 10809 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 10810 if (!name) { 10811 goto efault; 10812 } 10813 ret = get_errno(prctl(arg1, (unsigned long)name, 10814 arg3, arg4, arg5)); 10815 unlock_user(name, arg2, 0); 10816 break; 10817 } 10818 #endif 10819 #ifdef TARGET_AARCH64 10820 case TARGET_PR_SVE_SET_VL: 10821 /* We cannot support either PR_SVE_SET_VL_ONEXEC 10822 or PR_SVE_VL_INHERIT. Therefore, anything above 10823 ARM_MAX_VQ results in EINVAL. 
*/ 10824 ret = -TARGET_EINVAL; 10825 if (arm_feature(cpu_env, ARM_FEATURE_SVE) 10826 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) { 10827 CPUARMState *env = cpu_env; 10828 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1; 10829 int vq = MAX(arg2 / 16, 1); 10830 10831 if (vq < old_vq) { 10832 aarch64_sve_narrow_vq(env, vq); 10833 } 10834 env->vfp.zcr_el[1] = vq - 1; 10835 ret = vq * 16; 10836 } 10837 break; 10838 case TARGET_PR_SVE_GET_VL: 10839 ret = -TARGET_EINVAL; 10840 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) { 10841 CPUARMState *env = cpu_env; 10842 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16; 10843 } 10844 break; 10845 #endif /* AARCH64 */ 10846 case PR_GET_SECCOMP: 10847 case PR_SET_SECCOMP: 10848 /* Disable seccomp to prevent the target disabling syscalls we 10849 * need. */ 10850 ret = -TARGET_EINVAL; 10851 break; 10852 default: 10853 /* Most prctl options have no pointer arguments */ 10854 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 10855 break; 10856 } 10857 break; 10858 #ifdef TARGET_NR_arch_prctl 10859 case TARGET_NR_arch_prctl: 10860 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 10861 ret = do_arch_prctl(cpu_env, arg1, arg2); 10862 break; 10863 #else 10864 goto unimplemented; 10865 #endif 10866 #endif 10867 #ifdef TARGET_NR_pread64 10868 case TARGET_NR_pread64: 10869 if (regpairs_aligned(cpu_env, num)) { 10870 arg4 = arg5; 10871 arg5 = arg6; 10872 } 10873 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 10874 goto efault; 10875 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 10876 unlock_user(p, arg2, ret); 10877 break; 10878 case TARGET_NR_pwrite64: 10879 if (regpairs_aligned(cpu_env, num)) { 10880 arg4 = arg5; 10881 arg5 = arg6; 10882 } 10883 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 10884 goto efault; 10885 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 10886 unlock_user(p, arg2, 0); 10887 break; 10888 #endif 10889 case TARGET_NR_getcwd: 10890 if (!(p = 
lock_user(VERIFY_WRITE, arg1, arg2, 0))) 10891 goto efault; 10892 ret = get_errno(sys_getcwd1(p, arg2)); 10893 unlock_user(p, arg1, ret); 10894 break; 10895 case TARGET_NR_capget: 10896 case TARGET_NR_capset: 10897 { 10898 struct target_user_cap_header *target_header; 10899 struct target_user_cap_data *target_data = NULL; 10900 struct __user_cap_header_struct header; 10901 struct __user_cap_data_struct data[2]; 10902 struct __user_cap_data_struct *dataptr = NULL; 10903 int i, target_datalen; 10904 int data_items = 1; 10905 10906 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 10907 goto efault; 10908 } 10909 header.version = tswap32(target_header->version); 10910 header.pid = tswap32(target_header->pid); 10911 10912 if (header.version != _LINUX_CAPABILITY_VERSION) { 10913 /* Version 2 and up takes pointer to two user_data structs */ 10914 data_items = 2; 10915 } 10916 10917 target_datalen = sizeof(*target_data) * data_items; 10918 10919 if (arg2) { 10920 if (num == TARGET_NR_capget) { 10921 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 10922 } else { 10923 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 10924 } 10925 if (!target_data) { 10926 unlock_user_struct(target_header, arg1, 0); 10927 goto efault; 10928 } 10929 10930 if (num == TARGET_NR_capset) { 10931 for (i = 0; i < data_items; i++) { 10932 data[i].effective = tswap32(target_data[i].effective); 10933 data[i].permitted = tswap32(target_data[i].permitted); 10934 data[i].inheritable = tswap32(target_data[i].inheritable); 10935 } 10936 } 10937 10938 dataptr = data; 10939 } 10940 10941 if (num == TARGET_NR_capget) { 10942 ret = get_errno(capget(&header, dataptr)); 10943 } else { 10944 ret = get_errno(capset(&header, dataptr)); 10945 } 10946 10947 /* The kernel always updates version for both capget and capset */ 10948 target_header->version = tswap32(header.version); 10949 unlock_user_struct(target_header, arg1, 1); 10950 10951 if (arg2) { 10952 if (num == 
TARGET_NR_capget) { 10953 for (i = 0; i < data_items; i++) { 10954 target_data[i].effective = tswap32(data[i].effective); 10955 target_data[i].permitted = tswap32(data[i].permitted); 10956 target_data[i].inheritable = tswap32(data[i].inheritable); 10957 } 10958 unlock_user(target_data, arg2, target_datalen); 10959 } else { 10960 unlock_user(target_data, arg2, 0); 10961 } 10962 } 10963 break; 10964 } 10965 case TARGET_NR_sigaltstack: 10966 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 10967 break; 10968 10969 #ifdef CONFIG_SENDFILE 10970 case TARGET_NR_sendfile: 10971 { 10972 off_t *offp = NULL; 10973 off_t off; 10974 if (arg3) { 10975 ret = get_user_sal(off, arg3); 10976 if (is_error(ret)) { 10977 break; 10978 } 10979 offp = &off; 10980 } 10981 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 10982 if (!is_error(ret) && arg3) { 10983 abi_long ret2 = put_user_sal(off, arg3); 10984 if (is_error(ret2)) { 10985 ret = ret2; 10986 } 10987 } 10988 break; 10989 } 10990 #ifdef TARGET_NR_sendfile64 10991 case TARGET_NR_sendfile64: 10992 { 10993 off_t *offp = NULL; 10994 off_t off; 10995 if (arg3) { 10996 ret = get_user_s64(off, arg3); 10997 if (is_error(ret)) { 10998 break; 10999 } 11000 offp = &off; 11001 } 11002 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11003 if (!is_error(ret) && arg3) { 11004 abi_long ret2 = put_user_s64(off, arg3); 11005 if (is_error(ret2)) { 11006 ret = ret2; 11007 } 11008 } 11009 break; 11010 } 11011 #endif 11012 #else 11013 case TARGET_NR_sendfile: 11014 #ifdef TARGET_NR_sendfile64 11015 case TARGET_NR_sendfile64: 11016 #endif 11017 goto unimplemented; 11018 #endif 11019 11020 #ifdef TARGET_NR_getpmsg 11021 case TARGET_NR_getpmsg: 11022 goto unimplemented; 11023 #endif 11024 #ifdef TARGET_NR_putpmsg 11025 case TARGET_NR_putpmsg: 11026 goto unimplemented; 11027 #endif 11028 #ifdef TARGET_NR_vfork 11029 case TARGET_NR_vfork: 11030 ret = get_errno(do_fork(cpu_env, 11031 CLONE_VFORK | CLONE_VM | 
TARGET_SIGCHLD, 11032 0, 0, 0, 0)); 11033 break; 11034 #endif 11035 #ifdef TARGET_NR_ugetrlimit 11036 case TARGET_NR_ugetrlimit: 11037 { 11038 struct rlimit rlim; 11039 int resource = target_to_host_resource(arg1); 11040 ret = get_errno(getrlimit(resource, &rlim)); 11041 if (!is_error(ret)) { 11042 struct target_rlimit *target_rlim; 11043 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 11044 goto efault; 11045 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 11046 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 11047 unlock_user_struct(target_rlim, arg2, 1); 11048 } 11049 break; 11050 } 11051 #endif 11052 #ifdef TARGET_NR_truncate64 11053 case TARGET_NR_truncate64: 11054 if (!(p = lock_user_string(arg1))) 11055 goto efault; 11056 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 11057 unlock_user(p, arg1, 0); 11058 break; 11059 #endif 11060 #ifdef TARGET_NR_ftruncate64 11061 case TARGET_NR_ftruncate64: 11062 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 11063 break; 11064 #endif 11065 #ifdef TARGET_NR_stat64 11066 case TARGET_NR_stat64: 11067 if (!(p = lock_user_string(arg1))) 11068 goto efault; 11069 ret = get_errno(stat(path(p), &st)); 11070 unlock_user(p, arg1, 0); 11071 if (!is_error(ret)) 11072 ret = host_to_target_stat64(cpu_env, arg2, &st); 11073 break; 11074 #endif 11075 #ifdef TARGET_NR_lstat64 11076 case TARGET_NR_lstat64: 11077 if (!(p = lock_user_string(arg1))) 11078 goto efault; 11079 ret = get_errno(lstat(path(p), &st)); 11080 unlock_user(p, arg1, 0); 11081 if (!is_error(ret)) 11082 ret = host_to_target_stat64(cpu_env, arg2, &st); 11083 break; 11084 #endif 11085 #ifdef TARGET_NR_fstat64 11086 case TARGET_NR_fstat64: 11087 ret = get_errno(fstat(arg1, &st)); 11088 if (!is_error(ret)) 11089 ret = host_to_target_stat64(cpu_env, arg2, &st); 11090 break; 11091 #endif 11092 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 11093 #ifdef TARGET_NR_fstatat64 11094 case TARGET_NR_fstatat64: 
11095 #endif 11096 #ifdef TARGET_NR_newfstatat 11097 case TARGET_NR_newfstatat: 11098 #endif 11099 if (!(p = lock_user_string(arg2))) 11100 goto efault; 11101 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 11102 if (!is_error(ret)) 11103 ret = host_to_target_stat64(cpu_env, arg3, &st); 11104 break; 11105 #endif 11106 #ifdef TARGET_NR_lchown 11107 case TARGET_NR_lchown: 11108 if (!(p = lock_user_string(arg1))) 11109 goto efault; 11110 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 11111 unlock_user(p, arg1, 0); 11112 break; 11113 #endif 11114 #ifdef TARGET_NR_getuid 11115 case TARGET_NR_getuid: 11116 ret = get_errno(high2lowuid(getuid())); 11117 break; 11118 #endif 11119 #ifdef TARGET_NR_getgid 11120 case TARGET_NR_getgid: 11121 ret = get_errno(high2lowgid(getgid())); 11122 break; 11123 #endif 11124 #ifdef TARGET_NR_geteuid 11125 case TARGET_NR_geteuid: 11126 ret = get_errno(high2lowuid(geteuid())); 11127 break; 11128 #endif 11129 #ifdef TARGET_NR_getegid 11130 case TARGET_NR_getegid: 11131 ret = get_errno(high2lowgid(getegid())); 11132 break; 11133 #endif 11134 case TARGET_NR_setreuid: 11135 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 11136 break; 11137 case TARGET_NR_setregid: 11138 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 11139 break; 11140 case TARGET_NR_getgroups: 11141 { 11142 int gidsetsize = arg1; 11143 target_id *target_grouplist; 11144 gid_t *grouplist; 11145 int i; 11146 11147 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11148 ret = get_errno(getgroups(gidsetsize, grouplist)); 11149 if (gidsetsize == 0) 11150 break; 11151 if (!is_error(ret)) { 11152 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 11153 if (!target_grouplist) 11154 goto efault; 11155 for(i = 0;i < ret; i++) 11156 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 11157 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 11158 } 11159 } 11160 break; 11161 case 
TARGET_NR_setgroups: 11162 { 11163 int gidsetsize = arg1; 11164 target_id *target_grouplist; 11165 gid_t *grouplist = NULL; 11166 int i; 11167 if (gidsetsize) { 11168 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11169 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 11170 if (!target_grouplist) { 11171 ret = -TARGET_EFAULT; 11172 goto fail; 11173 } 11174 for (i = 0; i < gidsetsize; i++) { 11175 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 11176 } 11177 unlock_user(target_grouplist, arg2, 0); 11178 } 11179 ret = get_errno(setgroups(gidsetsize, grouplist)); 11180 } 11181 break; 11182 case TARGET_NR_fchown: 11183 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 11184 break; 11185 #if defined(TARGET_NR_fchownat) 11186 case TARGET_NR_fchownat: 11187 if (!(p = lock_user_string(arg2))) 11188 goto efault; 11189 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 11190 low2highgid(arg4), arg5)); 11191 unlock_user(p, arg2, 0); 11192 break; 11193 #endif 11194 #ifdef TARGET_NR_setresuid 11195 case TARGET_NR_setresuid: 11196 ret = get_errno(sys_setresuid(low2highuid(arg1), 11197 low2highuid(arg2), 11198 low2highuid(arg3))); 11199 break; 11200 #endif 11201 #ifdef TARGET_NR_getresuid 11202 case TARGET_NR_getresuid: 11203 { 11204 uid_t ruid, euid, suid; 11205 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11206 if (!is_error(ret)) { 11207 if (put_user_id(high2lowuid(ruid), arg1) 11208 || put_user_id(high2lowuid(euid), arg2) 11209 || put_user_id(high2lowuid(suid), arg3)) 11210 goto efault; 11211 } 11212 } 11213 break; 11214 #endif 11215 #ifdef TARGET_NR_getresgid 11216 case TARGET_NR_setresgid: 11217 ret = get_errno(sys_setresgid(low2highgid(arg1), 11218 low2highgid(arg2), 11219 low2highgid(arg3))); 11220 break; 11221 #endif 11222 #ifdef TARGET_NR_getresgid 11223 case TARGET_NR_getresgid: 11224 { 11225 gid_t rgid, egid, sgid; 11226 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11227 if (!is_error(ret)) { 
11228 if (put_user_id(high2lowgid(rgid), arg1) 11229 || put_user_id(high2lowgid(egid), arg2) 11230 || put_user_id(high2lowgid(sgid), arg3)) 11231 goto efault; 11232 } 11233 } 11234 break; 11235 #endif 11236 #ifdef TARGET_NR_chown 11237 case TARGET_NR_chown: 11238 if (!(p = lock_user_string(arg1))) 11239 goto efault; 11240 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11241 unlock_user(p, arg1, 0); 11242 break; 11243 #endif 11244 case TARGET_NR_setuid: 11245 ret = get_errno(sys_setuid(low2highuid(arg1))); 11246 break; 11247 case TARGET_NR_setgid: 11248 ret = get_errno(sys_setgid(low2highgid(arg1))); 11249 break; 11250 case TARGET_NR_setfsuid: 11251 ret = get_errno(setfsuid(arg1)); 11252 break; 11253 case TARGET_NR_setfsgid: 11254 ret = get_errno(setfsgid(arg1)); 11255 break; 11256 11257 #ifdef TARGET_NR_lchown32 11258 case TARGET_NR_lchown32: 11259 if (!(p = lock_user_string(arg1))) 11260 goto efault; 11261 ret = get_errno(lchown(p, arg2, arg3)); 11262 unlock_user(p, arg1, 0); 11263 break; 11264 #endif 11265 #ifdef TARGET_NR_getuid32 11266 case TARGET_NR_getuid32: 11267 ret = get_errno(getuid()); 11268 break; 11269 #endif 11270 11271 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 11272 /* Alpha specific */ 11273 case TARGET_NR_getxuid: 11274 { 11275 uid_t euid; 11276 euid=geteuid(); 11277 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 11278 } 11279 ret = get_errno(getuid()); 11280 break; 11281 #endif 11282 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 11283 /* Alpha specific */ 11284 case TARGET_NR_getxgid: 11285 { 11286 uid_t egid; 11287 egid=getegid(); 11288 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 11289 } 11290 ret = get_errno(getgid()); 11291 break; 11292 #endif 11293 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 11294 /* Alpha specific */ 11295 case TARGET_NR_osf_getsysinfo: 11296 ret = -TARGET_EOPNOTSUPP; 11297 switch (arg1) { 11298 case TARGET_GSI_IEEE_FP_CONTROL: 11299 { 11300 uint64_t swcr, fpcr = 
cpu_alpha_load_fpcr (cpu_env); 11301 11302 /* Copied from linux ieee_fpcr_to_swcr. */ 11303 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 11304 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 11305 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 11306 | SWCR_TRAP_ENABLE_DZE 11307 | SWCR_TRAP_ENABLE_OVF); 11308 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 11309 | SWCR_TRAP_ENABLE_INE); 11310 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 11311 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 11312 11313 if (put_user_u64 (swcr, arg2)) 11314 goto efault; 11315 ret = 0; 11316 } 11317 break; 11318 11319 /* case GSI_IEEE_STATE_AT_SIGNAL: 11320 -- Not implemented in linux kernel. 11321 case GSI_UACPROC: 11322 -- Retrieves current unaligned access state; not much used. 11323 case GSI_PROC_TYPE: 11324 -- Retrieves implver information; surely not used. 11325 case GSI_GET_HWRPB: 11326 -- Grabs a copy of the HWRPB; surely not used. 11327 */ 11328 } 11329 break; 11330 #endif 11331 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11332 /* Alpha specific */ 11333 case TARGET_NR_osf_setsysinfo: 11334 ret = -TARGET_EOPNOTSUPP; 11335 switch (arg1) { 11336 case TARGET_SSI_IEEE_FP_CONTROL: 11337 { 11338 uint64_t swcr, fpcr, orig_fpcr; 11339 11340 if (get_user_u64 (swcr, arg2)) { 11341 goto efault; 11342 } 11343 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11344 fpcr = orig_fpcr & FPCR_DYN_MASK; 11345 11346 /* Copied from linux ieee_swcr_to_fpcr. */ 11347 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 11348 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 11349 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 11350 | SWCR_TRAP_ENABLE_DZE 11351 | SWCR_TRAP_ENABLE_OVF)) << 48; 11352 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 11353 | SWCR_TRAP_ENABLE_INE)) << 57; 11354 fpcr |= (swcr & SWCR_MAP_UMZ ? 
FPCR_UNDZ | FPCR_UNFD : 0); 11355 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 11356 11357 cpu_alpha_store_fpcr(cpu_env, fpcr); 11358 ret = 0; 11359 } 11360 break; 11361 11362 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11363 { 11364 uint64_t exc, fpcr, orig_fpcr; 11365 int si_code; 11366 11367 if (get_user_u64(exc, arg2)) { 11368 goto efault; 11369 } 11370 11371 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 11372 11373 /* We only add to the exception status here. */ 11374 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 11375 11376 cpu_alpha_store_fpcr(cpu_env, fpcr); 11377 ret = 0; 11378 11379 /* Old exceptions are not signaled. */ 11380 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 11381 11382 /* If any exceptions set by this call, 11383 and are unmasked, send a signal. */ 11384 si_code = 0; 11385 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 11386 si_code = TARGET_FPE_FLTRES; 11387 } 11388 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 11389 si_code = TARGET_FPE_FLTUND; 11390 } 11391 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 11392 si_code = TARGET_FPE_FLTOVF; 11393 } 11394 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 11395 si_code = TARGET_FPE_FLTDIV; 11396 } 11397 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 11398 si_code = TARGET_FPE_FLTINV; 11399 } 11400 if (si_code != 0) { 11401 target_siginfo_t info; 11402 info.si_signo = SIGFPE; 11403 info.si_errno = 0; 11404 info.si_code = si_code; 11405 info._sifields._sigfault._addr 11406 = ((CPUArchState *)cpu_env)->pc; 11407 queue_signal((CPUArchState *)cpu_env, info.si_signo, 11408 QEMU_SI_FAULT, &info); 11409 } 11410 } 11411 break; 11412 11413 /* case SSI_NVPAIRS: 11414 -- Used with SSIN_UACPROC to enable unaligned accesses. 11415 case SSI_IEEE_STATE_AT_SIGNAL: 11416 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11417 -- Not implemented in linux kernel 11418 */ 11419 } 11420 break; 11421 #endif 11422 #ifdef TARGET_NR_osf_sigprocmask 11423 /* Alpha specific. 
*/ 11424 case TARGET_NR_osf_sigprocmask: 11425 { 11426 abi_ulong mask; 11427 int how; 11428 sigset_t set, oldset; 11429 11430 switch(arg1) { 11431 case TARGET_SIG_BLOCK: 11432 how = SIG_BLOCK; 11433 break; 11434 case TARGET_SIG_UNBLOCK: 11435 how = SIG_UNBLOCK; 11436 break; 11437 case TARGET_SIG_SETMASK: 11438 how = SIG_SETMASK; 11439 break; 11440 default: 11441 ret = -TARGET_EINVAL; 11442 goto fail; 11443 } 11444 mask = arg2; 11445 target_to_host_old_sigset(&set, &mask); 11446 ret = do_sigprocmask(how, &set, &oldset); 11447 if (!ret) { 11448 host_to_target_old_sigset(&mask, &oldset); 11449 ret = mask; 11450 } 11451 } 11452 break; 11453 #endif 11454 11455 #ifdef TARGET_NR_getgid32 11456 case TARGET_NR_getgid32: 11457 ret = get_errno(getgid()); 11458 break; 11459 #endif 11460 #ifdef TARGET_NR_geteuid32 11461 case TARGET_NR_geteuid32: 11462 ret = get_errno(geteuid()); 11463 break; 11464 #endif 11465 #ifdef TARGET_NR_getegid32 11466 case TARGET_NR_getegid32: 11467 ret = get_errno(getegid()); 11468 break; 11469 #endif 11470 #ifdef TARGET_NR_setreuid32 11471 case TARGET_NR_setreuid32: 11472 ret = get_errno(setreuid(arg1, arg2)); 11473 break; 11474 #endif 11475 #ifdef TARGET_NR_setregid32 11476 case TARGET_NR_setregid32: 11477 ret = get_errno(setregid(arg1, arg2)); 11478 break; 11479 #endif 11480 #ifdef TARGET_NR_getgroups32 11481 case TARGET_NR_getgroups32: 11482 { 11483 int gidsetsize = arg1; 11484 uint32_t *target_grouplist; 11485 gid_t *grouplist; 11486 int i; 11487 11488 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11489 ret = get_errno(getgroups(gidsetsize, grouplist)); 11490 if (gidsetsize == 0) 11491 break; 11492 if (!is_error(ret)) { 11493 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 11494 if (!target_grouplist) { 11495 ret = -TARGET_EFAULT; 11496 goto fail; 11497 } 11498 for(i = 0;i < ret; i++) 11499 target_grouplist[i] = tswap32(grouplist[i]); 11500 unlock_user(target_grouplist, arg2, gidsetsize * 4); 11501 } 11502 } 11503 break; 
11504 #endif 11505 #ifdef TARGET_NR_setgroups32 11506 case TARGET_NR_setgroups32: 11507 { 11508 int gidsetsize = arg1; 11509 uint32_t *target_grouplist; 11510 gid_t *grouplist; 11511 int i; 11512 11513 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11514 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 11515 if (!target_grouplist) { 11516 ret = -TARGET_EFAULT; 11517 goto fail; 11518 } 11519 for(i = 0;i < gidsetsize; i++) 11520 grouplist[i] = tswap32(target_grouplist[i]); 11521 unlock_user(target_grouplist, arg2, 0); 11522 ret = get_errno(setgroups(gidsetsize, grouplist)); 11523 } 11524 break; 11525 #endif 11526 #ifdef TARGET_NR_fchown32 11527 case TARGET_NR_fchown32: 11528 ret = get_errno(fchown(arg1, arg2, arg3)); 11529 break; 11530 #endif 11531 #ifdef TARGET_NR_setresuid32 11532 case TARGET_NR_setresuid32: 11533 ret = get_errno(sys_setresuid(arg1, arg2, arg3)); 11534 break; 11535 #endif 11536 #ifdef TARGET_NR_getresuid32 11537 case TARGET_NR_getresuid32: 11538 { 11539 uid_t ruid, euid, suid; 11540 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11541 if (!is_error(ret)) { 11542 if (put_user_u32(ruid, arg1) 11543 || put_user_u32(euid, arg2) 11544 || put_user_u32(suid, arg3)) 11545 goto efault; 11546 } 11547 } 11548 break; 11549 #endif 11550 #ifdef TARGET_NR_setresgid32 11551 case TARGET_NR_setresgid32: 11552 ret = get_errno(sys_setresgid(arg1, arg2, arg3)); 11553 break; 11554 #endif 11555 #ifdef TARGET_NR_getresgid32 11556 case TARGET_NR_getresgid32: 11557 { 11558 gid_t rgid, egid, sgid; 11559 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11560 if (!is_error(ret)) { 11561 if (put_user_u32(rgid, arg1) 11562 || put_user_u32(egid, arg2) 11563 || put_user_u32(sgid, arg3)) 11564 goto efault; 11565 } 11566 } 11567 break; 11568 #endif 11569 #ifdef TARGET_NR_chown32 11570 case TARGET_NR_chown32: 11571 if (!(p = lock_user_string(arg1))) 11572 goto efault; 11573 ret = get_errno(chown(p, arg2, arg3)); 11574 unlock_user(p, arg1, 0); 11575 break; 11576 
#endif 11577 #ifdef TARGET_NR_setuid32 11578 case TARGET_NR_setuid32: 11579 ret = get_errno(sys_setuid(arg1)); 11580 break; 11581 #endif 11582 #ifdef TARGET_NR_setgid32 11583 case TARGET_NR_setgid32: 11584 ret = get_errno(sys_setgid(arg1)); 11585 break; 11586 #endif 11587 #ifdef TARGET_NR_setfsuid32 11588 case TARGET_NR_setfsuid32: 11589 ret = get_errno(setfsuid(arg1)); 11590 break; 11591 #endif 11592 #ifdef TARGET_NR_setfsgid32 11593 case TARGET_NR_setfsgid32: 11594 ret = get_errno(setfsgid(arg1)); 11595 break; 11596 #endif 11597 11598 case TARGET_NR_pivot_root: 11599 goto unimplemented; 11600 #ifdef TARGET_NR_mincore 11601 case TARGET_NR_mincore: 11602 { 11603 void *a; 11604 ret = -TARGET_ENOMEM; 11605 a = lock_user(VERIFY_READ, arg1, arg2, 0); 11606 if (!a) { 11607 goto fail; 11608 } 11609 ret = -TARGET_EFAULT; 11610 p = lock_user_string(arg3); 11611 if (!p) { 11612 goto mincore_fail; 11613 } 11614 ret = get_errno(mincore(a, arg2, p)); 11615 unlock_user(p, arg3, ret); 11616 mincore_fail: 11617 unlock_user(a, arg1, 0); 11618 } 11619 break; 11620 #endif 11621 #ifdef TARGET_NR_arm_fadvise64_64 11622 case TARGET_NR_arm_fadvise64_64: 11623 /* arm_fadvise64_64 looks like fadvise64_64 but 11624 * with different argument order: fd, advice, offset, len 11625 * rather than the usual fd, offset, len, advice. 11626 * Note that offset and len are both 64-bit so appear as 11627 * pairs of 32-bit registers. 
11628 */ 11629 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 11630 target_offset64(arg5, arg6), arg2); 11631 ret = -host_to_target_errno(ret); 11632 break; 11633 #endif 11634 11635 #if TARGET_ABI_BITS == 32 11636 11637 #ifdef TARGET_NR_fadvise64_64 11638 case TARGET_NR_fadvise64_64: 11639 #if defined(TARGET_PPC) || defined(TARGET_XTENSA) 11640 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 11641 ret = arg2; 11642 arg2 = arg3; 11643 arg3 = arg4; 11644 arg4 = arg5; 11645 arg5 = arg6; 11646 arg6 = ret; 11647 #else 11648 /* 6 args: fd, offset (high, low), len (high, low), advice */ 11649 if (regpairs_aligned(cpu_env, num)) { 11650 /* offset is in (3,4), len in (5,6) and advice in 7 */ 11651 arg2 = arg3; 11652 arg3 = arg4; 11653 arg4 = arg5; 11654 arg5 = arg6; 11655 arg6 = arg7; 11656 } 11657 #endif 11658 ret = -host_to_target_errno(posix_fadvise(arg1, 11659 target_offset64(arg2, arg3), 11660 target_offset64(arg4, arg5), 11661 arg6)); 11662 break; 11663 #endif 11664 11665 #ifdef TARGET_NR_fadvise64 11666 case TARGET_NR_fadvise64: 11667 /* 5 args: fd, offset (high, low), len, advice */ 11668 if (regpairs_aligned(cpu_env, num)) { 11669 /* offset is in (3,4), len in 5 and advice in 6 */ 11670 arg2 = arg3; 11671 arg3 = arg4; 11672 arg4 = arg5; 11673 arg5 = arg6; 11674 } 11675 ret = -host_to_target_errno(posix_fadvise(arg1, 11676 target_offset64(arg2, arg3), 11677 arg4, arg5)); 11678 break; 11679 #endif 11680 11681 #else /* not a 32-bit ABI */ 11682 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 11683 #ifdef TARGET_NR_fadvise64_64 11684 case TARGET_NR_fadvise64_64: 11685 #endif 11686 #ifdef TARGET_NR_fadvise64 11687 case TARGET_NR_fadvise64: 11688 #endif 11689 #ifdef TARGET_S390X 11690 switch (arg4) { 11691 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 11692 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 11693 case 6: arg4 = POSIX_FADV_DONTNEED; break; 11694 case 7: arg4 = 
POSIX_FADV_NOREUSE; break; 11695 default: break; 11696 } 11697 #endif 11698 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 11699 break; 11700 #endif 11701 #endif /* end of 64-bit ABI fadvise handling */ 11702 11703 #ifdef TARGET_NR_madvise 11704 case TARGET_NR_madvise: 11705 /* A straight passthrough may not be safe because qemu sometimes 11706 turns private file-backed mappings into anonymous mappings. 11707 This will break MADV_DONTNEED. 11708 This is a hint, so ignoring and returning success is ok. */ 11709 ret = get_errno(0); 11710 break; 11711 #endif 11712 #if TARGET_ABI_BITS == 32 11713 case TARGET_NR_fcntl64: 11714 { 11715 int cmd; 11716 struct flock64 fl; 11717 from_flock64_fn *copyfrom = copy_from_user_flock64; 11718 to_flock64_fn *copyto = copy_to_user_flock64; 11719 11720 #ifdef TARGET_ARM 11721 if (!((CPUARMState *)cpu_env)->eabi) { 11722 copyfrom = copy_from_user_oabi_flock64; 11723 copyto = copy_to_user_oabi_flock64; 11724 } 11725 #endif 11726 11727 cmd = target_to_host_fcntl_cmd(arg2); 11728 if (cmd == -TARGET_EINVAL) { 11729 ret = cmd; 11730 break; 11731 } 11732 11733 switch(arg2) { 11734 case TARGET_F_GETLK64: 11735 ret = copyfrom(&fl, arg3); 11736 if (ret) { 11737 break; 11738 } 11739 ret = get_errno(fcntl(arg1, cmd, &fl)); 11740 if (ret == 0) { 11741 ret = copyto(arg3, &fl); 11742 } 11743 break; 11744 11745 case TARGET_F_SETLK64: 11746 case TARGET_F_SETLKW64: 11747 ret = copyfrom(&fl, arg3); 11748 if (ret) { 11749 break; 11750 } 11751 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11752 break; 11753 default: 11754 ret = do_fcntl(arg1, arg2, arg3); 11755 break; 11756 } 11757 break; 11758 } 11759 #endif 11760 #ifdef TARGET_NR_cacheflush 11761 case TARGET_NR_cacheflush: 11762 /* self-modifying code is handled automatically, so nothing needed */ 11763 ret = 0; 11764 break; 11765 #endif 11766 #ifdef TARGET_NR_security 11767 case TARGET_NR_security: 11768 goto unimplemented; 11769 #endif 11770 #ifdef TARGET_NR_getpagesize 11771 case 
TARGET_NR_getpagesize: 11772 ret = TARGET_PAGE_SIZE; 11773 break; 11774 #endif 11775 case TARGET_NR_gettid: 11776 ret = get_errno(gettid()); 11777 break; 11778 #ifdef TARGET_NR_readahead 11779 case TARGET_NR_readahead: 11780 #if TARGET_ABI_BITS == 32 11781 if (regpairs_aligned(cpu_env, num)) { 11782 arg2 = arg3; 11783 arg3 = arg4; 11784 arg4 = arg5; 11785 } 11786 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4)); 11787 #else 11788 ret = get_errno(readahead(arg1, arg2, arg3)); 11789 #endif 11790 break; 11791 #endif 11792 #ifdef CONFIG_ATTR 11793 #ifdef TARGET_NR_setxattr 11794 case TARGET_NR_listxattr: 11795 case TARGET_NR_llistxattr: 11796 { 11797 void *p, *b = 0; 11798 if (arg2) { 11799 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11800 if (!b) { 11801 ret = -TARGET_EFAULT; 11802 break; 11803 } 11804 } 11805 p = lock_user_string(arg1); 11806 if (p) { 11807 if (num == TARGET_NR_listxattr) { 11808 ret = get_errno(listxattr(p, b, arg3)); 11809 } else { 11810 ret = get_errno(llistxattr(p, b, arg3)); 11811 } 11812 } else { 11813 ret = -TARGET_EFAULT; 11814 } 11815 unlock_user(p, arg1, 0); 11816 unlock_user(b, arg2, arg3); 11817 break; 11818 } 11819 case TARGET_NR_flistxattr: 11820 { 11821 void *b = 0; 11822 if (arg2) { 11823 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11824 if (!b) { 11825 ret = -TARGET_EFAULT; 11826 break; 11827 } 11828 } 11829 ret = get_errno(flistxattr(arg1, b, arg3)); 11830 unlock_user(b, arg2, arg3); 11831 break; 11832 } 11833 case TARGET_NR_setxattr: 11834 case TARGET_NR_lsetxattr: 11835 { 11836 void *p, *n, *v = 0; 11837 if (arg3) { 11838 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11839 if (!v) { 11840 ret = -TARGET_EFAULT; 11841 break; 11842 } 11843 } 11844 p = lock_user_string(arg1); 11845 n = lock_user_string(arg2); 11846 if (p && n) { 11847 if (num == TARGET_NR_setxattr) { 11848 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 11849 } else { 11850 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 11851 } 11852 } else { 11853 
ret = -TARGET_EFAULT; 11854 } 11855 unlock_user(p, arg1, 0); 11856 unlock_user(n, arg2, 0); 11857 unlock_user(v, arg3, 0); 11858 } 11859 break; 11860 case TARGET_NR_fsetxattr: 11861 { 11862 void *n, *v = 0; 11863 if (arg3) { 11864 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11865 if (!v) { 11866 ret = -TARGET_EFAULT; 11867 break; 11868 } 11869 } 11870 n = lock_user_string(arg2); 11871 if (n) { 11872 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 11873 } else { 11874 ret = -TARGET_EFAULT; 11875 } 11876 unlock_user(n, arg2, 0); 11877 unlock_user(v, arg3, 0); 11878 } 11879 break; 11880 case TARGET_NR_getxattr: 11881 case TARGET_NR_lgetxattr: 11882 { 11883 void *p, *n, *v = 0; 11884 if (arg3) { 11885 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11886 if (!v) { 11887 ret = -TARGET_EFAULT; 11888 break; 11889 } 11890 } 11891 p = lock_user_string(arg1); 11892 n = lock_user_string(arg2); 11893 if (p && n) { 11894 if (num == TARGET_NR_getxattr) { 11895 ret = get_errno(getxattr(p, n, v, arg4)); 11896 } else { 11897 ret = get_errno(lgetxattr(p, n, v, arg4)); 11898 } 11899 } else { 11900 ret = -TARGET_EFAULT; 11901 } 11902 unlock_user(p, arg1, 0); 11903 unlock_user(n, arg2, 0); 11904 unlock_user(v, arg3, arg4); 11905 } 11906 break; 11907 case TARGET_NR_fgetxattr: 11908 { 11909 void *n, *v = 0; 11910 if (arg3) { 11911 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11912 if (!v) { 11913 ret = -TARGET_EFAULT; 11914 break; 11915 } 11916 } 11917 n = lock_user_string(arg2); 11918 if (n) { 11919 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 11920 } else { 11921 ret = -TARGET_EFAULT; 11922 } 11923 unlock_user(n, arg2, 0); 11924 unlock_user(v, arg3, arg4); 11925 } 11926 break; 11927 case TARGET_NR_removexattr: 11928 case TARGET_NR_lremovexattr: 11929 { 11930 void *p, *n; 11931 p = lock_user_string(arg1); 11932 n = lock_user_string(arg2); 11933 if (p && n) { 11934 if (num == TARGET_NR_removexattr) { 11935 ret = get_errno(removexattr(p, n)); 11936 } else { 11937 ret = 
get_errno(lremovexattr(p, n)); 11938 } 11939 } else { 11940 ret = -TARGET_EFAULT; 11941 } 11942 unlock_user(p, arg1, 0); 11943 unlock_user(n, arg2, 0); 11944 } 11945 break; 11946 case TARGET_NR_fremovexattr: 11947 { 11948 void *n; 11949 n = lock_user_string(arg2); 11950 if (n) { 11951 ret = get_errno(fremovexattr(arg1, n)); 11952 } else { 11953 ret = -TARGET_EFAULT; 11954 } 11955 unlock_user(n, arg2, 0); 11956 } 11957 break; 11958 #endif 11959 #endif /* CONFIG_ATTR */ 11960 #ifdef TARGET_NR_set_thread_area 11961 case TARGET_NR_set_thread_area: 11962 #if defined(TARGET_MIPS) 11963 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; 11964 ret = 0; 11965 break; 11966 #elif defined(TARGET_CRIS) 11967 if (arg1 & 0xff) 11968 ret = -TARGET_EINVAL; 11969 else { 11970 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 11971 ret = 0; 11972 } 11973 break; 11974 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 11975 ret = do_set_thread_area(cpu_env, arg1); 11976 break; 11977 #elif defined(TARGET_M68K) 11978 { 11979 TaskState *ts = cpu->opaque; 11980 ts->tp_value = arg1; 11981 ret = 0; 11982 break; 11983 } 11984 #else 11985 goto unimplemented_nowarn; 11986 #endif 11987 #endif 11988 #ifdef TARGET_NR_get_thread_area 11989 case TARGET_NR_get_thread_area: 11990 #if defined(TARGET_I386) && defined(TARGET_ABI32) 11991 ret = do_get_thread_area(cpu_env, arg1); 11992 break; 11993 #elif defined(TARGET_M68K) 11994 { 11995 TaskState *ts = cpu->opaque; 11996 ret = ts->tp_value; 11997 break; 11998 } 11999 #else 12000 goto unimplemented_nowarn; 12001 #endif 12002 #endif 12003 #ifdef TARGET_NR_getdomainname 12004 case TARGET_NR_getdomainname: 12005 goto unimplemented_nowarn; 12006 #endif 12007 12008 #ifdef TARGET_NR_clock_settime 12009 case TARGET_NR_clock_settime: 12010 { 12011 struct timespec ts; 12012 12013 ret = target_to_host_timespec(&ts, arg2); 12014 if (!is_error(ret)) { 12015 ret = get_errno(clock_settime(arg1, &ts)); 12016 } 12017 break; 12018 } 12019 #endif 12020 #ifdef 
TARGET_NR_clock_gettime 12021 case TARGET_NR_clock_gettime: 12022 { 12023 struct timespec ts; 12024 ret = get_errno(clock_gettime(arg1, &ts)); 12025 if (!is_error(ret)) { 12026 ret = host_to_target_timespec(arg2, &ts); 12027 } 12028 break; 12029 } 12030 #endif 12031 #ifdef TARGET_NR_clock_getres 12032 case TARGET_NR_clock_getres: 12033 { 12034 struct timespec ts; 12035 ret = get_errno(clock_getres(arg1, &ts)); 12036 if (!is_error(ret)) { 12037 host_to_target_timespec(arg2, &ts); 12038 } 12039 break; 12040 } 12041 #endif 12042 #ifdef TARGET_NR_clock_nanosleep 12043 case TARGET_NR_clock_nanosleep: 12044 { 12045 struct timespec ts; 12046 target_to_host_timespec(&ts, arg3); 12047 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12048 &ts, arg4 ? &ts : NULL)); 12049 if (arg4) 12050 host_to_target_timespec(arg4, &ts); 12051 12052 #if defined(TARGET_PPC) 12053 /* clock_nanosleep is odd in that it returns positive errno values. 12054 * On PPC, CR0 bit 3 should be set in such a situation. */ 12055 if (ret && ret != -TARGET_ERESTARTSYS) { 12056 ((CPUPPCState *)cpu_env)->crf[0] |= 1; 12057 } 12058 #endif 12059 break; 12060 } 12061 #endif 12062 12063 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 12064 case TARGET_NR_set_tid_address: 12065 ret = get_errno(set_tid_address((int *)g2h(arg1))); 12066 break; 12067 #endif 12068 12069 case TARGET_NR_tkill: 12070 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 12071 break; 12072 12073 case TARGET_NR_tgkill: 12074 ret = get_errno(safe_tgkill((int)arg1, (int)arg2, 12075 target_to_host_signal(arg3))); 12076 break; 12077 12078 #ifdef TARGET_NR_set_robust_list 12079 case TARGET_NR_set_robust_list: 12080 case TARGET_NR_get_robust_list: 12081 /* The ABI for supporting robust futexes has userspace pass 12082 * the kernel a pointer to a linked list which is updated by 12083 * userspace after the syscall; the list is walked by the kernel 12084 * when the thread exits. 
Since the linked list in QEMU guest 12085 * memory isn't a valid linked list for the host and we have 12086 * no way to reliably intercept the thread-death event, we can't 12087 * support these. Silently return ENOSYS so that guest userspace 12088 * falls back to a non-robust futex implementation (which should 12089 * be OK except in the corner case of the guest crashing while 12090 * holding a mutex that is shared with another process via 12091 * shared memory). 12092 */ 12093 goto unimplemented_nowarn; 12094 #endif 12095 12096 #if defined(TARGET_NR_utimensat) 12097 case TARGET_NR_utimensat: 12098 { 12099 struct timespec *tsp, ts[2]; 12100 if (!arg3) { 12101 tsp = NULL; 12102 } else { 12103 target_to_host_timespec(ts, arg3); 12104 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 12105 tsp = ts; 12106 } 12107 if (!arg2) 12108 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12109 else { 12110 if (!(p = lock_user_string(arg2))) { 12111 ret = -TARGET_EFAULT; 12112 goto fail; 12113 } 12114 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12115 unlock_user(p, arg2, 0); 12116 } 12117 } 12118 break; 12119 #endif 12120 case TARGET_NR_futex: 12121 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 12122 break; 12123 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 12124 case TARGET_NR_inotify_init: 12125 ret = get_errno(sys_inotify_init()); 12126 if (ret >= 0) { 12127 fd_trans_register(ret, &target_inotify_trans); 12128 } 12129 break; 12130 #endif 12131 #ifdef CONFIG_INOTIFY1 12132 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 12133 case TARGET_NR_inotify_init1: 12134 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1, 12135 fcntl_flags_tbl))); 12136 if (ret >= 0) { 12137 fd_trans_register(ret, &target_inotify_trans); 12138 } 12139 break; 12140 #endif 12141 #endif 12142 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 12143 case TARGET_NR_inotify_add_watch: 12144 
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* inotify_rm_watch(fd, wd): both args are plain integers, pass through. */
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            /* Translate guest O_* open flags to host values. */
            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                /* Optional attr struct: copy in and byte-swap from guest memory. */
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    goto efault;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): locks the string starting one byte BEFORE arg1,
             * yet unlocks at arg1 below — looks like an off-by-one; confirm
             * against lock_user_string() semantics before changing. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                goto efault;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        /* NOTE(review): same arg1 - 1 lock / arg1 unlock mismatch as mq_open. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* NOTE(review): p is not checked for NULL before use — a bad
             * guest buffer would pass NULL to safe_mq_timedsend(); other
             * cases in this switch goto efault here. Verify intent. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                /* No timeout supplied: block indefinitely. */
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): the kernel WRITES the received message into this
             * buffer, so VERIFY_WRITE looks correct here, not VERIFY_READ;
             * also no NULL check — confirm against lock_user() contract. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            /* Write the received bytes back to guest memory. */
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                /* NOTE(review): copy_from_user_mq_attr() result is ignored
                 * here — a faulting arg2 proceeds with garbage attrs. */
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                /* mq_setattr also returns the old attributes via the second out arg. */
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                /* Copy the (old or current) attributes out to the guest. */
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* tee(fd_in, fd_out, len, flags): pure fd/integer args, direct passthrough. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            /* Either offset pointer may be NULL in the guest; only copy in/out
             * the ones that were supplied. */
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    goto efault;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    goto efault;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* splice() advances the offsets; write updated values back. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    goto efault;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    goto efault;
                }
            }
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() reports its failure cause via host errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Register a translator so reads/writes of the 8-byte counter
             * get byte-swapped for cross-endian guests. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate only the two flag bits eventfd2 accepts. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABIs pass the two 64-bit offsets as register pairs. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts a pad register, shifting the 64-bit pairs up one slot. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        ret = do_signalfd4(arg1, arg2, arg4);
        break;
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Old signalfd has no flags argument; share the signalfd4 helper. */
        ret = do_signalfd4(arg1, arg2, 0);
        break;
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            /* Convert the guest epoll_event (may differ in layout/endianness). */
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    /* Shared body for epoll_wait/epoll_pwait: allocate a host-side event
     * array, wait, then convert results back into the guest's array. */
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the g_try_new() below can't be driven to
         * absurd sizes by the guest. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            ret = -TARGET_ENOMEM;
            break;
        }

        /* Dispatch on the actual syscall number: only pwait converts a
         * guest sigset. */
        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* Plain epoll_wait == epoll_pwait with no sigmask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; swap each back to guest order. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            /* Error: nothing valid to write back. */
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        /* Pass NULL for old_limit when the guest didn't ask for it. */
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Bad address: raise SIGSEGV on the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;

            /* NOTE(review): no break here — on the fault path execution
             * falls through, reads the uninitialized mem_value, and
             * overwrites the 0xdeadbeef sentinel below. Confirm whether a
             * break was intended. */
        }
        /* Compare-and-swap: store arg1 only if the current value equals arg2. */
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this?
         */
        ret = 0;
        break;
    }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Host timer handles live in the g_posix_timers table; the guest
         * gets back an index tagged with TIMER_MAGIC. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the tagged index back to the guest as its timer_t. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returns a negative target errno on bad ids. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is optional; only copy it out when requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* NOTE(review): ret here is an overrun count, not a file
         * descriptor — calling fd_trans_unregister() on it looks
         * misplaced; confirm against the fd_trans table semantics. */
        fd_trans_unregister(ret);
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the slot in the host timer table for reuse. */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
        break;
#endif

    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
        /* Syscalls whose absence is expected are reported without the warning. */
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
    /* Common exit: optional strace-style logging, then return the (possibly
     * negative target-errno) result to the caller. */
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
efault:
    /* Shared bad-guest-address exit used by the cases above. */
    ret = -TARGET_EFAULT;
    goto fail;
}