/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>

#include <sys/resource.h>
#include <sys/shm.h>

#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "disas/disas.h"
#include "qemu/bitops.h"
#include "qemu/path.h"
#include "qemu/queue.h"
#include "qemu/guest-random.h"
#include "qemu/units.h"
#include "qemu/selfmap.h"
#include "qemu/lockable.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target_signal.h"
#include "accel/tcg/debuginfo.h"

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE  = 0x0040000,     /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,     /* userspace function ptrs point to
                                           descriptors (signal handling) */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX       = 0x0000,
    PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4        = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3        = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3     = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5        = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386    = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4       = 0x0005 | STICKY_TIMEOUTS,
    PER_BSD         = 0x0006,
    PER_SUNOS       = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX       = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32     = 0x0008,
    PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32      = 0x0009 | STICKY_TIMEOUTS,     /* IRIX5 32-bit */
    PER_IRIXN32     = 0x000a | STICKY_TIMEOUTS,     /* IRIX6 new 32-bit */
    PER_IRIX64      = 0x000b | STICKY_TIMEOUTS,     /* IRIX6 64-bit */
    PER_RISCOS      = 0x000c,
    PER_SOLARIS     = 0x000d | STICKY_TIMEOUTS,
    PER_UW7         = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4        = 0x000f,                       /* OSF/1 v4 */
    PER_HPUX        = 0x0010,
    PER_MASK        = 0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)

int info_is_fdpic(struct image_info *info)
{
    return info->personality == PER_LINUX_FDPIC;
}

/* this flag is ineffective under Linux too; it should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#if TARGET_BIG_ENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif

#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong      target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong       target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif

#ifdef USE_UID16
typedef abi_ushort      target_uid_t;
typedef abi_ushort      target_gid_t;
#else
typedef abi_uint        target_uid_t;
typedef abi_uint        target_gid_t;
#endif
typedef abi_int         target_pid_t;

#ifdef TARGET_I386

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_START_MMAP 0x2aaaaab000ULL

#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64

#define ELF_PLATFORM   "x86_64"

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[15]);
    (*regs)[1] = tswapreg(env->regs[14]);
    (*regs)[2] = tswapreg(env->regs[13]);
    (*regs)[3] = tswapreg(env->regs[12]);
    (*regs)[4] = tswapreg(env->regs[R_EBP]);
    (*regs)[5] = tswapreg(env->regs[R_EBX]);
    (*regs)[6] = tswapreg(env->regs[11]);
    (*regs)[7] = tswapreg(env->regs[10]);
    (*regs)[8] = tswapreg(env->regs[9]);
    (*regs)[9] = tswapreg(env->regs[8]);
    (*regs)[10] = tswapreg(env->regs[R_EAX]);
    (*regs)[11] = tswapreg(env->regs[R_ECX]);
    (*regs)[12] = tswapreg(env->regs[R_EDX]);
    (*regs)[13] = tswapreg(env->regs[R_ESI]);
    (*regs)[14] = tswapreg(env->regs[R_EDI]);
    (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[16] = tswapreg(env->eip);
    (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[18] = tswapreg(env->eflags);
    (*regs)[19] = tswapreg(env->regs[R_ESP]);
    (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff);
    (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
}
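
/*
 * Editorial note: the slot order above mirrors the kernel's x86-64
 * elf_gregset_t (user_regs_struct).  Slot 15 is orig_rax there; QEMU
 * does not track the pre-syscall rax, hence the XXX duplicate of RAX.
 * Likewise slots 21/22 are fs_base/gs_base in the kernel and are
 * approximated here with the FS/GS selectors, which also fill
 * slots 25/26.
 */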

#if ULONG_MAX > UINT32_MAX
#define INIT_GUEST_COMMPAGE
static bool init_guest_commpage(void)
{
    /*
     * The vsyscall page is at a high negative address aka kernel space,
     * which means that we cannot actually allocate it with target_mmap.
     * We still should be able to use page_set_flags, unless the user
     * has specified -R reserved_va, which would trigger an assert().
     */
    if (reserved_va != 0 &&
        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
        error_report("Cannot allocate vsyscall page");
        exit(EXIT_FAILURE);
    }
    page_set_flags(TARGET_VSYSCALL_PAGE,
                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}
#endif
#else

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_386

#define ELF_PLATFORM get_elf_platform()
#define EXSTACK_DEFAULT true

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6) {
        family = 6;
    }
    if (family >= 3) {
        elf_platform[1] = '0' + family;
    }
    return elf_platform;
}

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /*
     * SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
     * starts %edx contains a pointer to a function which might be
     * registered using `atexit'.  This provides a means for the
     * dynamic linker to call DT_FINI functions for shared libraries
     * that have been loaded before the code runs.
     *
     * A value of 0 tells us we have no such handler.
     */
    regs->edx = 0;
}

#define ELF_NREG    17
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[R_EBX]);
    (*regs)[1] = tswapreg(env->regs[R_ECX]);
    (*regs)[2] = tswapreg(env->regs[R_EDX]);
    (*regs)[3] = tswapreg(env->regs[R_ESI]);
    (*regs)[4] = tswapreg(env->regs[R_EDI]);
    (*regs)[5] = tswapreg(env->regs[R_EBP]);
    (*regs)[6] = tswapreg(env->regs[R_EAX]);
    (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[12] = tswapreg(env->eip);
    (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[14] = tswapreg(env->eflags);
    (*regs)[15] = tswapreg(env->regs[R_ESP]);
    (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff);
}
#endif

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_ARM

#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */

#define ELF_START_MMAP 0x80000000

#define ELF_ARCH        EM_ARM
#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->uregs[16] = ARM_CPU_MODE_USR;
    if (infop->entry & 1) {
        regs->uregs[16] |= CPSR_T;
    }
    regs->uregs[15] = infop->entry & 0xfffffffe;
    regs->uregs[13] = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->uregs[2], stack + 8); /* envp */
    get_user_ual(regs->uregs[1], stack + 4); /* envp */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->uregs[0] = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->uregs[10] = infop->start_data;

    /* Support ARM FDPIC.  */
    if (info_is_fdpic(infop)) {
        /*
         * As described in the ABI document, r7 points to the loadmap info
         * prepared by the kernel.  If an interpreter is needed, r8 points
         * to the interpreter loadmap and r9 points to the interpreter
         * PT_DYNAMIC info.  If no interpreter is needed, r8 is zero, and
         * r9 points to the main program PT_DYNAMIC info.
         */
        regs->uregs[7] = infop->loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            /* Executable is dynamically loaded.  */
            regs->uregs[8] = infop->interpreter_loadmap_addr;
            regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->uregs[8] = 0;
            regs->uregs[9] = infop->pt_dynamic_addr;
        }
    }
}

#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);

    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
};

enum {
    ARM_HWCAP2_ARM_AES      = 1 << 0,
    ARM_HWCAP2_ARM_PMULL    = 1 << 1,
    ARM_HWCAP2_ARM_SHA1     = 1 << 2,
    ARM_HWCAP2_ARM_SHA2     = 1 << 3,
    ARM_HWCAP2_ARM_CRC32    = 1 << 4,
};

/* The commpage only exists for 32 bit kernels */

#define HI_COMMPAGE (intptr_t)0xffff0f00u

static bool init_guest_commpage(void)
{
    abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
    void *want = g2h_untagged(commpage);
    void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

    if (addr == MAP_FAILED) {
        perror("Allocating guest commpage");
        exit(EXIT_FAILURE);
    }
    if (addr != want) {
        return false;
    }
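
    /*
     * Editorial note: 0xffff0ffc is the kernel's __kuser_helper_version
     * word; writing 5 advertises the helpers up through __kuser_cmpxchg64
     * (see the kernel's kernel_user_helpers documentation).  The helper
     * entry points themselves are not real code here: QEMU intercepts
     * execution at those addresses in the ARM cpu_loop instead.
     */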

    /* Set kernel helper versions; rest of page is 0.  */
    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));

    if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(EXIT_FAILURE);
    }

    page_set_flags(commpage, commpage | ~qemu_host_page_mask,
                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
    return true;
}

#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_ARM_SWP;
    hwcaps |= ARM_HWCAP_ARM_HALF;
    hwcaps |= ARM_HWCAP_ARM_THUMB;
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
    GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
    GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
    GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
    GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
    GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);

    if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
        cpu_isar_feature(aa32_fpdp_v3, cpu)) {
        hwcaps |= ARM_HWCAP_ARM_VFPv3;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            hwcaps |= ARM_HWCAP_ARM_VFPD32;
        } else {
            hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
        }
    }
    GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);

    return hwcaps;
}

static uint32_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    return hwcaps;
}

#undef GET_FEATURE
#undef GET_FEATURE_ID

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    CPUARMState *env = thread_cpu->env_ptr;

#if TARGET_BIG_ENDIAN
# define END  "b"
#else
# define END  "l"
#endif

    if (arm_feature(env, ARM_FEATURE_V8)) {
        return "v8" END;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            return "v7m" END;
        } else {
            return "v7" END;
        }
    } else if (arm_feature(env, ARM_FEATURE_V6)) {
        return "v6" END;
    } else if (arm_feature(env, ARM_FEATURE_V5)) {
        return "v5" END;
    } else {
        return "v4" END;
    }

#undef END
}

#else
/* 64 bit ARM definitions */
#define ELF_START_MMAP 0x80000000

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#if TARGET_BIG_ENDIAN
# define ELF_PLATFORM    "aarch64_be"
#else
# define ELF_PLATFORM    "aarch64"
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
    ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
    ARM_HWCAP_A64_DIT           = 1 << 24,
    ARM_HWCAP_A64_USCAT         = 1 << 25,
    ARM_HWCAP_A64_ILRCPC        = 1 << 26,
    ARM_HWCAP_A64_FLAGM         = 1 << 27,
    ARM_HWCAP_A64_SSBS          = 1 << 28,
    ARM_HWCAP_A64_SB            = 1 << 29,
    ARM_HWCAP_A64_PACA          = 1 << 30,
    ARM_HWCAP_A64_PACG          = 1UL << 31,

    ARM_HWCAP2_A64_DCPODP       = 1 << 0,
    ARM_HWCAP2_A64_SVE2         = 1 << 1,
    ARM_HWCAP2_A64_SVEAES       = 1 << 2,
    ARM_HWCAP2_A64_SVEPMULL     = 1 << 3,
    ARM_HWCAP2_A64_SVEBITPERM   = 1 << 4,
    ARM_HWCAP2_A64_SVESHA3      = 1 << 5,
    ARM_HWCAP2_A64_SVESM4       = 1 << 6,
    ARM_HWCAP2_A64_FLAGM2       = 1 << 7,
    ARM_HWCAP2_A64_FRINT        = 1 << 8,
    ARM_HWCAP2_A64_SVEI8MM      = 1 << 9,
    ARM_HWCAP2_A64_SVEF32MM     = 1 << 10,
    ARM_HWCAP2_A64_SVEF64MM     = 1 << 11,
    ARM_HWCAP2_A64_SVEBF16      = 1 << 12,
    ARM_HWCAP2_A64_I8MM         = 1 << 13,
    ARM_HWCAP2_A64_BF16         = 1 << 14,
    ARM_HWCAP2_A64_DGH          = 1 << 15,
    ARM_HWCAP2_A64_RNG          = 1 << 16,
    ARM_HWCAP2_A64_BTI          = 1 << 17,
    ARM_HWCAP2_A64_MTE          = 1 << 18,
    ARM_HWCAP2_A64_ECV          = 1 << 19,
    ARM_HWCAP2_A64_AFP          = 1 << 20,
    ARM_HWCAP2_A64_RPRES        = 1 << 21,
    ARM_HWCAP2_A64_MTE3         = 1 << 22,
    ARM_HWCAP2_A64_SME          = 1 << 23,
    ARM_HWCAP2_A64_SME_I16I64   = 1 << 24,
    ARM_HWCAP2_A64_SME_F64F64   = 1 << 25,
    ARM_HWCAP2_A64_SME_I8I32    = 1 << 26,
    ARM_HWCAP2_A64_SME_F16F32   = 1 << 27,
    ARM_HWCAP2_A64_SME_B16F32   = 1 << 28,
    ARM_HWCAP2_A64_SME_F32F32   = 1 << 29,
    ARM_HWCAP2_A64_SME_FA64     = 1 << 30,
};

#define ELF_HWCAP   get_elf_hwcap()
#define ELF_HWCAP2  get_elf_hwcap2()

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

static uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;
    hwcaps |= ARM_HWCAP_A64_CPUID;

    /* probe for the extra features */

    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
    GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
    GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
    GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
    GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
    GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
    GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
    GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
    GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);

    return hwcaps;
}

static uint32_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
    GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
    GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
    GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
    GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
    GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
    GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
    GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
    GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
    GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
    GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
    GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
    GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
    GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
    GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
    GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
    GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
    GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
    GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
                              ARM_HWCAP2_A64_SME_F32F32 |
                              ARM_HWCAP2_A64_SME_B16F32 |
                              ARM_HWCAP2_A64_SME_F16F32 |
                              ARM_HWCAP2_A64_SME_I8I32));
    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);

    return hwcaps;
}

#undef GET_FEATURE_ID

#endif /* not TARGET_AARCH64 */
#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_SPARCV9
#else
#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV)
#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_SPARC
#endif /* TARGET_SPARC64 */
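
/*
 * Editorial note: the initial stack pointer computed in init_thread()
 * below reserves room for the 16-word register window save area that the
 * SPARC ABI requires above %sp, and TARGET_STACK_BIAS accounts for the
 * V9 ABI stack bias (2047 on 64-bit, zero on 32-bit targets).
 */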

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Note that target_cpu_copy_regs does not read psr/tstate.  */
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong)
                        - TARGET_STACK_BIAS);
}
#endif /* TARGET_SPARC */

#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

#endif

#define ELF_ARCH        EM_PPC

/*
 * Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
 * See arch/powerpc/include/asm/cputable.h.
 */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,

    /* Feature definitions in AT_HWCAP2.  */
    QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000,      /* ISA 2.07 */
    QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000,        /* Hardware Transactional Memory */
    QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000,       /* Data Stream Control Register */
    QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000,        /* Event Base Branching */
    QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000,       /* Integer Select */
    QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000,        /* Target Address Register */
    QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
    QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000,      /* ISA 3.00 */
    QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000,    /* VSX IEEE Bin Float 128-bit */
    QEMU_PPC_FEATURE2_DARN = 0x00200000,           /* darn random number insn */
    QEMU_PPC_FEATURE2_SCV = 0x00100000,            /* scv syscall */
    QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
    QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000,       /* ISA 3.1 */
    QEMU_PPC_FEATURE2_MMA = 0x00020000,            /* Matrix-Multiply Assist */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

    /*
     * We don't have to be terribly complete here; the high points are
     * Altivec/FP/SPE support.  Anything else is just a bonus.
     */
#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature)                                    \
    do {                                                                \
        if ((cpu->env.insns_flags2 & flags) == flags) {                 \
            features |= feature;                                        \
        }                                                               \
    } while (0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
    GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
    GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
    GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
                  PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
                 QEMU_PPC_FEATURE_ARCH_2_06);
#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap2(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature)                                     \
    do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)

    GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
    GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
    GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                  PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
                  QEMU_PPC_FEATURE2_VEC_CRYPTO);
    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
                 QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128);
    GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 |
                 QEMU_PPC_FEATURE2_MMA);

#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
    do {                                                \
        PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);      \
        /*                                              \
         * Handle glibc compatibility: these magic entries must \
         * be at the lowest addresses in the final auxv. \
         */                                             \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \
        NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
    } while (0)

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64)
    if (get_ppc64_abi(infop) < 2) {
        uint64_t val;
        get_user_u64(val, infop->entry + 8);
        _regs->gpr[2] = val + infop->load_bias;
        get_user_u64(val, infop->entry);
        infop->entry = val + infop->load_bias;
    } else {
        _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
    }
#endif
    _regs->nip = infop->entry;
}

/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
#define ELF_NREG 48
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapreg(env->gpr[i]);
    }

    (*regs)[32] = tswapreg(env->nip);
    (*regs)[33] = tswapreg(env->msr);
    (*regs)[35] = tswapreg(env->ctr);
    (*regs)[36] = tswapreg(env->lr);
    (*regs)[37] = tswapreg(cpu_read_xer(env));

    ccr = ppc_get_cr(env);
    (*regs)[38] = tswapreg(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_LOONGARCH64

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_LOONGARCH
#define EXSTACK_DEFAULT true

#define elf_check_arch(x) ((x) == EM_LOONGARCH)

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Set CSR.CRMD PG, DA = 1, 0 (paging on, direct access off).  */
    regs->csr.crmd = 2 << 3;
    regs->csr.era = infop->entry;
    regs->regs[3] = infop->start_stack;
}

/* See linux kernel: arch/loongarch/include/asm/elf.h */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_EF_R0 = 0,
    TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33,
    TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPULoongArchState *env)
{
    int i;

    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]);
    }

    (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc);
    (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#define ELF_HWCAP get_elf_hwcap()

/* See arch/loongarch/include/uapi/asm/hwcap.h */
enum {
    HWCAP_LOONGARCH_CPUCFG   = (1 << 0),
    HWCAP_LOONGARCH_LAM      = (1 << 1),
    HWCAP_LOONGARCH_UAL      = (1 << 2),
    HWCAP_LOONGARCH_FPU      = (1 << 3),
    HWCAP_LOONGARCH_LSX      = (1 << 4),
    HWCAP_LOONGARCH_LASX     = (1 << 5),
    HWCAP_LOONGARCH_CRC32    = (1 << 6),
    HWCAP_LOONGARCH_COMPLEX  = (1 << 7),
    HWCAP_LOONGARCH_CRYPTO   = (1 << 8),
    HWCAP_LOONGARCH_LVZ      = (1 << 9),
    HWCAP_LOONGARCH_LBT_X86  = (1 << 10),
    HWCAP_LOONGARCH_LBT_ARM  = (1 << 11),
    HWCAP_LOONGARCH_LBT_MIPS = (1 << 12),
};

static uint32_t get_elf_hwcap(void)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu);
    uint32_t hwcaps = 0;
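
    /*
     * Editorial note: CRC32 is reported unconditionally because QEMU's
     * emulated LoongArch cores always provide the crc.w instructions;
     * the remaining bits are probed from the guest-visible CPUCFG words,
     * mirroring the kernel's CPU probing.
     */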
    hwcaps |= HWCAP_LOONGARCH_CRC32;

    if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) {
        hwcaps |= HWCAP_LOONGARCH_UAL;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) {
        hwcaps |= HWCAP_LOONGARCH_FPU;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) {
        hwcaps |= HWCAP_LOONGARCH_LAM;
    }

    return hwcaps;
}

#define ELF_PLATFORM "loongarch"

#endif /* TARGET_LOONGARCH64 */

#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#define ELF_ARCH    EM_MIPS
#define EXSTACK_DEFAULT true

#ifdef TARGET_ABI_MIPSN32
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
#else
#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
#endif

#define ELF_BASE_PLATFORM get_elf_base_platform()

#define MATCH_PLATFORM_INSN(_flags, _base_platform)      \
    do { if ((cpu->env.insn_flags & (_flags)) == _flags) \
    { return _base_platform; } } while (0)

static const char *get_elf_base_platform(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);

    /* 64-bit ISAs go first */
    MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6");
    MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5");
    MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2");
    MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64");
    MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5");
    MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4");
    MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3");

    /* 32-bit ISAs */
    MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6");
    MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5");
    MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2");
    MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32");
    MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2");

    /* Fallback */
    return "mips";
}
#undef MATCH_PLATFORM_INSN

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

/* See linux kernel: arch/mips/include/asm/elf.h.  */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/include/asm/reg.h.  */
enum {
#ifdef TARGET_MIPS64
    TARGET_EF_R0 = 0,
#else
    TARGET_EF_R0 = 6,
#endif
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
    TARGET_EF_LO = TARGET_EF_R0 + 32,
    TARGET_EF_HI = TARGET_EF_R0 + 33,
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
};
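
/*
 * Editorial note: TARGET_EF_R0 = 6 on the 32-bit ABIs mirrors the
 * kernel's EF_R0 (arch/mips/include/uapi/asm/reg.h), whose 32-bit dump
 * layout keeps six unused leading words; the first loop below zeroes
 * them.
 */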

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
{
    int i;

    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
    }

    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

/* See arch/mips/include/uapi/asm/hwcap.h.  */
enum {
    HWCAP_MIPS_R6           = (1 << 0),
    HWCAP_MIPS_MSA          = (1 << 1),
    HWCAP_MIPS_CRC32        = (1 << 2),
    HWCAP_MIPS_MIPS16       = (1 << 3),
    HWCAP_MIPS_MDMX         = (1 << 4),
    HWCAP_MIPS_MIPS3D       = (1 << 5),
    HWCAP_MIPS_SMARTMIPS    = (1 << 6),
    HWCAP_MIPS_DSP          = (1 << 7),
    HWCAP_MIPS_DSP2         = (1 << 8),
    HWCAP_MIPS_DSP3         = (1 << 9),
    HWCAP_MIPS_MIPS16E2     = (1 << 10),
    HWCAP_LOONGSON_MMI      = (1 << 11),
    HWCAP_LOONGSON_EXT      = (1 << 12),
    HWCAP_LOONGSON_EXT2     = (1 << 13),
    HWCAP_LOONGSON_CPUCFG   = (1 << 14),
};

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE_INSN(_flag, _hwcap)                               \
    do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_SET(_reg, _mask, _hwcap)                      \
    do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap)      \
    do {                                                              \
        if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) { \
            hwcaps |= _hwcap;                                         \
        }                                                             \
    } while (0)

static uint32_t get_elf_hwcap(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH,
                        2, HWCAP_MIPS_R6);
    GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA);
    GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI);
    GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT);

    return hwcaps;
}

#undef GET_FEATURE_REG_EQU
#undef GET_FEATURE_REG_SET
#undef GET_FEATURE_INSN

#endif /* TARGET_MIPS */

#ifdef TARGET_MICROBLAZE

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_MICROBLAZE

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 38
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
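
/*
 * Editorial note: the 38 slots are r0..r31 followed by pc, msr, a zero
 * pad, ear, another pad and esr, matching the dump loop below.
 */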

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
{
    int i, pos = 0;

    for (i = 0; i < 32; i++) {
        (*regs)[pos++] = tswapreg(env->regs[i]);
    }

    (*regs)[pos++] = tswapreg(env->pc);
    (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env));
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->ear);
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->esr);
}

#endif /* TARGET_MICROBLAZE */

#ifdef TARGET_NIOS2

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_ALTERA_NIOS2

static void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->ea = infop->entry;
    regs->sp = infop->start_stack;
}

#define LO_COMMPAGE  TARGET_PAGE_SIZE

static bool init_guest_commpage(void)
{
    static const uint8_t kuser_page[4 + 2 * 64] = {
        /* __kuser_helper_version */
        [0x00] = 0x02, 0x00, 0x00, 0x00,

        /* __kuser_cmpxchg */
        [0x04] = 0x3a, 0x6c, 0x3b, 0x00,  /* trap 16 */
                 0x3a, 0x28, 0x00, 0xf8,  /* ret */

        /* __kuser_sigtramp */
        [0x44] = 0xc4, 0x22, 0x80, 0x00,  /* movi r2, __NR_rt_sigreturn */
                 0x3a, 0x68, 0x3b, 0x00,  /* trap 0 */
    };

    void *want = g2h_untagged(LO_COMMPAGE & -qemu_host_page_size);
    void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

    if (addr == MAP_FAILED) {
        perror("Allocating guest commpage");
        exit(EXIT_FAILURE);
    }
    if (addr != want) {
        return false;
    }

    memcpy(addr, kuser_page, sizeof(kuser_page));

    if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(EXIT_FAILURE);
    }

    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
    return true;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 49
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUNios2State *env)
{
    int i;

    (*regs)[0] = -1;
    for (i = 1; i < 8; i++) {    /* r0-r7 */
        (*regs)[i] = tswapreg(env->regs[i + 7]);
    }

    for (i = 8; i < 16; i++) {   /* r8-r15 */
        (*regs)[i] = tswapreg(env->regs[i - 8]);
    }

    for (i = 16; i < 24; i++) {  /* r16-r23 */
        (*regs)[i] = tswapreg(env->regs[i + 7]);
    }
    (*regs)[24] = -1;   /* R_ET */
    (*regs)[25] = -1;   /* R_BT */
    (*regs)[26] = tswapreg(env->regs[R_GP]);
    (*regs)[27] = tswapreg(env->regs[R_SP]);
    (*regs)[28] = tswapreg(env->regs[R_FP]);
    (*regs)[29] = tswapreg(env->regs[R_EA]);
    (*regs)[30] = -1;   /* R_SSTATUS */
    (*regs)[31] = tswapreg(env->regs[R_RA]);

    (*regs)[32] = tswapreg(env->pc);

    (*regs)[33] = -1;   /* R_STATUS */
    (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]);

    for (i = 35; i < 49; i++) {  /* ...  */
        (*regs)[i] = -1;
    }
}

#endif /* TARGET_NIOS2 */

#ifdef TARGET_OPENRISC

#define ELF_START_MMAP 0x08000000

#define ELF_ARCH EM_OPENRISC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2MSB

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->gpr[1] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

/* See linux kernel arch/openrisc/include/asm/elf.h.  */
#define ELF_NREG 34 /* gprs and pc, sr */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUOpenRISCState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(cpu_get_sr(env));
}
#define ELF_HWCAP 0
#define ELF_PLATFORM NULL

#endif /* TARGET_OPENRISC */

#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_SH

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

/* See linux kernel: arch/sh/include/asm/elf.h.  */
#define ELF_NREG 23
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
enum {
    TARGET_REG_PC = 16,
    TARGET_REG_PR = 17,
    TARGET_REG_SR = 18,
    TARGET_REG_GBR = 19,
    TARGET_REG_MACH = 20,
    TARGET_REG_MACL = 21,
    TARGET_REG_SYSCALL = 22
};

static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
                                      const CPUSH4State *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapreg(env->gregs[i]);
    }

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
    (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
    (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
    (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
    (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

enum {
    SH_CPU_HAS_FPU            = 0x0001, /* Hardware FPU support */
    SH_CPU_HAS_P2_FLUSH_BUG   = 0x0002, /* Need to flush the cache in P2 area */
    SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
    SH_CPU_HAS_DSP            = 0x0008, /* SH-DSP: DSP support */
    SH_CPU_HAS_PERF_COUNTER   = 0x0010, /* Hardware performance counters */
    SH_CPU_HAS_PTEA           = 0x0020, /* PTEA register */
    SH_CPU_HAS_LLSC           = 0x0040, /* movli.l/movco.l */
    SH_CPU_HAS_L2_CACHE       = 0x0080, /* Secondary cache / URAM */
    SH_CPU_HAS_OP32           = 0x0100, /* 32-bit instruction support */
    SH_CPU_HAS_PTEAEX         = 0x0200, /* PTE ASID Extension support */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
    uint32_t hwcap = 0;

    hwcap |= SH_CPU_HAS_FPU;

    if (cpu->env.features & SH_FEATURE_SH4A) {
        hwcap |= SH_CPU_HAS_LLSC;
    }

    return hwcap;
}

#endif

#ifdef TARGET_CRIS

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_CRIS

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define ELF_EXEC_PAGESIZE        8192

#endif

#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->sr = 0;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/m68k/include/asm/elf.h.  */
#define ELF_NREG 20
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
{
    (*regs)[0] = tswapreg(env->dregs[1]);
    (*regs)[1] = tswapreg(env->dregs[2]);
    (*regs)[2] = tswapreg(env->dregs[3]);
    (*regs)[3] = tswapreg(env->dregs[4]);
    (*regs)[4] = tswapreg(env->dregs[5]);
    (*regs)[5] = tswapreg(env->dregs[6]);
    (*regs)[6] = tswapreg(env->dregs[7]);
    (*regs)[7] = tswapreg(env->aregs[0]);
    (*regs)[8] = tswapreg(env->aregs[1]);
    (*regs)[9] = tswapreg(env->aregs[2]);
    (*regs)[10] = tswapreg(env->aregs[3]);
    (*regs)[11] = tswapreg(env->aregs[4]);
    (*regs)[12] = tswapreg(env->aregs[5]);
    (*regs)[13] = tswapreg(env->aregs[6]);
    (*regs)[14] = tswapreg(env->dregs[0]);
    (*regs)[15] = tswapreg(env->aregs[7]);
    (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapreg(env->sr);
    (*regs)[18] = tswapreg(env->pc);
    (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

#endif

#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define ELF_CLASS ELFCLASS64
#define ELF_ARCH  EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;
    regs->usp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */

#ifdef TARGET_S390X

#define ELF_START_MMAP (0x20000000000ULL)

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_S390

#include "elf.h"

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE(_feat, _hwcap) \
    do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    /*
     * Let's assume we always have esan3 and zarch.
     * 31-bit processes can use 64-bit registers (high gprs).
     */
    uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS;

    GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE);
    GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA);
    GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP);
    GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM);
    if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) &&
        s390_has_feat(S390_FEAT_ETF3_ENH)) {
        hwcap |= HWCAP_S390_ETF3EH;
    }
    GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS);
    GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT);

    return hwcap;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [HWCAP_S390_ESAN3]     = "esan3",
        [HWCAP_S390_ZARCH]     = "zarch",
        [HWCAP_S390_STFLE]     = "stfle",
        [HWCAP_S390_MSA]       = "msa",
        [HWCAP_S390_LDISP]     = "ldisp",
        [HWCAP_S390_EIMM]      = "eimm",
        [HWCAP_S390_DFP]       = "dfp",
        [HWCAP_S390_HPAGE]     = "edat",
        [HWCAP_S390_ETF3EH]    = "etf3eh",
        [HWCAP_S390_HIGH_GPRS] = "highgprs",
        [HWCAP_S390_TE]        = "te",
        [HWCAP_S390_VXRS]      = "vx",
        [HWCAP_S390_VXRS_BCD]  = "vxd",
        [HWCAP_S390_VXRS_EXT]  = "vxe",
        [HWCAP_S390_GS]        = "gs",
        [HWCAP_S390_VXRS_EXT2] = "vxe2",
        [HWCAP_S390_VXRS_PDE]  = "vxp",
        [HWCAP_S390_SORT]      = "sort",
        [HWCAP_S390_DFLT]      = "dflt",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psw.addr = infop->entry;
    regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
    regs->gprs[15] = infop->start_stack;
}

/* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs).  */
#define ELF_NREG 27
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_REG_PSWM = 0,
    TARGET_REG_PSWA = 1,
    TARGET_REG_GPRS = 2,
    TARGET_REG_ARS = 18,
    TARGET_REG_ORIG_R2 = 26,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUS390XState *env)
{
    int i;
    uint32_t *aregs;

    (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask);
    (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr);
    for (i = 0; i < 16; i++) {
        (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]);
    }
    aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]);
    for (i = 0; i < 16; i++) {
        aregs[i] = tswap32(env->aregs[i]);
    }
    (*regs)[TARGET_REG_ORIG_R2] = 0;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_S390X */

#ifdef TARGET_RISCV

#define ELF_START_MMAP 0x80000000
#define ELF_ARCH  EM_RISCV

#ifdef TARGET_RISCV32
#define ELF_CLASS ELFCLASS32
#else
#define ELF_CLASS ELFCLASS64
#endif

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
#define MISA_BIT(EXT) (1 << (EXT - 'A'))
    RISCVCPU *cpu = RISCV_CPU(thread_cpu);
    uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A')
                    | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C');

    return cpu->env.misa_ext & mask;
#undef MISA_BIT
}
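
/*
 * Editorial note, a worked example of the MISA_BIT() mapping above:
 * 'A'-'A' = 0, 'C'-'A' = 2, 'D'-'A' = 3, 'F'-'A' = 5, 'I'-'A' = 8 and
 * 'M'-'A' = 12, so the IMAFDC mask is 0x112d.  The kernel's
 * COMPAT_HWCAP_ISA_* constants are derived the same way.
 */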

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->sepc = infop->entry;
    regs->sp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_RISCV */

#ifdef TARGET_HPPA

#define ELF_START_MMAP  0x80000000
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_PARISC
#define ELF_PLATFORM    "PARISC"
#define STACK_GROWS_DOWN 0
#define STACK_ALIGNMENT  64

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->iaoq[0] = infop->entry;
    regs->iaoq[1] = infop->entry + 4;
    regs->gr[23] = 0;
    regs->gr[24] = infop->argv;
    regs->gr[25] = infop->argc;
    /* The top-of-stack contains a linkage buffer.  */
    regs->gr[30] = infop->start_stack + 64;
    regs->gr[31] = infop->entry;
}

#define LO_COMMPAGE  0

static bool init_guest_commpage(void)
{
    void *want = g2h_untagged(LO_COMMPAGE);
    void *addr = mmap(want, qemu_host_page_size, PROT_NONE,
                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

    if (addr == MAP_FAILED) {
        perror("Allocating guest commpage");
        exit(EXIT_FAILURE);
    }
    if (addr != want) {
        return false;
    }

    /*
     * On Linux, page zero is normally marked execute only + gateway.
     * Normal read or write is supposed to fail (thus PROT_NONE above),
     * but specific offsets have kernel code mapped to raise permissions
     * and implement syscalls.  Here, simply mark the page executable.
     * Special case the entry points during translation (see do_page_zero).
     */
    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}

#endif /* TARGET_HPPA */

#ifdef TARGET_XTENSA

#define ELF_START_MMAP 0x20000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_XTENSA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->windowbase = 0;
    regs->windowstart = 1;
    regs->areg[1] = infop->start_stack;
    regs->pc = infop->entry;
    if (info_is_fdpic(infop)) {
        regs->areg[4] = infop->loadmap_addr;
        regs->areg[5] = infop->interpreter_loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            regs->areg[6] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->areg[6] = infop->pt_dynamic_addr;
        }
    }
}

/* See linux kernel: arch/xtensa/include/asm/elf.h.  */
#define ELF_NREG 128
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_REG_PC,
    TARGET_REG_PS,
    TARGET_REG_LBEG,
    TARGET_REG_LEND,
    TARGET_REG_LCOUNT,
    TARGET_REG_SAR,
    TARGET_REG_WINDOWSTART,
    TARGET_REG_WINDOWBASE,
    TARGET_REG_THREADPTR,
    TARGET_REG_AR0 = 64,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUXtensaState *env)
{
    unsigned i;

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
    (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
    (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
    (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
    (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
    (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
    (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
    (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
    xtensa_sync_phys_from_window((CPUXtensaState *)env);
    for (i = 0; i < env->config->nareg; ++i) {
        (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
    }
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_XTENSA */

#ifdef TARGET_HEXAGON

#define ELF_START_MMAP 0x20000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_HEXAGON

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->sepc = infop->entry;
    regs->sp = infop->start_stack;
}

#endif /* TARGET_HEXAGON */

#ifndef ELF_BASE_PLATFORM
#define ELF_BASE_PLATFORM (NULL)
#endif

#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_MACHINE
#define ELF_MACHINE ELF_ARCH
#endif

#ifndef elf_check_arch
#define elf_check_arch(x) ((x) == ELF_ARCH)
#endif

#ifndef elf_check_abi
#define elf_check_abi(x) (1)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#ifndef STACK_GROWS_DOWN
#define STACK_GROWS_DOWN 1
#endif

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT 16
#endif

#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

#ifndef EXSTACK_DEFAULT
#define EXSTACK_DEFAULT false
#endif

#include "elf.h"

/* We must delay the following stanzas until after "elf.h".  */
#if defined(TARGET_AARCH64)

static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz,
                                    const uint32_t *data,
                                    struct image_info *info,
                                    Error **errp)
{
    if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
        if (pr_datasz != sizeof(uint32_t)) {
            error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND");
            return false;
        }
        /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later.  */
*/ 1906 info->note_flags = *data; 1907 } 1908 return true; 1909 } 1910 #define ARCH_USE_GNU_PROPERTY 1 1911 1912 #else 1913 1914 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 1915 const uint32_t *data, 1916 struct image_info *info, 1917 Error **errp) 1918 { 1919 g_assert_not_reached(); 1920 } 1921 #define ARCH_USE_GNU_PROPERTY 0 1922 1923 #endif 1924 1925 struct exec 1926 { 1927 unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 1928 unsigned int a_text; /* length of text, in bytes */ 1929 unsigned int a_data; /* length of data, in bytes */ 1930 unsigned int a_bss; /* length of uninitialized data area, in bytes */ 1931 unsigned int a_syms; /* length of symbol table data in file, in bytes */ 1932 unsigned int a_entry; /* start address */ 1933 unsigned int a_trsize; /* length of relocation info for text, in bytes */ 1934 unsigned int a_drsize; /* length of relocation info for data, in bytes */ 1935 }; 1936 1937 1938 #define N_MAGIC(exec) ((exec).a_info & 0xffff) 1939 #define OMAGIC 0407 1940 #define NMAGIC 0410 1941 #define ZMAGIC 0413 1942 #define QMAGIC 0314 1943 1944 /* Necessary parameters */ 1945 #define TARGET_ELF_EXEC_PAGESIZE \ 1946 (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \ 1947 TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE)) 1948 #define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE) 1949 #define TARGET_ELF_PAGESTART(_v) ((_v) & \ 1950 ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1)) 1951 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1)) 1952 1953 #define DLINFO_ITEMS 16 1954 1955 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n) 1956 { 1957 memcpy(to, from, n); 1958 } 1959 1960 #ifdef BSWAP_NEEDED 1961 static void bswap_ehdr(struct elfhdr *ehdr) 1962 { 1963 bswap16s(&ehdr->e_type); /* Object file type */ 1964 bswap16s(&ehdr->e_machine); /* Architecture */ 1965 bswap32s(&ehdr->e_version); /* Object file version */ 1966 bswaptls(&ehdr->e_entry); /* Entry point virtual address */ 1967 bswaptls(&ehdr->e_phoff); /* Program header table file offset */ 1968 bswaptls(&ehdr->e_shoff); /* Section header table file offset */ 1969 bswap32s(&ehdr->e_flags); /* Processor-specific flags */ 1970 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ 1971 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ 1972 bswap16s(&ehdr->e_phnum); /* Program header table entry count */ 1973 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ 1974 bswap16s(&ehdr->e_shnum); /* Section header table entry count */ 1975 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ 1976 } 1977 1978 static void bswap_phdr(struct elf_phdr *phdr, int phnum) 1979 { 1980 int i; 1981 for (i = 0; i < phnum; ++i, ++phdr) { 1982 bswap32s(&phdr->p_type); /* Segment type */ 1983 bswap32s(&phdr->p_flags); /* Segment flags */ 1984 bswaptls(&phdr->p_offset); /* Segment file offset */ 1985 bswaptls(&phdr->p_vaddr); /* Segment virtual address */ 1986 bswaptls(&phdr->p_paddr); /* Segment physical address */ 1987 bswaptls(&phdr->p_filesz); /* Segment size in file */ 1988 bswaptls(&phdr->p_memsz); /* Segment size in memory */ 1989 bswaptls(&phdr->p_align); /* Segment alignment */ 1990 } 1991 } 1992 1993 static void bswap_shdr(struct elf_shdr *shdr, int shnum) 1994 { 1995 int i; 1996 for (i = 0; i < shnum; ++i, ++shdr) { 1997 bswap32s(&shdr->sh_name); 1998 bswap32s(&shdr->sh_type); 1999 bswaptls(&shdr->sh_flags); 2000 bswaptls(&shdr->sh_addr); 2001 
bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

#ifdef TARGET_MIPS
static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
{
    bswap16s(&abiflags->version);
    bswap32s(&abiflags->ases);
    bswap32s(&abiflags->isa_ext);
    bswap32s(&abiflags->flags1);
    bswap32s(&abiflags->flags2);
}
#endif
#else
static inline void bswap_ehdr(struct elfhdr *ehdr) { }
static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static inline void bswap_sym(struct elf_sym *sym) { }
#ifdef TARGET_MIPS
static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
#endif
#endif

#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUArchState *);
#endif /* USE_ELF_CORE_DUMP */
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);

/* Verify the portions of EHDR within E_IDENT for the target.
   This can be performed before bswapping the entire header.  */
static bool elf_check_ident(struct elfhdr *ehdr)
{
    return (ehdr->e_ident[EI_MAG0] == ELFMAG0
            && ehdr->e_ident[EI_MAG1] == ELFMAG1
            && ehdr->e_ident[EI_MAG2] == ELFMAG2
            && ehdr->e_ident[EI_MAG3] == ELFMAG3
            && ehdr->e_ident[EI_CLASS] == ELF_CLASS
            && ehdr->e_ident[EI_DATA] == ELF_DATA
            && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
}

/* Verify the portions of EHDR outside of E_IDENT for the target.
   This has to wait until after bswapping the header.  */
static bool elf_check_ehdr(struct elfhdr *ehdr)
{
    return (elf_check_arch(ehdr->e_machine)
            && elf_check_abi(ehdr->e_flags)
            && ehdr->e_ehsize == sizeof(struct elfhdr)
            && ehdr->e_phentsize == sizeof(struct elf_phdr)
            && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
}

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
                                  abi_ulong p, abi_ulong stack_limit)
{
    char *tmp;
    int len, i;
    abi_ulong top = p;

    if (!p) {
        return 0;       /* bullet-proofing */
    }

    if (STACK_GROWS_DOWN) {
        int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
        for (i = argc - 1; i >= 0; --i) {
            tmp = argv[i];
            if (!tmp) {
                fprintf(stderr, "VFS: argc is wrong");
                exit(-1);
            }
            len = strlen(tmp) + 1;
            tmp += len;

            if (len > (p - stack_limit)) {
                return 0;
            }
            while (len) {
                int bytes_to_copy = (len > offset) ?
offset : len; 2100 tmp -= bytes_to_copy; 2101 p -= bytes_to_copy; 2102 offset -= bytes_to_copy; 2103 len -= bytes_to_copy; 2104 2105 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy); 2106 2107 if (offset == 0) { 2108 memcpy_to_target(p, scratch, top - p); 2109 top = p; 2110 offset = TARGET_PAGE_SIZE; 2111 } 2112 } 2113 } 2114 if (p != top) { 2115 memcpy_to_target(p, scratch + offset, top - p); 2116 } 2117 } else { 2118 int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE); 2119 for (i = 0; i < argc; ++i) { 2120 tmp = argv[i]; 2121 if (!tmp) { 2122 fprintf(stderr, "VFS: argc is wrong"); 2123 exit(-1); 2124 } 2125 len = strlen(tmp) + 1; 2126 if (len > (stack_limit - p)) { 2127 return 0; 2128 } 2129 while (len) { 2130 int bytes_to_copy = (len > remaining) ? remaining : len; 2131 2132 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy); 2133 2134 tmp += bytes_to_copy; 2135 remaining -= bytes_to_copy; 2136 p += bytes_to_copy; 2137 len -= bytes_to_copy; 2138 2139 if (remaining == 0) { 2140 memcpy_to_target(top, scratch, p - top); 2141 top = p; 2142 remaining = TARGET_PAGE_SIZE; 2143 } 2144 } 2145 } 2146 if (p != top) { 2147 memcpy_to_target(top, scratch, p - top); 2148 } 2149 } 2150 2151 return p; 2152 } 2153 2154 /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of 2155 * argument/environment space. Newer kernels (>2.6.33) allow more, 2156 * dependent on stack size, but guarantee at least 32 pages for 2157 * backwards compatibility. 2158 */ 2159 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE) 2160 2161 static abi_ulong setup_arg_pages(struct linux_binprm *bprm, 2162 struct image_info *info) 2163 { 2164 abi_ulong size, error, guard; 2165 int prot; 2166 2167 size = guest_stack_size; 2168 if (size < STACK_LOWER_LIMIT) { 2169 size = STACK_LOWER_LIMIT; 2170 } 2171 2172 if (STACK_GROWS_DOWN) { 2173 guard = TARGET_PAGE_SIZE; 2174 if (guard < qemu_real_host_page_size()) { 2175 guard = qemu_real_host_page_size(); 2176 } 2177 } else { 2178 /* no guard page for hppa target where stack grows upwards. */ 2179 guard = 0; 2180 } 2181 2182 prot = PROT_READ | PROT_WRITE; 2183 if (info->exec_stack) { 2184 prot |= PROT_EXEC; 2185 } 2186 error = target_mmap(0, size + guard, prot, 2187 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 2188 if (error == -1) { 2189 perror("mmap stack"); 2190 exit(-1); 2191 } 2192 2193 /* We reserve one extra page at the top of the stack as guard. */ 2194 if (STACK_GROWS_DOWN) { 2195 target_mprotect(error, guard, PROT_NONE); 2196 info->stack_limit = error + guard; 2197 return info->stack_limit + size - sizeof(void *); 2198 } else { 2199 info->stack_limit = error + size; 2200 return error; 2201 } 2202 } 2203 2204 /* Map and zero the bss. We need to explicitly zero any fractional pages 2205 after the data section (i.e. bss). */ 2206 static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot) 2207 { 2208 uintptr_t host_start, host_map_start, host_end; 2209 2210 last_bss = TARGET_PAGE_ALIGN(last_bss); 2211 2212 /* ??? There is confusion between qemu_real_host_page_size and 2213 qemu_host_page_size here and elsewhere in target_mmap, which 2214 may lead to the end of the data section mapping from the file 2215 not being mapped. At least there was an explicit test and 2216 comment for that here, suggesting that "the file size must 2217 be known". The comment probably pre-dates the introduction 2218 of the fstat system call in target_mmap which does in fact 2219 find out the size. What isn't clear is if the workaround 2220 here is still actually needed. 
For now, continue with it, 2221 but merge it with the "normal" mmap that would allocate the bss. */ 2222 2223 host_start = (uintptr_t) g2h_untagged(elf_bss); 2224 host_end = (uintptr_t) g2h_untagged(last_bss); 2225 host_map_start = REAL_HOST_PAGE_ALIGN(host_start); 2226 2227 if (host_map_start < host_end) { 2228 void *p = mmap((void *)host_map_start, host_end - host_map_start, 2229 prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 2230 if (p == MAP_FAILED) { 2231 perror("cannot mmap brk"); 2232 exit(-1); 2233 } 2234 } 2235 2236 /* Ensure that the bss page(s) are valid */ 2237 if ((page_get_flags(last_bss-1) & prot) != prot) { 2238 page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1, 2239 prot | PAGE_VALID); 2240 } 2241 2242 if (host_start < host_map_start) { 2243 memset((void *)host_start, 0, host_map_start - host_start); 2244 } 2245 } 2246 2247 #if defined(TARGET_ARM) 2248 static int elf_is_fdpic(struct elfhdr *exec) 2249 { 2250 return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC; 2251 } 2252 #elif defined(TARGET_XTENSA) 2253 static int elf_is_fdpic(struct elfhdr *exec) 2254 { 2255 return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC; 2256 } 2257 #else 2258 /* Default implementation, always false. */ 2259 static int elf_is_fdpic(struct elfhdr *exec) 2260 { 2261 return 0; 2262 } 2263 #endif 2264 2265 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp) 2266 { 2267 uint16_t n; 2268 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs; 2269 2270 /* elf32_fdpic_loadseg */ 2271 n = info->nsegs; 2272 while (n--) { 2273 sp -= 12; 2274 put_user_u32(loadsegs[n].addr, sp+0); 2275 put_user_u32(loadsegs[n].p_vaddr, sp+4); 2276 put_user_u32(loadsegs[n].p_memsz, sp+8); 2277 } 2278 2279 /* elf32_fdpic_loadmap */ 2280 sp -= 4; 2281 put_user_u16(0, sp+0); /* version */ 2282 put_user_u16(info->nsegs, sp+2); /* nsegs */ 2283 2284 info->personality = PER_LINUX_FDPIC; 2285 info->loadmap_addr = sp; 2286 2287 return sp; 2288 } 2289 2290 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, 2291 struct elfhdr *exec, 2292 struct image_info *info, 2293 struct image_info *interp_info) 2294 { 2295 abi_ulong sp; 2296 abi_ulong u_argc, u_argv, u_envp, u_auxv; 2297 int size; 2298 int i; 2299 abi_ulong u_rand_bytes; 2300 uint8_t k_rand_bytes[16]; 2301 abi_ulong u_platform, u_base_platform; 2302 const char *k_platform, *k_base_platform; 2303 const int n = sizeof(elf_addr_t); 2304 2305 sp = p; 2306 2307 /* Needs to be before we load the env/argc/... 
*/ 2308 if (elf_is_fdpic(exec)) { 2309 /* Need 4 byte alignment for these structs */ 2310 sp &= ~3; 2311 sp = loader_build_fdpic_loadmap(info, sp); 2312 info->other_info = interp_info; 2313 if (interp_info) { 2314 interp_info->other_info = info; 2315 sp = loader_build_fdpic_loadmap(interp_info, sp); 2316 info->interpreter_loadmap_addr = interp_info->loadmap_addr; 2317 info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr; 2318 } else { 2319 info->interpreter_loadmap_addr = 0; 2320 info->interpreter_pt_dynamic_addr = 0; 2321 } 2322 } 2323 2324 u_base_platform = 0; 2325 k_base_platform = ELF_BASE_PLATFORM; 2326 if (k_base_platform) { 2327 size_t len = strlen(k_base_platform) + 1; 2328 if (STACK_GROWS_DOWN) { 2329 sp -= (len + n - 1) & ~(n - 1); 2330 u_base_platform = sp; 2331 /* FIXME - check return value of memcpy_to_target() for failure */ 2332 memcpy_to_target(sp, k_base_platform, len); 2333 } else { 2334 memcpy_to_target(sp, k_base_platform, len); 2335 u_base_platform = sp; 2336 sp += len + 1; 2337 } 2338 } 2339 2340 u_platform = 0; 2341 k_platform = ELF_PLATFORM; 2342 if (k_platform) { 2343 size_t len = strlen(k_platform) + 1; 2344 if (STACK_GROWS_DOWN) { 2345 sp -= (len + n - 1) & ~(n - 1); 2346 u_platform = sp; 2347 /* FIXME - check return value of memcpy_to_target() for failure */ 2348 memcpy_to_target(sp, k_platform, len); 2349 } else { 2350 memcpy_to_target(sp, k_platform, len); 2351 u_platform = sp; 2352 sp += len + 1; 2353 } 2354 } 2355 2356 /* Provide 16 byte alignment for the PRNG, and basic alignment for 2357 * the argv and envp pointers. 2358 */ 2359 if (STACK_GROWS_DOWN) { 2360 sp = QEMU_ALIGN_DOWN(sp, 16); 2361 } else { 2362 sp = QEMU_ALIGN_UP(sp, 16); 2363 } 2364 2365 /* 2366 * Generate 16 random bytes for userspace PRNG seeding. 2367 */ 2368 qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes)); 2369 if (STACK_GROWS_DOWN) { 2370 sp -= 16; 2371 u_rand_bytes = sp; 2372 /* FIXME - check return value of memcpy_to_target() for failure */ 2373 memcpy_to_target(sp, k_rand_bytes, 16); 2374 } else { 2375 memcpy_to_target(sp, k_rand_bytes, 16); 2376 u_rand_bytes = sp; 2377 sp += 16; 2378 } 2379 2380 size = (DLINFO_ITEMS + 1) * 2; 2381 if (k_base_platform) 2382 size += 2; 2383 if (k_platform) 2384 size += 2; 2385 #ifdef DLINFO_ARCH_ITEMS 2386 size += DLINFO_ARCH_ITEMS * 2; 2387 #endif 2388 #ifdef ELF_HWCAP2 2389 size += 2; 2390 #endif 2391 info->auxv_len = size * n; 2392 2393 size += envc + argc + 2; 2394 size += 1; /* argc itself */ 2395 size *= n; 2396 2397 /* Allocate space and finalize stack alignment for entry now. */ 2398 if (STACK_GROWS_DOWN) { 2399 u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT); 2400 sp = u_argc; 2401 } else { 2402 u_argc = sp; 2403 sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT); 2404 } 2405 2406 u_argv = u_argc + n; 2407 u_envp = u_argv + (argc + 1) * n; 2408 u_auxv = u_envp + (envc + 1) * n; 2409 info->saved_auxv = u_auxv; 2410 info->argc = argc; 2411 info->envc = envc; 2412 info->argv = u_argv; 2413 info->envp = u_envp; 2414 2415 /* This is correct because Linux defines 2416 * elf_addr_t as Elf32_Off / Elf64_Off 2417 */ 2418 #define NEW_AUX_ENT(id, val) do { \ 2419 put_user_ual(id, u_auxv); u_auxv += n; \ 2420 put_user_ual(val, u_auxv); u_auxv += n; \ 2421 } while(0) 2422 2423 #ifdef ARCH_DLINFO 2424 /* 2425 * ARCH_DLINFO must come first so platform specific code can enforce 2426 * special alignment requirements on the AUXV if necessary (eg. PPC). 
 */
    ARCH_DLINFO;
#endif
    /* There must be exactly DLINFO_ITEMS entries here, or the assert
     * on info->auxv_len will trigger.
     */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    if ((info->alignment & ~qemu_host_page_mask) != 0) {
        /* Target doesn't support host page size alignment */
        NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    } else {
        NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
                                               qemu_host_page_size)));
    }
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, info->entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
    NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));
    NEW_AUX_ENT(AT_EXECFN, info->file_string);

#ifdef ELF_HWCAP2
    NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
#endif

    if (u_base_platform) {
        NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform);
    }
    if (u_platform) {
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
    }
    NEW_AUX_ENT (AT_NULL, 0);
#undef NEW_AUX_ENT

    /* Check that our initial calculation of the auxv length matches how much
     * we actually put into it.
     */
    assert(info->auxv_len == u_auxv - info->saved_auxv);

    put_user_ual(argc, u_argc);

    p = info->arg_strings;
    for (i = 0; i < argc; ++i) {
        put_user_ual(p, u_argv);
        u_argv += n;
        p += target_strlen(p) + 1;
    }
    put_user_ual(0, u_argv);

    p = info->env_strings;
    for (i = 0; i < envc; ++i) {
        put_user_ual(p, u_envp);
        u_envp += n;
        p += target_strlen(p) + 1;
    }
    put_user_ual(0, u_envp);

    return sp;
}
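/*
 * For illustration only: with a downward-growing stack, the initial
 * process stack assembled by create_elf_tables() above ends up roughly
 * like this (a sketch; exact spacing depends on the ABI word size n,
 * on STACK_ALIGNMENT, and on the string lengths):
 *
 *   higher addresses
 *   +--------------------------+
 *   | executable path          |  <- info->file_string
 *   | environment strings      |  <- info->env_strings
 *   | argument strings         |  <- info->arg_strings
 *   | platform string(s)       |  <- u_base_platform / u_platform
 *   | AT_RANDOM seed bytes     |  <- u_rand_bytes
 *   +--------------------------+
 *   | auxv pairs ...  AT_NULL  |  <- info->saved_auxv
 *   | envp[0..envc-1], NULL    |  <- info->envp
 *   | argv[0..argc-1], NULL    |  <- info->argv
 *   | argc                     |  <- sp returned to the caller
 *   +--------------------------+
 *   lower addresses
 */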
#if defined(HI_COMMPAGE)
#define LO_COMMPAGE -1
#elif defined(LO_COMMPAGE)
#define HI_COMMPAGE 0
#else
#define HI_COMMPAGE 0
#define LO_COMMPAGE -1
#ifndef INIT_GUEST_COMMPAGE
#define init_guest_commpage() true
#endif
#endif

static void pgb_fail_in_use(const char *image_name)
{
    error_report("%s: requires virtual address space that is in use "
                 "(omit the -B option or choose a different value)",
                 image_name);
    exit(EXIT_FAILURE);
}

static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
                                abi_ulong guest_hiaddr, long align)
{
    const int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
    void *addr, *test;

    if (!QEMU_IS_ALIGNED(guest_base, align)) {
        fprintf(stderr, "Requested guest base %p does not satisfy "
                "host minimum alignment (0x%lx)\n",
                (void *)guest_base, align);
        exit(EXIT_FAILURE);
    }

    /* Sanity check the guest binary. */
    if (reserved_va) {
        if (guest_hiaddr > reserved_va) {
            error_report("%s: requires more than reserved virtual "
                         "address space (0x%" PRIx64 " > 0x%lx)",
                         image_name, (uint64_t)guest_hiaddr, reserved_va);
            exit(EXIT_FAILURE);
        }
    } else {
#if HOST_LONG_BITS < TARGET_ABI_BITS
        if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
            error_report("%s: requires more virtual address space "
                         "than the host can provide (0x%" PRIx64 ")",
                         image_name, (uint64_t)guest_hiaddr + 1 - guest_base);
            exit(EXIT_FAILURE);
        }
#endif
    }

    /*
     * Expand the allocation to the entire reserved_va.
     * Exclude the mmap_min_addr hole.
     */
    if (reserved_va) {
        guest_loaddr = (guest_base >= mmap_min_addr ? 0
                        : mmap_min_addr - guest_base);
        guest_hiaddr = reserved_va;
    }

    /* Reserve the address space for the binary, or reserved_va. */
    test = g2h_untagged(guest_loaddr);
    addr = mmap(test, guest_hiaddr - guest_loaddr + 1, PROT_NONE, flags, -1, 0);
    if (test != addr) {
        pgb_fail_in_use(image_name);
    }
    qemu_log_mask(CPU_LOG_PAGE,
                  "%s: base @ %p for %" PRIu64 " bytes\n",
                  __func__, addr, (uint64_t)guest_hiaddr - guest_loaddr + 1);
}

/**
 * pgd_find_hole_fallback: potential mmap address
 * @guest_size: size of available space
 * @brk: location of break
 * @align: memory alignment
 *
 * This is a fallback method for finding a hole in the host address
 * space if we don't have the benefit of being able to access
 * /proc/self/maps. It can potentially take a very long time as we can
 * only dumbly iterate up the host address space seeing if the
 * allocation would work.
 */
static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk,
                                        long align, uintptr_t offset)
{
    uintptr_t base;

    /* Start (aligned) at the bottom and work our way up */
    base = ROUND_UP(mmap_min_addr, align);

    while (true) {
        uintptr_t align_start, end;
        align_start = ROUND_UP(base, align);
        end = align_start + guest_size + offset;

        /* if brk is anywhere in the range give ourselves some room to grow. */
        if (align_start <= brk && brk < end) {
            base = brk + (16 * MiB);
            continue;
        } else if (align_start + guest_size < align_start) {
            /* we have run out of space */
            return -1;
        } else {
            int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE |
                MAP_FIXED_NOREPLACE;
            void * mmap_start = mmap((void *) align_start, guest_size,
                                     PROT_NONE, flags, -1, 0);
            if (mmap_start != MAP_FAILED) {
                munmap(mmap_start, guest_size);
                if (mmap_start == (void *) align_start) {
                    qemu_log_mask(CPU_LOG_PAGE,
                                  "%s: base @ %p for %" PRIdPTR" bytes\n",
                                  __func__, mmap_start + offset, guest_size);
                    return (uintptr_t) mmap_start + offset;
                }
            }
            base += qemu_host_page_size;
        }
    }
}
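/*
 * For illustration, the probe idiom used by the fallback above, in
 * isolation: try to map a range without clobbering anything, and treat
 * "the kernel moved us" as "range not free". A minimal sketch under
 * those assumptions (host_range_is_free is a hypothetical helper, not
 * part of QEMU; MAP_FIXED_NOREPLACE may be defined as 0 on old hosts,
 * in which case the map-and-compare still detects relocation):
 */
#if 0
static bool host_range_is_free(uintptr_t start, size_t size)
{
    void *p = mmap((void *)start, size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE |
                   MAP_FIXED_NOREPLACE, -1, 0);
    if (p == MAP_FAILED) {
        return false;               /* in use (EEXIST) or out of memory */
    }
    munmap(p, size);                /* just probing, release immediately */
    return p == (void *)start;      /* old kernels may relocate instead */
}
#endif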
/* Return value for guest_base, or -1 if no hole found. */
static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size,
                               long align, uintptr_t offset)
{
    GSList *maps, *iter;
    uintptr_t this_start, this_end, next_start, brk;
    intptr_t ret = -1;

    assert(QEMU_IS_ALIGNED(guest_loaddr, align));

    maps = read_self_maps();

    /* Read brk after we've read the maps, which will malloc. */
    brk = (uintptr_t)sbrk(0);

    if (!maps) {
        return pgd_find_hole_fallback(guest_size, brk, align, offset);
    }

    /* The first hole is before the first map entry. */
    this_start = mmap_min_addr;

    for (iter = maps; iter;
         this_start = next_start, iter = g_slist_next(iter)) {
        uintptr_t align_start, hole_size;

        this_end = ((MapInfo *)iter->data)->start;
        next_start = ((MapInfo *)iter->data)->end;
        align_start = ROUND_UP(this_start + offset, align);

        /* Skip holes that are too small. */
        if (align_start >= this_end) {
            continue;
        }
        hole_size = this_end - align_start;
        if (hole_size < guest_size) {
            continue;
        }

        /* If this hole contains brk, give ourselves some room to grow. */
        if (this_start <= brk && brk < this_end) {
            hole_size -= guest_size;
            if (sizeof(uintptr_t) == 8 && hole_size >= 1 * GiB) {
                align_start += 1 * GiB;
            } else if (hole_size >= 16 * MiB) {
                align_start += 16 * MiB;
            } else {
                align_start = (this_end - guest_size) & -align;
                if (align_start < this_start) {
                    continue;
                }
            }
        }

        /* Record the lowest successful match. */
        if (ret < 0) {
            ret = align_start;
        }
        /* If this hole contains the identity map, select it. */
        if (align_start <= guest_loaddr &&
            guest_loaddr + guest_size <= this_end) {
            ret = 0;
        }
        /* If this hole ends above the identity map, stop looking. */
        if (this_end >= guest_loaddr) {
            break;
        }
    }
    free_self_maps(maps);

    if (ret != -1) {
        qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %" PRIxPTR
                      " for %" PRIuPTR " bytes\n",
                      __func__, ret, guest_size);
    }

    return ret;
}

static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
                       abi_ulong orig_hiaddr, long align)
{
    uintptr_t loaddr = orig_loaddr;
    uintptr_t hiaddr = orig_hiaddr;
    uintptr_t offset = 0;
    uintptr_t addr;

    if (hiaddr != orig_hiaddr) {
        error_report("%s: requires virtual address space that the "
                     "host cannot provide (0x%" PRIx64 ")",
                     image_name, (uint64_t)orig_hiaddr + 1);
        exit(EXIT_FAILURE);
    }

    loaddr &= -align;
    if (HI_COMMPAGE) {
        /*
         * Extend the allocation to include the commpage.
         * For a 64-bit host, this is just 4GiB; for a 32-bit host we
         * need to ensure there is space below the guest_base so we
         * can map the commpage in the place needed when the address
         * arithmetic wraps around.
         */
        if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
            hiaddr = UINT32_MAX;
        } else {
            offset = -(HI_COMMPAGE & -align);
        }
    } else if (LO_COMMPAGE != -1) {
        loaddr = MIN(loaddr, LO_COMMPAGE & -align);
    }

    addr = pgb_find_hole(loaddr, hiaddr - loaddr + 1, align, offset);
    if (addr == -1) {
        /*
         * If HI_COMMPAGE, there *might* be a non-consecutive allocation
         * that can satisfy both. But as the normal arm32 link base address
         * is ~32k, and we extend down to include the commpage, making the
         * overhead only ~96k, this is unlikely.
         */
        error_report("%s: Unable to allocate %#zx bytes of "
                     "virtual address space", image_name,
                     (size_t)(hiaddr - loaddr));
        exit(EXIT_FAILURE);
    }

    guest_base = addr;

    qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %"PRIxPTR" for %" PRIuPTR" bytes\n",
                  __func__, addr, hiaddr - loaddr);
}
static void pgb_dynamic(const char *image_name, long align)
{
    /*
     * The executable is dynamic and does not require a fixed address.
     * All we need is a commpage that satisfies align.
     * If we do not need a commpage, leave guest_base == 0.
     */
    if (HI_COMMPAGE) {
        uintptr_t addr, commpage;

        /* 64-bit hosts should have used reserved_va. */
        assert(sizeof(uintptr_t) == 4);

        /*
         * By putting the commpage at the first hole, that puts guest_base
         * just above that, and maximises the positive guest addresses.
         */
        commpage = HI_COMMPAGE & -align;
        addr = pgb_find_hole(commpage, -commpage, align, 0);
        assert(addr != -1);
        guest_base = addr;
    }
}

static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
                            abi_ulong guest_hiaddr, long align)
{
    int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
    void *addr, *test;

    if (guest_hiaddr > reserved_va) {
        error_report("%s: requires more than reserved virtual "
                     "address space (0x%" PRIx64 " > 0x%lx)",
                     image_name, (uint64_t)guest_hiaddr, reserved_va);
        exit(EXIT_FAILURE);
    }

    /* Widen the "image" to the entire reserved address space. */
    pgb_static(image_name, 0, reserved_va, align);

    /* osdep.h defines this as 0 if it's missing */
    flags |= MAP_FIXED_NOREPLACE;

    /* Reserve the memory on the host. */
    assert(guest_base != 0);
    test = g2h_untagged(0);
    addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0);
    if (addr == MAP_FAILED || addr != test) {
        error_report("Unable to reserve 0x%lx bytes of virtual address "
                     "space at %p (%s) for use as guest address space (check your "
                     "virtual memory ulimit setting, mmap_min_addr or reserve less "
                     "using qemu-user's -R option)",
                     reserved_va + 1, test, strerror(errno));
        exit(EXIT_FAILURE);
    }

    qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
                  __func__, addr, reserved_va + 1);
}
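/*
 * For orientation, how the strategies above are chosen by
 * probe_guest_base() below, shown with illustrative qemu-user
 * invocations ("qemu-foo" is a placeholder for any linux-user binary,
 * and the addresses are made up):
 *
 *   qemu-foo -B 0x100000 prog    # user picked base:  pgb_have_guest_base()
 *   qemu-foo -R 0xffff0000 prog  # reserved_va set:   pgb_reserved_va()
 *   qemu-foo prog                # ET_EXEC, fixed VA: pgb_static()
 *   qemu-foo pie-prog            # ET_DYN, no fixed:  pgb_dynamic()
 */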
2833 */ 2834 assert(have_guest_base); 2835 pgb_fail_in_use(image_name); 2836 } 2837 2838 assert(QEMU_IS_ALIGNED(guest_base, align)); 2839 qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space " 2840 "@ 0x%" PRIx64 "\n", (uint64_t)guest_base); 2841 } 2842 2843 enum { 2844 /* The string "GNU\0" as a magic number. */ 2845 GNU0_MAGIC = const_le32('G' | 'N' << 8 | 'U' << 16), 2846 NOTE_DATA_SZ = 1 * KiB, 2847 NOTE_NAME_SZ = 4, 2848 ELF_GNU_PROPERTY_ALIGN = ELF_CLASS == ELFCLASS32 ? 4 : 8, 2849 }; 2850 2851 /* 2852 * Process a single gnu_property entry. 2853 * Return false for error. 2854 */ 2855 static bool parse_elf_property(const uint32_t *data, int *off, int datasz, 2856 struct image_info *info, bool have_prev_type, 2857 uint32_t *prev_type, Error **errp) 2858 { 2859 uint32_t pr_type, pr_datasz, step; 2860 2861 if (*off > datasz || !QEMU_IS_ALIGNED(*off, ELF_GNU_PROPERTY_ALIGN)) { 2862 goto error_data; 2863 } 2864 datasz -= *off; 2865 data += *off / sizeof(uint32_t); 2866 2867 if (datasz < 2 * sizeof(uint32_t)) { 2868 goto error_data; 2869 } 2870 pr_type = data[0]; 2871 pr_datasz = data[1]; 2872 data += 2; 2873 datasz -= 2 * sizeof(uint32_t); 2874 step = ROUND_UP(pr_datasz, ELF_GNU_PROPERTY_ALIGN); 2875 if (step > datasz) { 2876 goto error_data; 2877 } 2878 2879 /* Properties are supposed to be unique and sorted on pr_type. */ 2880 if (have_prev_type && pr_type <= *prev_type) { 2881 if (pr_type == *prev_type) { 2882 error_setg(errp, "Duplicate property in PT_GNU_PROPERTY"); 2883 } else { 2884 error_setg(errp, "Unsorted property in PT_GNU_PROPERTY"); 2885 } 2886 return false; 2887 } 2888 *prev_type = pr_type; 2889 2890 if (!arch_parse_elf_property(pr_type, pr_datasz, data, info, errp)) { 2891 return false; 2892 } 2893 2894 *off += 2 * sizeof(uint32_t) + step; 2895 return true; 2896 2897 error_data: 2898 error_setg(errp, "Ill-formed property in PT_GNU_PROPERTY"); 2899 return false; 2900 } 2901 2902 /* Process NT_GNU_PROPERTY_TYPE_0. */ 2903 static bool parse_elf_properties(int image_fd, 2904 struct image_info *info, 2905 const struct elf_phdr *phdr, 2906 char bprm_buf[BPRM_BUF_SIZE], 2907 Error **errp) 2908 { 2909 union { 2910 struct elf_note nhdr; 2911 uint32_t data[NOTE_DATA_SZ / sizeof(uint32_t)]; 2912 } note; 2913 2914 int n, off, datasz; 2915 bool have_prev_type; 2916 uint32_t prev_type; 2917 2918 /* Unless the arch requires properties, ignore them. */ 2919 if (!ARCH_USE_GNU_PROPERTY) { 2920 return true; 2921 } 2922 2923 /* If the properties are crazy large, that's too bad. */ 2924 n = phdr->p_filesz; 2925 if (n > sizeof(note)) { 2926 error_setg(errp, "PT_GNU_PROPERTY too large"); 2927 return false; 2928 } 2929 if (n < sizeof(note.nhdr)) { 2930 error_setg(errp, "PT_GNU_PROPERTY too small"); 2931 return false; 2932 } 2933 2934 if (phdr->p_offset + n <= BPRM_BUF_SIZE) { 2935 memcpy(¬e, bprm_buf + phdr->p_offset, n); 2936 } else { 2937 ssize_t len = pread(image_fd, ¬e, n, phdr->p_offset); 2938 if (len != n) { 2939 error_setg_errno(errp, errno, "Error reading file header"); 2940 return false; 2941 } 2942 } 2943 2944 /* 2945 * The contents of a valid PT_GNU_PROPERTY is a sequence 2946 * of uint32_t -- swap them all now. 2947 */ 2948 #ifdef BSWAP_NEEDED 2949 for (int i = 0; i < n / 4; i++) { 2950 bswap32s(note.data + i); 2951 } 2952 #endif 2953 2954 /* 2955 * Note that nhdr is 3 words, and that the "name" described by namesz 2956 * immediately follows nhdr and is thus at the 4th word. Further, all 2957 * of the inputs to the kernel's round_up are multiples of 4. 
2958 */ 2959 if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 || 2960 note.nhdr.n_namesz != NOTE_NAME_SZ || 2961 note.data[3] != GNU0_MAGIC) { 2962 error_setg(errp, "Invalid note in PT_GNU_PROPERTY"); 2963 return false; 2964 } 2965 off = sizeof(note.nhdr) + NOTE_NAME_SZ; 2966 2967 datasz = note.nhdr.n_descsz + off; 2968 if (datasz > n) { 2969 error_setg(errp, "Invalid note size in PT_GNU_PROPERTY"); 2970 return false; 2971 } 2972 2973 have_prev_type = false; 2974 prev_type = 0; 2975 while (1) { 2976 if (off == datasz) { 2977 return true; /* end, exit ok */ 2978 } 2979 if (!parse_elf_property(note.data, &off, datasz, info, 2980 have_prev_type, &prev_type, errp)) { 2981 return false; 2982 } 2983 have_prev_type = true; 2984 } 2985 } 2986 2987 /* Load an ELF image into the address space. 2988 2989 IMAGE_NAME is the filename of the image, to use in error messages. 2990 IMAGE_FD is the open file descriptor for the image. 2991 2992 BPRM_BUF is a copy of the beginning of the file; this of course 2993 contains the elf file header at offset 0. It is assumed that this 2994 buffer is sufficiently aligned to present no problems to the host 2995 in accessing data at aligned offsets within the buffer. 2996 2997 On return: INFO values will be filled in, as necessary or available. */ 2998 2999 static void load_elf_image(const char *image_name, int image_fd, 3000 struct image_info *info, char **pinterp_name, 3001 char bprm_buf[BPRM_BUF_SIZE]) 3002 { 3003 struct elfhdr *ehdr = (struct elfhdr *)bprm_buf; 3004 struct elf_phdr *phdr; 3005 abi_ulong load_addr, load_bias, loaddr, hiaddr, error; 3006 int i, retval, prot_exec; 3007 Error *err = NULL; 3008 3009 /* First of all, some simple consistency checks */ 3010 if (!elf_check_ident(ehdr)) { 3011 error_setg(&err, "Invalid ELF image for this architecture"); 3012 goto exit_errmsg; 3013 } 3014 bswap_ehdr(ehdr); 3015 if (!elf_check_ehdr(ehdr)) { 3016 error_setg(&err, "Invalid ELF image for this architecture"); 3017 goto exit_errmsg; 3018 } 3019 3020 i = ehdr->e_phnum * sizeof(struct elf_phdr); 3021 if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) { 3022 phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff); 3023 } else { 3024 phdr = (struct elf_phdr *) alloca(i); 3025 retval = pread(image_fd, phdr, i, ehdr->e_phoff); 3026 if (retval != i) { 3027 goto exit_read; 3028 } 3029 } 3030 bswap_phdr(phdr, ehdr->e_phnum); 3031 3032 info->nsegs = 0; 3033 info->pt_dynamic_addr = 0; 3034 3035 mmap_lock(); 3036 3037 /* 3038 * Find the maximum size of the image and allocate an appropriate 3039 * amount of memory to handle that. Locate the interpreter, if any. 
/* Load an ELF image into the address space.

   IMAGE_NAME is the filename of the image, to use in error messages.
   IMAGE_FD is the open file descriptor for the image.

   BPRM_BUF is a copy of the beginning of the file; this of course
   contains the elf file header at offset 0. It is assumed that this
   buffer is sufficiently aligned to present no problems to the host
   in accessing data at aligned offsets within the buffer.

   On return: INFO values will be filled in, as necessary or available. */

static void load_elf_image(const char *image_name, int image_fd,
                           struct image_info *info, char **pinterp_name,
                           char bprm_buf[BPRM_BUF_SIZE])
{
    struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
    struct elf_phdr *phdr;
    abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
    int i, retval, prot_exec;
    Error *err = NULL;

    /* First of all, some simple consistency checks */
    if (!elf_check_ident(ehdr)) {
        error_setg(&err, "Invalid ELF image for this architecture");
        goto exit_errmsg;
    }
    bswap_ehdr(ehdr);
    if (!elf_check_ehdr(ehdr)) {
        error_setg(&err, "Invalid ELF image for this architecture");
        goto exit_errmsg;
    }

    i = ehdr->e_phnum * sizeof(struct elf_phdr);
    if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
        phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
    } else {
        phdr = (struct elf_phdr *) alloca(i);
        retval = pread(image_fd, phdr, i, ehdr->e_phoff);
        if (retval != i) {
            goto exit_read;
        }
    }
    bswap_phdr(phdr, ehdr->e_phnum);

    info->nsegs = 0;
    info->pt_dynamic_addr = 0;

    mmap_lock();

    /*
     * Find the maximum size of the image and allocate an appropriate
     * amount of memory to handle that. Locate the interpreter, if any.
     */
    loaddr = -1, hiaddr = 0;
    info->alignment = 0;
    info->exec_stack = EXSTACK_DEFAULT;
    for (i = 0; i < ehdr->e_phnum; ++i) {
        struct elf_phdr *eppnt = phdr + i;
        if (eppnt->p_type == PT_LOAD) {
            abi_ulong a = eppnt->p_vaddr - eppnt->p_offset;
            if (a < loaddr) {
                loaddr = a;
            }
            a = eppnt->p_vaddr + eppnt->p_memsz - 1;
            if (a > hiaddr) {
                hiaddr = a;
            }
            ++info->nsegs;
            info->alignment |= eppnt->p_align;
        } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
            g_autofree char *interp_name = NULL;

            if (*pinterp_name) {
                error_setg(&err, "Multiple PT_INTERP entries");
                goto exit_errmsg;
            }

            interp_name = g_malloc(eppnt->p_filesz);

            if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
                memcpy(interp_name, bprm_buf + eppnt->p_offset,
                       eppnt->p_filesz);
            } else {
                retval = pread(image_fd, interp_name, eppnt->p_filesz,
                               eppnt->p_offset);
                if (retval != eppnt->p_filesz) {
                    goto exit_read;
                }
            }
            if (interp_name[eppnt->p_filesz - 1] != 0) {
                error_setg(&err, "Invalid PT_INTERP entry");
                goto exit_errmsg;
            }
            *pinterp_name = g_steal_pointer(&interp_name);
        } else if (eppnt->p_type == PT_GNU_PROPERTY) {
            if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) {
                goto exit_errmsg;
            }
        } else if (eppnt->p_type == PT_GNU_STACK) {
            info->exec_stack = eppnt->p_flags & PF_X;
        }
    }

    if (pinterp_name != NULL) {
        /*
         * This is the main executable.
         *
         * Reserve extra space for brk.
         * We hold on to this space while placing the interpreter
         * and the stack, lest they be placed immediately after
         * the data segment and block allocation from the brk.
         *
         * 16MB is chosen as "large enough" without being so large as
         * to allow the result to not fit with a 32-bit guest on a
         * 32-bit host. However some 64 bit guests (e.g. s390x)
         * attempt to place their heap further ahead and currently
         * nothing stops them smashing into QEMU's address space.
         */
#if TARGET_LONG_BITS == 64
        info->reserve_brk = 32 * MiB;
#else
        info->reserve_brk = 16 * MiB;
#endif
        hiaddr += info->reserve_brk;

        if (ehdr->e_type == ET_EXEC) {
            /*
             * Make sure that the low address does not conflict with
             * MMAP_MIN_ADDR or the QEMU application itself.
             */
            probe_guest_base(image_name, loaddr, hiaddr);
        } else {
            /*
             * The binary is dynamic, but we still need to
             * select guest_base. In this case we pass a size.
             */
            probe_guest_base(image_name, 0, hiaddr - loaddr);
        }
    }

    /*
     * Reserve address space for all of this.
     *
     * In the case of ET_EXEC, we supply MAP_FIXED so that we get
     * exactly the address range that is required.
     *
     * Otherwise this is ET_DYN, and we are searching for a location
     * that can hold the memory space required. If the image is
     * pre-linked, LOADDR will be non-zero, and the kernel should
     * honor that address if it happens to be free.
     *
     * In both cases, we will overwrite pages in this range with mappings
     * from the executable.
     */
    load_addr = target_mmap(loaddr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
                            MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
                            (ehdr->e_type == ET_EXEC ?
MAP_FIXED : 0), 3145 -1, 0); 3146 if (load_addr == -1) { 3147 goto exit_mmap; 3148 } 3149 load_bias = load_addr - loaddr; 3150 3151 if (elf_is_fdpic(ehdr)) { 3152 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs = 3153 g_malloc(sizeof(*loadsegs) * info->nsegs); 3154 3155 for (i = 0; i < ehdr->e_phnum; ++i) { 3156 switch (phdr[i].p_type) { 3157 case PT_DYNAMIC: 3158 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias; 3159 break; 3160 case PT_LOAD: 3161 loadsegs->addr = phdr[i].p_vaddr + load_bias; 3162 loadsegs->p_vaddr = phdr[i].p_vaddr; 3163 loadsegs->p_memsz = phdr[i].p_memsz; 3164 ++loadsegs; 3165 break; 3166 } 3167 } 3168 } 3169 3170 info->load_bias = load_bias; 3171 info->code_offset = load_bias; 3172 info->data_offset = load_bias; 3173 info->load_addr = load_addr; 3174 info->entry = ehdr->e_entry + load_bias; 3175 info->start_code = -1; 3176 info->end_code = 0; 3177 info->start_data = -1; 3178 info->end_data = 0; 3179 info->brk = 0; 3180 info->elf_flags = ehdr->e_flags; 3181 3182 prot_exec = PROT_EXEC; 3183 #ifdef TARGET_AARCH64 3184 /* 3185 * If the BTI feature is present, this indicates that the executable 3186 * pages of the startup binary should be mapped with PROT_BTI, so that 3187 * branch targets are enforced. 3188 * 3189 * The startup binary is either the interpreter or the static executable. 3190 * The interpreter is responsible for all pages of a dynamic executable. 3191 * 3192 * Elf notes are backward compatible to older cpus. 3193 * Do not enable BTI unless it is supported. 3194 */ 3195 if ((info->note_flags & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) 3196 && (pinterp_name == NULL || *pinterp_name == 0) 3197 && cpu_isar_feature(aa64_bti, ARM_CPU(thread_cpu))) { 3198 prot_exec |= TARGET_PROT_BTI; 3199 } 3200 #endif 3201 3202 for (i = 0; i < ehdr->e_phnum; i++) { 3203 struct elf_phdr *eppnt = phdr + i; 3204 if (eppnt->p_type == PT_LOAD) { 3205 abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len; 3206 int elf_prot = 0; 3207 3208 if (eppnt->p_flags & PF_R) { 3209 elf_prot |= PROT_READ; 3210 } 3211 if (eppnt->p_flags & PF_W) { 3212 elf_prot |= PROT_WRITE; 3213 } 3214 if (eppnt->p_flags & PF_X) { 3215 elf_prot |= prot_exec; 3216 } 3217 3218 vaddr = load_bias + eppnt->p_vaddr; 3219 vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr); 3220 vaddr_ps = TARGET_ELF_PAGESTART(vaddr); 3221 3222 vaddr_ef = vaddr + eppnt->p_filesz; 3223 vaddr_em = vaddr + eppnt->p_memsz; 3224 3225 /* 3226 * Some segments may be completely empty, with a non-zero p_memsz 3227 * but no backing file segment. 3228 */ 3229 if (eppnt->p_filesz != 0) { 3230 vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po); 3231 error = target_mmap(vaddr_ps, vaddr_len, elf_prot, 3232 MAP_PRIVATE | MAP_FIXED, 3233 image_fd, eppnt->p_offset - vaddr_po); 3234 3235 if (error == -1) { 3236 goto exit_mmap; 3237 } 3238 3239 /* 3240 * If the load segment requests extra zeros (e.g. bss), map it. 3241 */ 3242 if (eppnt->p_filesz < eppnt->p_memsz) { 3243 zero_bss(vaddr_ef, vaddr_em, elf_prot); 3244 } 3245 } else if (eppnt->p_memsz != 0) { 3246 vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_memsz + vaddr_po); 3247 error = target_mmap(vaddr_ps, vaddr_len, elf_prot, 3248 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 3249 -1, 0); 3250 3251 if (error == -1) { 3252 goto exit_mmap; 3253 } 3254 } 3255 3256 /* Find the full program boundaries. 
*/ 3257 if (elf_prot & PROT_EXEC) { 3258 if (vaddr < info->start_code) { 3259 info->start_code = vaddr; 3260 } 3261 if (vaddr_ef > info->end_code) { 3262 info->end_code = vaddr_ef; 3263 } 3264 } 3265 if (elf_prot & PROT_WRITE) { 3266 if (vaddr < info->start_data) { 3267 info->start_data = vaddr; 3268 } 3269 if (vaddr_ef > info->end_data) { 3270 info->end_data = vaddr_ef; 3271 } 3272 } 3273 if (vaddr_em > info->brk) { 3274 info->brk = vaddr_em; 3275 } 3276 #ifdef TARGET_MIPS 3277 } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) { 3278 Mips_elf_abiflags_v0 abiflags; 3279 if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) { 3280 error_setg(&err, "Invalid PT_MIPS_ABIFLAGS entry"); 3281 goto exit_errmsg; 3282 } 3283 if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) { 3284 memcpy(&abiflags, bprm_buf + eppnt->p_offset, 3285 sizeof(Mips_elf_abiflags_v0)); 3286 } else { 3287 retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0), 3288 eppnt->p_offset); 3289 if (retval != sizeof(Mips_elf_abiflags_v0)) { 3290 goto exit_read; 3291 } 3292 } 3293 bswap_mips_abiflags(&abiflags); 3294 info->fp_abi = abiflags.fp_abi; 3295 #endif 3296 } 3297 } 3298 3299 if (info->end_data == 0) { 3300 info->start_data = info->end_code; 3301 info->end_data = info->end_code; 3302 } 3303 3304 if (qemu_log_enabled()) { 3305 load_symbols(ehdr, image_fd, load_bias); 3306 } 3307 3308 debuginfo_report_elf(image_name, image_fd, load_bias); 3309 3310 mmap_unlock(); 3311 3312 close(image_fd); 3313 return; 3314 3315 exit_read: 3316 if (retval >= 0) { 3317 error_setg(&err, "Incomplete read of file header"); 3318 } else { 3319 error_setg_errno(&err, errno, "Error reading file header"); 3320 } 3321 goto exit_errmsg; 3322 exit_mmap: 3323 error_setg_errno(&err, errno, "Error mapping file"); 3324 goto exit_errmsg; 3325 exit_errmsg: 3326 error_reportf_err(err, "%s: ", image_name); 3327 exit(-1); 3328 } 3329 3330 static void load_elf_interp(const char *filename, struct image_info *info, 3331 char bprm_buf[BPRM_BUF_SIZE]) 3332 { 3333 int fd, retval; 3334 Error *err = NULL; 3335 3336 fd = open(path(filename), O_RDONLY); 3337 if (fd < 0) { 3338 error_setg_file_open(&err, errno, filename); 3339 error_report_err(err); 3340 exit(-1); 3341 } 3342 3343 retval = read(fd, bprm_buf, BPRM_BUF_SIZE); 3344 if (retval < 0) { 3345 error_setg_errno(&err, errno, "Error reading file header"); 3346 error_reportf_err(err, "%s: ", filename); 3347 exit(-1); 3348 } 3349 3350 if (retval < BPRM_BUF_SIZE) { 3351 memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval); 3352 } 3353 3354 load_elf_image(filename, fd, info, NULL, bprm_buf); 3355 } 3356 3357 static int symfind(const void *s0, const void *s1) 3358 { 3359 struct elf_sym *sym = (struct elf_sym *)s1; 3360 __typeof(sym->st_value) addr = *(uint64_t *)s0; 3361 int result = 0; 3362 3363 if (addr < sym->st_value) { 3364 result = -1; 3365 } else if (addr >= sym->st_value + sym->st_size) { 3366 result = 1; 3367 } 3368 return result; 3369 } 3370 3371 static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr) 3372 { 3373 #if ELF_CLASS == ELFCLASS32 3374 struct elf_sym *syms = s->disas_symtab.elf32; 3375 #else 3376 struct elf_sym *syms = s->disas_symtab.elf64; 3377 #endif 3378 3379 // binary search 3380 struct elf_sym *sym; 3381 3382 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind); 3383 if (sym != NULL) { 3384 return s->disas_strtab + sym->st_name; 3385 } 3386 3387 return ""; 3388 } 3389 3390 /* FIXME: This should use elf_ops.h */ 3391 static int symcmp(const void *s0, 
const void *s1) 3392 { 3393 struct elf_sym *sym0 = (struct elf_sym *)s0; 3394 struct elf_sym *sym1 = (struct elf_sym *)s1; 3395 return (sym0->st_value < sym1->st_value) 3396 ? -1 3397 : ((sym0->st_value > sym1->st_value) ? 1 : 0); 3398 } 3399 3400 /* Best attempt to load symbols from this ELF object. */ 3401 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias) 3402 { 3403 int i, shnum, nsyms, sym_idx = 0, str_idx = 0; 3404 uint64_t segsz; 3405 struct elf_shdr *shdr; 3406 char *strings = NULL; 3407 struct syminfo *s = NULL; 3408 struct elf_sym *new_syms, *syms = NULL; 3409 3410 shnum = hdr->e_shnum; 3411 i = shnum * sizeof(struct elf_shdr); 3412 shdr = (struct elf_shdr *)alloca(i); 3413 if (pread(fd, shdr, i, hdr->e_shoff) != i) { 3414 return; 3415 } 3416 3417 bswap_shdr(shdr, shnum); 3418 for (i = 0; i < shnum; ++i) { 3419 if (shdr[i].sh_type == SHT_SYMTAB) { 3420 sym_idx = i; 3421 str_idx = shdr[i].sh_link; 3422 goto found; 3423 } 3424 } 3425 3426 /* There will be no symbol table if the file was stripped. */ 3427 return; 3428 3429 found: 3430 /* Now know where the strtab and symtab are. Snarf them. */ 3431 s = g_try_new(struct syminfo, 1); 3432 if (!s) { 3433 goto give_up; 3434 } 3435 3436 segsz = shdr[str_idx].sh_size; 3437 s->disas_strtab = strings = g_try_malloc(segsz); 3438 if (!strings || 3439 pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) { 3440 goto give_up; 3441 } 3442 3443 segsz = shdr[sym_idx].sh_size; 3444 syms = g_try_malloc(segsz); 3445 if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) { 3446 goto give_up; 3447 } 3448 3449 if (segsz / sizeof(struct elf_sym) > INT_MAX) { 3450 /* Implausibly large symbol table: give up rather than ploughing 3451 * on with the number of symbols calculation overflowing 3452 */ 3453 goto give_up; 3454 } 3455 nsyms = segsz / sizeof(struct elf_sym); 3456 for (i = 0; i < nsyms; ) { 3457 bswap_sym(syms + i); 3458 /* Throw away entries which we do not need. */ 3459 if (syms[i].st_shndx == SHN_UNDEF 3460 || syms[i].st_shndx >= SHN_LORESERVE 3461 || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { 3462 if (i < --nsyms) { 3463 syms[i] = syms[nsyms]; 3464 } 3465 } else { 3466 #if defined(TARGET_ARM) || defined (TARGET_MIPS) 3467 /* The bottom address bit marks a Thumb or MIPS16 symbol. */ 3468 syms[i].st_value &= ~(target_ulong)1; 3469 #endif 3470 syms[i].st_value += load_bias; 3471 i++; 3472 } 3473 } 3474 3475 /* No "useful" symbol. */ 3476 if (nsyms == 0) { 3477 goto give_up; 3478 } 3479 3480 /* Attempt to free the storage associated with the local symbols 3481 that we threw away. Whether or not this has any effect on the 3482 memory allocation depends on the malloc implementation and how 3483 many symbols we managed to discard. 
*/ 3484 new_syms = g_try_renew(struct elf_sym, syms, nsyms); 3485 if (new_syms == NULL) { 3486 goto give_up; 3487 } 3488 syms = new_syms; 3489 3490 qsort(syms, nsyms, sizeof(*syms), symcmp); 3491 3492 s->disas_num_syms = nsyms; 3493 #if ELF_CLASS == ELFCLASS32 3494 s->disas_symtab.elf32 = syms; 3495 #else 3496 s->disas_symtab.elf64 = syms; 3497 #endif 3498 s->lookup_symbol = lookup_symbolxx; 3499 s->next = syminfos; 3500 syminfos = s; 3501 3502 return; 3503 3504 give_up: 3505 g_free(s); 3506 g_free(strings); 3507 g_free(syms); 3508 } 3509 3510 uint32_t get_elf_eflags(int fd) 3511 { 3512 struct elfhdr ehdr; 3513 off_t offset; 3514 int ret; 3515 3516 /* Read ELF header */ 3517 offset = lseek(fd, 0, SEEK_SET); 3518 if (offset == (off_t) -1) { 3519 return 0; 3520 } 3521 ret = read(fd, &ehdr, sizeof(ehdr)); 3522 if (ret < sizeof(ehdr)) { 3523 return 0; 3524 } 3525 offset = lseek(fd, offset, SEEK_SET); 3526 if (offset == (off_t) -1) { 3527 return 0; 3528 } 3529 3530 /* Check ELF signature */ 3531 if (!elf_check_ident(&ehdr)) { 3532 return 0; 3533 } 3534 3535 /* check header */ 3536 bswap_ehdr(&ehdr); 3537 if (!elf_check_ehdr(&ehdr)) { 3538 return 0; 3539 } 3540 3541 /* return architecture id */ 3542 return ehdr.e_flags; 3543 } 3544 3545 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) 3546 { 3547 struct image_info interp_info; 3548 struct elfhdr elf_ex; 3549 char *elf_interpreter = NULL; 3550 char *scratch; 3551 3552 memset(&interp_info, 0, sizeof(interp_info)); 3553 #ifdef TARGET_MIPS 3554 interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN; 3555 #endif 3556 3557 info->start_mmap = (abi_ulong)ELF_START_MMAP; 3558 3559 load_elf_image(bprm->filename, bprm->fd, info, 3560 &elf_interpreter, bprm->buf); 3561 3562 /* ??? We need a copy of the elf header for passing to create_elf_tables. 3563 If we do nothing, we'll have overwritten this when we re-use bprm->buf 3564 when we load the interpreter. */ 3565 elf_ex = *(struct elfhdr *)bprm->buf; 3566 3567 /* Do this so that we can load the interpreter, if need be. We will 3568 change some of these later */ 3569 bprm->p = setup_arg_pages(bprm, info); 3570 3571 scratch = g_new0(char, TARGET_PAGE_SIZE); 3572 if (STACK_GROWS_DOWN) { 3573 bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 3574 bprm->p, info->stack_limit); 3575 info->file_string = bprm->p; 3576 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 3577 bprm->p, info->stack_limit); 3578 info->env_strings = bprm->p; 3579 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 3580 bprm->p, info->stack_limit); 3581 info->arg_strings = bprm->p; 3582 } else { 3583 info->arg_strings = bprm->p; 3584 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 3585 bprm->p, info->stack_limit); 3586 info->env_strings = bprm->p; 3587 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 3588 bprm->p, info->stack_limit); 3589 info->file_string = bprm->p; 3590 bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 3591 bprm->p, info->stack_limit); 3592 } 3593 3594 g_free(scratch); 3595 3596 if (!bprm->p) { 3597 fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG)); 3598 exit(-1); 3599 } 3600 3601 if (elf_interpreter) { 3602 load_elf_interp(elf_interpreter, &interp_info, bprm->buf); 3603 3604 /* If the program interpreter is one of these two, then assume 3605 an iBCS2 image. Otherwise assume a native linux image. 
 */

        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
            || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
            info->personality = PER_SVR4;

            /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
               and some applications "depend" upon this behavior.  Since
               we do not have the power to recompile these, we emulate
               the SVr4 behavior.  Sigh.  */
            target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        }
#ifdef TARGET_MIPS
        info->interp_fp_abi = interp_info.fp_abi;
#endif
    }

    /*
     * TODO: load a vdso, which would also contain the signal trampolines.
     * Otherwise, allocate a private page to hold them.
     */
    if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) {
        abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE,
                                          PROT_READ | PROT_WRITE,
                                          MAP_PRIVATE | MAP_ANON, -1, 0);
        if (tramp_page == -1) {
            return -errno;
        }

        setup_sigtramp(tramp_page);
        target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC);
    }

    bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
                                info, (elf_interpreter ? &interp_info : NULL));
    info->start_stack = bprm->p;

    /* If we have an interpreter, set that as the program's entry point.
       Copy the load_bias as well, to help PPC64 interpret the entry
       point as a function descriptor.  Do this after creating elf tables
       so that we copy the original program entry point into the AUXV.  */
    if (elf_interpreter) {
        info->load_bias = interp_info.load_bias;
        info->entry = interp_info.entry;
        g_free(elf_interpreter);
    }

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#endif

    /*
     * If we reserved extra space for brk, release it now.
     * The implementation of do_brk in syscalls.c expects to be able
     * to mmap pages in this space.
     */
    if (info->reserve_brk) {
        abi_ulong start_brk = HOST_PAGE_ALIGN(info->brk);
        abi_ulong end_brk = HOST_PAGE_ALIGN(info->brk + info->reserve_brk);
        target_munmap(start_brk, end_brk - start_brk);
    }

    return 0;
}
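/*
 * The consumer side of the tables built by create_elf_tables() above:
 * at _start the guest sees argc, argv, envp and the auxv in one block
 * at the stack pointer. A minimal sketch of a guest-side lookup (this
 * is illustrative guest code, not part of QEMU, and guest_getauxval
 * is a hypothetical helper):
 */
#if 0
static unsigned long guest_getauxval(char **envp, unsigned long type)
{
    char **e = envp;
    unsigned long *a;

    while (*e) {    /* skip past the environment pointers */
        e++;
    }
    /* The auxv (a_type, a_val) pairs follow envp's NULL terminator. */
    for (a = (unsigned long *)(e + 1); a[0] != 0 /* AT_NULL */; a += 2) {
        if (a[0] == type) {
            return a[1];
        }
    }
    return 0;
}
#endif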
#ifdef USE_ELF_CORE_DUMP
/*
 * Definitions to generate Intel SVR4-like core files.
 * These mostly have the same names as the SVR4 types with "target_elf_"
 * tacked on the front to prevent clashes with linux definitions,
 * and the typedef forms have been avoided. This is mostly like
 * the SVR4 structure, but more Linuxy, with things that Linux does
 * not support and which gdb doesn't really use excluded.
 *
 * Fields we don't dump (their contents is zero) in linux-user qemu
 * are marked with XXX.
 *
 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
 *
 * Porting ELF coredump for a target is a (quite) simple process. First
 * you define USE_ELF_CORE_DUMP in the target ELF code (where init_thread()
 * for the target resides):
 *
 * #define USE_ELF_CORE_DUMP
 *
 * Next you define the type of register set used for dumping. The ELF
 * specification says that it needs to be an array of elf_greg_t that
 * has the size of ELF_NREG:
 *
 * typedef <target_regtype> target_elf_greg_t;
 * #define ELF_NREG <number of registers>
 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 * The last step is to implement a target specific function that copies
 * the registers from the given cpu into the register set specified
 * above. The prototype is:
 *
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                const CPUArchState *env);
 *
 * Parameters:
 *     regs - copy register values into here (allocated and zeroed by caller)
 *     env - copy registers from here
 *
 * An example for an ARM target is provided in this file.
 */

/* An ELF note in memory */
struct memelfnote {
    const char *name;
    size_t     namesz;
    size_t     namesz_rounded;
    int        type;
    size_t     datasz;
    size_t     datasz_rounded;
    void       *data;
    size_t     notesz;
};

struct target_elf_siginfo {
    abi_int    si_signo; /* signal number */
    abi_int    si_code;  /* extra code */
    abi_int    si_errno; /* errno */
};

struct target_elf_prstatus {
    struct target_elf_siginfo pr_info;      /* Info associated with signal */
    abi_short          pr_cursig;    /* Current signal */
    abi_ulong          pr_sigpend;   /* XXX */
    abi_ulong          pr_sighold;   /* XXX */
    target_pid_t       pr_pid;
    target_pid_t       pr_ppid;
    target_pid_t       pr_pgrp;
    target_pid_t       pr_sid;
    struct target_timeval pr_utime;  /* XXX User time */
    struct target_timeval pr_stime;  /* XXX System time */
    struct target_timeval pr_cutime; /* XXX Cumulative user time */
    struct target_timeval pr_cstime; /* XXX Cumulative system time */
    target_elf_gregset_t pr_reg;     /* GP registers */
    abi_int            pr_fpvalid;   /* XXX */
};

#define ELF_PRARGSZ     (80) /* Number of chars for args */

struct target_elf_prpsinfo {
    char         pr_state;       /* numeric process state */
    char         pr_sname;       /* char for pr_state */
    char         pr_zomb;        /* zombie */
    char         pr_nice;        /* nice val */
    abi_ulong    pr_flag;        /* flags */
    target_uid_t pr_uid;
    target_gid_t pr_gid;
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
    /* Lots missing */
    char    pr_fname[16] QEMU_NONSTRING; /* filename of executable */
    char    pr_psargs[ELF_PRARGSZ];      /* initial part of arg list */
};

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status {
    QTAILQ_ENTRY(elf_thread_status)  ets_link;
    struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
#if 0
    elf_fpregset_t fpu;             /* NT_PRFPREG */
    struct task_struct *thread;
    elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
#endif
    struct memelfnote notes[1];
    int num_notes;
};

struct elf_note_info {
    struct memelfnote *notes;
    struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
    struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */

    QTAILQ_HEAD(, elf_thread_status) thread_list;
#if 0
    /*
     * Current version of ELF coredump doesn't support
     * dumping fp regs etc.
     */
    elf_fpregset_t *fpu;
    elf_fpxregset_t *xfpu;
    int thread_status_size;
#endif
    int notes_size;
    int numnote;
};

struct vm_area_struct {
    target_ulong   vma_start;  /* start vaddr of memory region */
    target_ulong   vma_end;    /* end vaddr of memory region */
    abi_ulong      vma_flags;  /* protection etc.

struct mm_struct {
    QTAILQ_HEAD(, vm_area_struct) mm_mmap;
    int mm_count;           /* number of mappings */
};

static struct mm_struct *vma_init(void);
static void vma_delete(struct mm_struct *);
static int vma_add_mapping(struct mm_struct *, target_ulong,
                           target_ulong, abi_ulong);
static int vma_get_mapping_count(const struct mm_struct *);
static struct vm_area_struct *vma_first(const struct mm_struct *);
static struct vm_area_struct *vma_next(struct vm_area_struct *);
static abi_ulong vma_dump_size(const struct vm_area_struct *);
static int vma_walker(void *priv, target_ulong start, target_ulong end,
                      unsigned long flags);

static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
static void fill_note(struct memelfnote *, const char *, int,
                      unsigned int, void *);
static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
static void fill_auxv_note(struct memelfnote *, const TaskState *);
static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
static size_t note_size(const struct memelfnote *);
static void free_note_info(struct elf_note_info *);
static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
static void fill_thread_info(struct elf_note_info *, const CPUArchState *);

static int dump_write(int, const void *, size_t);
static int write_note(struct memelfnote *, int);
static int write_note_info(struct elf_note_info *, int);

#ifdef BSWAP_NEEDED
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
    prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
    prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
    prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
    prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
    prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
    prstatus->pr_pid = tswap32(prstatus->pr_pid);
    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
    prstatus->pr_sid = tswap32(prstatus->pr_sid);
    /* cpu times are not filled, so we skip them */
    /* regs should be in correct format already */
    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
}

static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
    psinfo->pr_flag = tswapal(psinfo->pr_flag);
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
}

static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
#else
static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
static inline void bswap_psinfo(struct target_elf_prpsinfo *p) { }
static inline void bswap_note(struct elf_note *en) { }
#endif /* BSWAP_NEEDED */

/*
 * Minimal support for linux memory regions.  These are needed
 * when we are finding out what memory exactly belongs to the
 * emulated process.  No locks are needed here, as long as the
 * thread that received the signal is stopped.
 */

static struct mm_struct *vma_init(void)
{
    struct mm_struct *mm;

    if ((mm = g_malloc(sizeof (*mm))) == NULL)
        return (NULL);

    mm->mm_count = 0;
    QTAILQ_INIT(&mm->mm_mmap);

    return (mm);
}

static void vma_delete(struct mm_struct *mm)
{
    struct vm_area_struct *vma;

    while ((vma = vma_first(mm)) != NULL) {
        QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
        g_free(vma);
    }
    g_free(mm);
}

static int vma_add_mapping(struct mm_struct *mm, target_ulong start,
                           target_ulong end, abi_ulong flags)
{
    struct vm_area_struct *vma;

    if ((vma = g_malloc0(sizeof (*vma))) == NULL)
        return (-1);

    vma->vma_start = start;
    vma->vma_end = end;
    vma->vma_flags = flags;

    QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
    mm->mm_count++;

    return (0);
}

static struct vm_area_struct *vma_first(const struct mm_struct *mm)
{
    return (QTAILQ_FIRST(&mm->mm_mmap));
}

static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
{
    return (QTAILQ_NEXT(vma, vma_link));
}

static int vma_get_mapping_count(const struct mm_struct *mm)
{
    return (mm->mm_count);
}

/*
 * Calculate file (dump) size of given memory region.
 */
static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
{
    /* if we cannot even read the first page, skip it */
    if (!access_ok_untagged(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
        return (0);

    /*
     * Usually we don't dump executable pages, as they contain
     * non-writable code that a debugger can read directly from
     * the target library etc.  However, thread stacks are marked
     * executable as well, so we read in the first page of the given
     * region and check whether it contains an ELF header.  If there
     * is no ELF header, we dump the region.
     */
    if (vma->vma_flags & PROT_EXEC) {
        char page[TARGET_PAGE_SIZE];

        if (copy_from_user(page, vma->vma_start, sizeof (page))) {
            return 0;
        }
        if ((page[EI_MAG0] == ELFMAG0) &&
            (page[EI_MAG1] == ELFMAG1) &&
            (page[EI_MAG2] == ELFMAG2) &&
            (page[EI_MAG3] == ELFMAG3)) {
            /*
             * Mappings are possibly from ELF binary.  Don't dump
             * them.
             */
            return (0);
        }
    }

    return (vma->vma_end - vma->vma_start);
}

static int vma_walker(void *priv, target_ulong start, target_ulong end,
                      unsigned long flags)
{
    struct mm_struct *mm = (struct mm_struct *)priv;

    vma_add_mapping(mm, start, end, flags);
    return (0);
}
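
/*
 * A minimal sketch of how this layer is used, mirroring what
 * elf_core_dump() below does:
 */
#if 0
struct mm_struct *mm = vma_init();

if (mm != NULL) {
    struct vm_area_struct *vma;

    walk_memory_regions(mm, vma_walker);    /* collect guest mappings */
    int segs = vma_get_mapping_count(mm);   /* one PT_LOAD header per vma */

    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        /* dump vma_dump_size(vma) bytes starting at vma->vma_start ... */
    }
    vma_delete(mm);
}
#endif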

static void fill_note(struct memelfnote *note, const char *name, int type,
                      unsigned int sz, void *data)
{
    unsigned int namesz;

    namesz = strlen(name) + 1;
    note->name = name;
    note->namesz = namesz;
    note->namesz_rounded = roundup(namesz, sizeof (int32_t));
    note->type = type;
    note->datasz = sz;
    note->datasz_rounded = roundup(sz, sizeof (int32_t));

    note->data = data;

    /*
     * We calculate the rounded-up note size here, as specified by
     * the ELF document.
     */
    note->notesz = sizeof (struct elf_note) +
                   note->namesz_rounded + note->datasz_rounded;
}

static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
                            uint32_t flags)
{
    (void) memset(elf, 0, sizeof(*elf));

    (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;

    elf->e_type = ET_CORE;
    elf->e_machine = machine;
    elf->e_version = EV_CURRENT;
    elf->e_phoff = sizeof(struct elfhdr);
    elf->e_flags = flags;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;

    bswap_ehdr(elf);
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_vaddr = 0;
    phdr->p_paddr = 0;
    phdr->p_filesz = sz;
    phdr->p_memsz = 0;
    phdr->p_flags = 0;
    phdr->p_align = 0;

    bswap_phdr(phdr, 1);
}

static size_t note_size(const struct memelfnote *note)
{
    return (note->notesz);
}

static void fill_prstatus(struct target_elf_prstatus *prstatus,
                          const TaskState *ts, int signr)
{
    (void) memset(prstatus, 0, sizeof (*prstatus));
    prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
    prstatus->pr_pid = ts->ts_tid;
    prstatus->pr_ppid = getppid();
    prstatus->pr_pgrp = getpgrp();
    prstatus->pr_sid = getsid(0);

    bswap_prstatus(prstatus);
}

static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
{
    char *base_filename;
    unsigned int i, len;

    (void) memset(psinfo, 0, sizeof (*psinfo));

    len = ts->info->env_strings - ts->info->arg_strings;
    if (len >= ELF_PRARGSZ)
        len = ELF_PRARGSZ - 1;
    if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_strings, len)) {
        return -EFAULT;
    }
    for (i = 0; i < len; i++)
        if (psinfo->pr_psargs[i] == 0)
            psinfo->pr_psargs[i] = ' ';
    psinfo->pr_psargs[len] = 0;

    psinfo->pr_pid = getpid();
    psinfo->pr_ppid = getppid();
    psinfo->pr_pgrp = getpgrp();
    psinfo->pr_sid = getsid(0);
    psinfo->pr_uid = getuid();
    psinfo->pr_gid = getgid();

    base_filename = g_path_get_basename(ts->bprm->filename);
    /*
     * Using strncpy here is fine: at max-length,
     * this field is not NUL-terminated.
     */
    (void) strncpy(psinfo->pr_fname, base_filename,
                   sizeof(psinfo->pr_fname));

    g_free(base_filename);
    bswap_psinfo(psinfo);
    return (0);
}

static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
{
    elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
    elf_addr_t orig_auxv = auxv;
    void *ptr;
    int len = ts->info->auxv_len;

    /*
     * The auxiliary vector is stored on the target process stack.  It
     * contains {type, value} pairs that we need to dump into the note.
     * This is not strictly necessary, but we do it here for the sake
     * of completeness.
     */

    /* read in the whole auxv vector and copy it to the memelfnote */
    ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
    if (ptr != NULL) {
        fill_note(note, "CORE", NT_AUXV, len, ptr);
        unlock_user(ptr, auxv, len);
    }
}
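
/*
 * Illustrative shape of the NT_AUXV payload dumped above: the fields are
 * abi_ulong-sized and in target byte order, and the exact entries depend
 * on what create_elf_tables() put on the stack:
 *
 *   { AT_PHDR, <phdr addr> } { AT_ENTRY, <entry> } ... { AT_NULL, 0 }
 */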

/*
 * Constructs the name of the coredump file.  We have the following
 * convention for the name:
 *
 *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
 *
 * Returns the filename.
 */
static char *core_dump_filename(const TaskState *ts)
{
    g_autoptr(GDateTime) now = g_date_time_new_now_local();
    g_autofree char *nowstr = g_date_time_format(now, "%Y%m%d-%H%M%S");
    g_autofree char *base_filename = g_path_get_basename(ts->bprm->filename);

    return g_strdup_printf("qemu_%s_%s_%d.core",
                           base_filename, nowstr, (int)getpid());
}

static int dump_write(int fd, const void *ptr, size_t size)
{
    const char *bufp = (const char *)ptr;
    ssize_t bytes_written, bytes_left;
    struct rlimit dumpsize;
    off_t pos;

    bytes_written = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
        if (errno == ESPIPE) { /* not a seekable stream */
            bytes_left = size;
        } else {
            return pos;
        }
    } else {
        if (dumpsize.rlim_cur <= pos) {
            return -1;
        } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
            bytes_left = size;
        } else {
            size_t limit_left = dumpsize.rlim_cur - pos;
            bytes_left = limit_left >= size ? size : limit_left;
        }
    }

    /*
     * Under normal conditions a single write(2) should do, but
     * in the case of a socket etc. this mechanism is more portable.
     */
    do {
        bytes_written = write(fd, bufp, bytes_left);
        if (bytes_written < 0) {
            if (errno == EINTR)
                continue;
            return (-1);
        } else if (bytes_written == 0) { /* eof */
            return (-1);
        }
        bufp += bytes_written;
        bytes_left -= bytes_written;
    } while (bytes_left > 0);

    return (0);
}

static int write_note(struct memelfnote *men, int fd)
{
    struct elf_note en;

    en.n_namesz = men->namesz;
    en.n_type = men->type;
    en.n_descsz = men->datasz;

    bswap_note(&en);

    if (dump_write(fd, &en, sizeof(en)) != 0)
        return (-1);
    if (dump_write(fd, men->name, men->namesz_rounded) != 0)
        return (-1);
    if (dump_write(fd, men->data, men->datasz_rounded) != 0)
        return (-1);

    return (0);
}

static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
{
    CPUState *cpu = env_cpu((CPUArchState *)env);
    TaskState *ts = (TaskState *)cpu->opaque;
    struct elf_thread_status *ets;

    ets = g_malloc0(sizeof (*ets));
    ets->num_notes = 1; /* only prstatus is dumped */
    fill_prstatus(&ets->prstatus, ts, 0);
    elf_core_copy_regs(&ets->prstatus.pr_reg, env);
    fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
              &ets->prstatus);

    QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);

    info->notes_size += note_size(&ets->notes[0]);
}

static void init_note_info(struct elf_note_info *info)
{
    /*
     * Initialize the elf_note_info structure so that it is at
     * least safe to call free_note_info() on it.  Must be
     * called before calling fill_note_info().
     */
    memset(info, 0, sizeof (*info));
    QTAILQ_INIT(&info->thread_list);
}
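
/*
 * A minimal sketch of the note-handling lifecycle (error handling
 * elided; signr, env and fd stand for the values that the real user,
 * elf_core_dump() below, passes in):
 */
#if 0
struct elf_note_info info;

init_note_info(&info);                  /* safe to free from here on */
if (fill_note_info(&info, signr, env) == 0) {
    /* ... write the ELF header and program headers, then ... */
    write_note_info(&info, fd);
}
free_note_info(&info);
#endif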

static int fill_note_info(struct elf_note_info *info,
                          long signr, const CPUArchState *env)
{
#define NUMNOTES 3
    CPUState *cpu = env_cpu((CPUArchState *)env);
    TaskState *ts = (TaskState *)cpu->opaque;
    int i;

    info->notes = g_new0(struct memelfnote, NUMNOTES);
    if (info->notes == NULL)
        return (-ENOMEM);
    info->prstatus = g_malloc0(sizeof (*info->prstatus));
    if (info->prstatus == NULL)
        return (-ENOMEM);
    info->psinfo = g_malloc0(sizeof (*info->psinfo));
    if (info->psinfo == NULL)
        return (-ENOMEM);

    /*
     * First fill in status (and registers) of current thread
     * including process info & aux vector.
     */
    fill_prstatus(info->prstatus, ts, signr);
    elf_core_copy_regs(&info->prstatus->pr_reg, env);
    fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
              sizeof (*info->prstatus), info->prstatus);
    fill_psinfo(info->psinfo, ts);
    fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
              sizeof (*info->psinfo), info->psinfo);
    fill_auxv_note(&info->notes[2], ts);
    info->numnote = NUMNOTES;

    info->notes_size = 0;
    for (i = 0; i < info->numnote; i++)
        info->notes_size += note_size(&info->notes[i]);

    /* read and fill status of all threads */
    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        CPU_FOREACH(cpu) {
            if (cpu == thread_cpu) {
                continue;
            }
            fill_thread_info(info, cpu->env_ptr);
        }
    }

    return (0);
}

static void free_note_info(struct elf_note_info *info)
{
    struct elf_thread_status *ets;

    while (!QTAILQ_EMPTY(&info->thread_list)) {
        ets = QTAILQ_FIRST(&info->thread_list);
        QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
        g_free(ets);
    }

    g_free(info->prstatus);
    g_free(info->psinfo);
    g_free(info->notes);
}

static int write_note_info(struct elf_note_info *info, int fd)
{
    struct elf_thread_status *ets;
    int i, error = 0;

    /* write prstatus, psinfo and auxv for current thread */
    for (i = 0; i < info->numnote; i++)
        if ((error = write_note(&info->notes[i], fd)) != 0)
            return (error);

    /* write prstatus for each thread */
    QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
        if ((error = write_note(&ets->notes[0], fd)) != 0)
            return (error);
    }

    return (0);
}

/*
 * Write out ELF coredump.
 *
 * See documentation of ELF object file format in:
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
 *
 * The linux coredump format is the following:
 *
 * 0   +----------------------+         \
 *     | ELF header           | ET_CORE  |
 *     +----------------------+          |
 *     | ELF program headers  |          |--- headers
 *     | - NOTE section       |          |
 *     | - PT_LOAD sections   |          |
 *     +----------------------+         /
 *     | NOTEs:               |
 *     | - NT_PRSTATUS        |
 *     | - NT_PRPSINFO        |
 *     | - NT_AUXV            |
 *     +----------------------+ <-- aligned to target page
 *     | Process memory dump  |
 *     :                      :
 *     .                      .
 *     :                      :
 *     |                      |
 *     +----------------------+
 *
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
 *
 * The format follows the System V format as closely as possible.
 * Current version limitations are as follows:
 *     - no floating point registers are dumped
 *
 * The function returns 0 in case of success, negative errno otherwise.
 *
 * TODO: make this work also during runtime: it should be
 * possible to force a coredump from a running process and then
 * continue processing.  For example qemu could set up a SIGUSR2
 * handler (provided that the target process hasn't registered a
 * handler for that signal) that does the dump when the signal is
 * received.
 */
static int elf_core_dump(int signr, const CPUArchState *env)
{
    const CPUState *cpu = env_cpu((CPUArchState *)env);
    const TaskState *ts = (const TaskState *)cpu->opaque;
    struct vm_area_struct *vma = NULL;
    g_autofree char *corefile = NULL;
    struct elf_note_info info;
    struct elfhdr elf;
    struct elf_phdr phdr;
    struct rlimit dumpsize;
    struct mm_struct *mm = NULL;
    off_t offset = 0, data_offset = 0;
    int segs = 0;
    int fd = -1;

    init_note_info(&info);

    errno = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if (dumpsize.rlim_cur == 0)
        return 0;

    corefile = core_dump_filename(ts);

    if ((fd = open(corefile, O_WRONLY | O_CREAT,
                   S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
        return (-errno);

    /*
     * Walk through target process memory mappings and
     * set up structure containing this information.  After
     * this point vma_xxx functions can be used.
     */
    if ((mm = vma_init()) == NULL)
        goto out;

    walk_memory_regions(mm, vma_walker);
    segs = vma_get_mapping_count(mm);

    /*
     * Construct valid coredump ELF header.  We also
     * add one more segment for notes.
     */
    fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
    if (dump_write(fd, &elf, sizeof (elf)) != 0)
        goto out;

    /* fill in the in-memory version of notes */
    if (fill_note_info(&info, signr, env) < 0)
        goto out;

    offset += sizeof (elf);                             /* ELF header */
    offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */

    /* write out notes program header */
    fill_elf_note_phdr(&phdr, info.notes_size, offset);

    offset += info.notes_size;
    if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
        goto out;

    /*
     * The ELF specification wants data to start at a page boundary,
     * so we align it here.
     */
    data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);

    /*
     * Write program headers for memory regions mapped in
     * the target process.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        (void) memset(&phdr, 0, sizeof (phdr));

        phdr.p_type = PT_LOAD;
        phdr.p_offset = offset;
        phdr.p_vaddr = vma->vma_start;
        phdr.p_paddr = 0;
        phdr.p_filesz = vma_dump_size(vma);
        offset += phdr.p_filesz;
        phdr.p_memsz = vma->vma_end - vma->vma_start;
        phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
        if (vma->vma_flags & PROT_WRITE)
            phdr.p_flags |= PF_W;
        if (vma->vma_flags & PROT_EXEC)
            phdr.p_flags |= PF_X;
        phdr.p_align = ELF_EXEC_PAGESIZE;

        bswap_phdr(&phdr, 1);
        if (dump_write(fd, &phdr, sizeof(phdr)) != 0) {
            goto out;
        }
    }

    /*
     * Next we write notes just after the program headers.  No
     * alignment is needed here.
     */
    if (write_note_info(&info, fd) < 0)
        goto out;

    /* align data to page boundary */
    if (lseek(fd, data_offset, SEEK_SET) != data_offset)
        goto out;

    /*
     * Finally we can dump process memory into the corefile as well.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        abi_ulong addr;
        abi_ulong end;

        end = vma->vma_start + vma_dump_size(vma);

        for (addr = vma->vma_start; addr < end;
             addr += TARGET_PAGE_SIZE) {
            char page[TARGET_PAGE_SIZE];
            int error;

            /*
             * Read in page from target process memory and
             * write it to coredump file.
             */
            error = copy_from_user(page, addr, sizeof (page));
            if (error != 0) {
                (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
                               addr);
                errno = -error;
                goto out;
            }
            if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
                goto out;
        }
    }

 out:
    free_note_info(&info);
    if (mm != NULL)
        vma_delete(mm);
    (void) close(fd);

    if (errno != 0)
        return (-errno);
    return (0);
}
#endif /* USE_ELF_CORE_DUMP */

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}