/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>

#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/shm.h>

#include "qemu.h"
#include "user/tswap-target.h"
#include "exec/page-protection.h"
#include "user/guest-base.h"
#include "user-internals.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "disas/disas.h"
#include "qemu/bitops.h"
#include "qemu/path.h"
#include "qemu/queue.h"
#include "qemu/guest-random.h"
#include "qemu/units.h"
#include "qemu/selfmap.h"
#include "qemu/lockable.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target_signal.h"
#include "tcg/debuginfo.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

#ifndef TARGET_ARCH_HAS_SIGTRAMP_PAGE
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0
#endif

typedef struct {
    const uint8_t *image;
    const uint32_t *relocs;
    unsigned image_size;
    unsigned reloc_count;
    unsigned sigreturn_ofs;
    unsigned rt_sigreturn_ofs;
} VdsoImageInfo;

#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
                                           descriptors (signal handling) */
    MMAP_PAGE_ZERO =    0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC = 0x0400000,
    ADDR_LIMIT_32BIT =  0x0800000,
    SHORT_INODE =       0x1000000,
    WHOLE_SECONDS =     0x2000000,
    STICKY_TIMEOUTS =   0x4000000,
    ADDR_LIMIT_3GB =    0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX =         0x0000,
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_BSD =           0x0006,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =       0x0008,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,   /* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,   /* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,   /* IRIX6 64-bit */
    PER_RISCOS =        0x000c,
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                     /* OSF/1 v4 */
    PER_HPUX =          0x0010,
    PER_MASK =          0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)
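
/*
 * For example, personality(PER_LINUX32_3GB) == PER_LINUX32: the
 * ADDR_LIMIT_3GB bug-emulation flag lives in the upper bytes and is
 * masked off, leaving only the base personality in the low byte.
 */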

int info_is_fdpic(struct image_info *info)
{
    return info->personality == PER_LINUX_FDPIC;
}

/* This flag is ineffective under Linux too; it should be deleted. */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#if TARGET_BIG_ENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif

#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong      target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong       target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif

#ifdef USE_UID16
typedef abi_ushort      target_uid_t;
typedef abi_ushort      target_gid_t;
#else
typedef abi_uint        target_uid_t;
typedef abi_uint        target_gid_t;
#endif
typedef abi_int         target_pid_t;

#ifdef TARGET_I386

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64

#define ELF_PLATFORM   "x86_64"

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[15]);
    (*regs)[1] = tswapreg(env->regs[14]);
    (*regs)[2] = tswapreg(env->regs[13]);
    (*regs)[3] = tswapreg(env->regs[12]);
    (*regs)[4] = tswapreg(env->regs[R_EBP]);
    (*regs)[5] = tswapreg(env->regs[R_EBX]);
    (*regs)[6] = tswapreg(env->regs[11]);
    (*regs)[7] = tswapreg(env->regs[10]);
    (*regs)[8] = tswapreg(env->regs[9]);
    (*regs)[9] = tswapreg(env->regs[8]);
    (*regs)[10] = tswapreg(env->regs[R_EAX]);
    (*regs)[11] = tswapreg(env->regs[R_ECX]);
    (*regs)[12] = tswapreg(env->regs[R_EDX]);
    (*regs)[13] = tswapreg(env->regs[R_ESI]);
    (*regs)[14] = tswapreg(env->regs[R_EDI]);
    (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[16] = tswapreg(env->eip);
    (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[18] = tswapreg(env->eflags);
    (*regs)[19] = tswapreg(env->regs[R_ESP]);
    (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff);
    (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
}

#if ULONG_MAX > UINT32_MAX
#define INIT_GUEST_COMMPAGE
static bool init_guest_commpage(void)
{
    /*
     * The vsyscall page is at a high negative address aka kernel space,
     * which means that we cannot actually allocate it with target_mmap.
     * We still should be able to use page_set_flags, unless the user
     * has specified -R reserved_va, which would trigger an assert().
     */
    if (reserved_va != 0 &&
        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
        error_report("Cannot allocate vsyscall page");
        exit(EXIT_FAILURE);
    }
    page_set_flags(TARGET_VSYSCALL_PAGE,
                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}
#endif
#else

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_386

#define ELF_PLATFORM get_elf_platform()
#define EXSTACK_DEFAULT true

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6) {
        family = 6;
    }
    if (family >= 3) {
        elf_platform[1] = '0' + family;
    }
    return elf_platform;
}

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us that we have no such handler.  */
    regs->edx = 0;
}

#define ELF_NREG    17
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[R_EBX]);
    (*regs)[1] = tswapreg(env->regs[R_ECX]);
    (*regs)[2] = tswapreg(env->regs[R_EDX]);
    (*regs)[3] = tswapreg(env->regs[R_ESI]);
    (*regs)[4] = tswapreg(env->regs[R_EDI]);
    (*regs)[5] = tswapreg(env->regs[R_EBP]);
    (*regs)[6] = tswapreg(env->regs[R_EAX]);
    (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[12] = tswapreg(env->eip);
    (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[14] = tswapreg(env->eflags);
    (*regs)[15] = tswapreg(env->regs[R_ESP]);
    (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff);
}
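
/*
 * In both dumps above, the slot marked XXX mirrors the kernel's
 * user_regs_struct layout, where that position holds orig_eax (i386,
 * slot 11) or orig_rax (x86_64, slot 15), the syscall number saved on
 * kernel entry; lacking a separate saved value, QEMU stores EAX/RAX
 * there again.
 */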

/*
 * i386 is the only target which supplies AT_SYSINFO for the vdso.
 * All others only supply AT_SYSINFO_EHDR.
 */
#define DLINFO_ARCH_ITEMS (vdso_info != NULL)
#define ARCH_DLINFO                                     \
    do {                                                \
        if (vdso_info) {                                \
            NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry);  \
        }                                               \
    } while (0)

#endif /* TARGET_X86_64 */

#define VDSO_HEADER "vdso.c.inc"

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_I386 */
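
/*
 * Illustration, not loader code: an i386 guest libc that finds
 * AT_SYSINFO in its auxv typically saves the value and enters the
 * kernel through it instead of using "int $0x80", e.g. roughly
 *
 *     void *sysinfo = (void *)getauxval(AT_SYSINFO);
 *     ...
 *     call *sysinfo
 *
 * The exact mechanism (such as glibc's indirect call through the TCB)
 * is up to the guest C library; QEMU only has to provide the auxv
 * entry and the vdso it points into.
 */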

#ifdef TARGET_ARM

#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */

#define ELF_ARCH        EM_ARM
#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->uregs[16] = ARM_CPU_MODE_USR;
    if (infop->entry & 1) {
        regs->uregs[16] |= CPSR_T;
    }
    regs->uregs[15] = infop->entry & 0xfffffffe;
    regs->uregs[13] = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->uregs[2], stack + 8); /* envp */
    get_user_ual(regs->uregs[1], stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed afterwards! */
    regs->uregs[0] = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->uregs[10] = infop->start_data;

    /* Support ARM FDPIC.  */
    if (info_is_fdpic(infop)) {
        /* As described in the ABI document, r7 points to the loadmap info
         * prepared by the kernel. If an interpreter is needed, r8 points
         * to the interpreter loadmap and r9 points to the interpreter
         * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
         * r9 points to the main program PT_DYNAMIC info.
         */
        regs->uregs[7] = infop->loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            /* Executable is dynamically loaded. */
            regs->uregs[8] = infop->interpreter_loadmap_addr;
            regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->uregs[8] = 0;
            regs->uregs[9] = infop->pt_dynamic_addr;
        }
    }
}

#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);

    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
    ARM_HWCAP_ARM_FPHP      = 1 << 22,
    ARM_HWCAP_ARM_ASIMDHP   = 1 << 23,
    ARM_HWCAP_ARM_ASIMDDP   = 1 << 24,
    ARM_HWCAP_ARM_ASIMDFHM  = 1 << 25,
    ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26,
    ARM_HWCAP_ARM_I8MM      = 1 << 27,
};

enum {
    ARM_HWCAP2_ARM_AES      = 1 << 0,
    ARM_HWCAP2_ARM_PMULL    = 1 << 1,
    ARM_HWCAP2_ARM_SHA1     = 1 << 2,
    ARM_HWCAP2_ARM_SHA2     = 1 << 3,
    ARM_HWCAP2_ARM_CRC32    = 1 << 4,
    ARM_HWCAP2_ARM_SB       = 1 << 5,
    ARM_HWCAP2_ARM_SSBS     = 1 << 6,
};

/* The commpage only exists for 32 bit kernels */

#define HI_COMMPAGE (intptr_t)0xffff0f00u
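
/*
 * The commpage holds the ARM kernel's "kuser helpers": fixed entry
 * points (__kuser_get_tls at 0xffff0fe0, __kuser_cmpxchg at 0xffff0fc0,
 * ...) that 32-bit binaries may call directly.  QEMU intercepts
 * execution at those addresses, so the page itself only needs to exist,
 * be executable, and advertise a helper version.  The 5 stored below at
 * 0xffff0ffc is the version number that declares helpers up to and
 * including __kuser_cmpxchg64.
 */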

static bool init_guest_commpage(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    int host_page_size = qemu_real_host_page_size();
    abi_ptr commpage;
    void *want;
    void *addr;

    /*
     * M-profile allocates maximum of 2GB address space, so can never
     * allocate the commpage.  Skip it.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        return true;
    }

    commpage = HI_COMMPAGE & -host_page_size;
    want = g2h_untagged(commpage);
    addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_PRIVATE |
                (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE),
                -1, 0);

    if (addr == MAP_FAILED) {
        perror("Allocating guest commpage");
        exit(EXIT_FAILURE);
    }
    if (addr != want) {
        return false;
    }

    /* Set kernel helper versions; rest of page is 0.  */
    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));

    if (mprotect(addr, host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(EXIT_FAILURE);
    }

    page_set_flags(commpage, commpage | (host_page_size - 1),
                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
    return true;
}

#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()
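
/*
 * Guest-side view, for reference only: these values end up in the
 * target's auxv, where a program would check them with something like
 *
 *     if (getauxval(AT_HWCAP) & HWCAP_NEON) { ... }
 *
 * so every bit reported here must follow the target kernel's uapi
 * hwcap numbering, never anything host-side.
 */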

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_ARM_SWP;
    hwcaps |= ARM_HWCAP_ARM_HALF;
    hwcaps |= ARM_HWCAP_ARM_THUMB;
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
    GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
    GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
    GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
    GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
    GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);

    if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
        cpu_isar_feature(aa32_fpdp_v3, cpu)) {
        hwcaps |= ARM_HWCAP_ARM_VFPv3;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            hwcaps |= ARM_HWCAP_ARM_VFPD32;
        } else {
            hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
        }
    }
    GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
    /*
     * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same
     * isar_feature function for both. The kernel reports them as two hwcaps.
     */
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP);
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP);
    GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP);
    GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM);
    GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16);
    GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM);

    return hwcaps;
}

uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB);
    GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS);
    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP_ARM_SWP      )] = "swp",
    [__builtin_ctz(ARM_HWCAP_ARM_HALF     )] = "half",
    [__builtin_ctz(ARM_HWCAP_ARM_THUMB    )] = "thumb",
    [__builtin_ctz(ARM_HWCAP_ARM_26BIT    )] = "26bit",
    [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult",
    [__builtin_ctz(ARM_HWCAP_ARM_FPA      )] = "fpa",
    [__builtin_ctz(ARM_HWCAP_ARM_VFP      )] = "vfp",
    [__builtin_ctz(ARM_HWCAP_ARM_EDSP     )] = "edsp",
    [__builtin_ctz(ARM_HWCAP_ARM_JAVA     )] = "java",
    [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT   )] = "iwmmxt",
    [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH   )] = "crunch",
    [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE  )] = "thumbee",
    [__builtin_ctz(ARM_HWCAP_ARM_NEON     )] = "neon",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPv3    )] = "vfpv3",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16",
    [__builtin_ctz(ARM_HWCAP_ARM_TLS      )] = "tls",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPv4    )] = "vfpv4",
    [__builtin_ctz(ARM_HWCAP_ARM_IDIVA    )] = "idiva",
    [__builtin_ctz(ARM_HWCAP_ARM_IDIVT    )] = "idivt",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPD32   )] = "vfpd32",
    [__builtin_ctz(ARM_HWCAP_ARM_LPAE     )] = "lpae",
    [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM  )] = "evtstrm",
    [__builtin_ctz(ARM_HWCAP_ARM_FPHP     )] = "fphp",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP  )] = "asimdhp",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP  )] = "asimddp",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16",
    [__builtin_ctz(ARM_HWCAP_ARM_I8MM     )] = "i8mm",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP2_ARM_AES  )] = "aes",
    [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull",
    [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1",
    [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2",
    [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32",
    [__builtin_ctz(ARM_HWCAP2_ARM_SB   )] = "sb",
    [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

#undef GET_FEATURE
#undef GET_FEATURE_ID

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    CPUARMState *env = cpu_env(thread_cpu);

#if TARGET_BIG_ENDIAN
# define END  "b"
#else
# define END  "l"
#endif

    if (arm_feature(env, ARM_FEATURE_V8)) {
        return "v8" END;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            return "v7m" END;
        } else {
            return "v7" END;
        }
    } else if (arm_feature(env, ARM_FEATURE_V6)) {
        return "v6" END;
    } else if (arm_feature(env, ARM_FEATURE_V5)) {
        return "v5" END;
    } else {
        return "v4" END;
    }

#undef END
}

#else
/* 64 bit ARM definitions */

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#if TARGET_BIG_ENDIAN
# define ELF_PLATFORM    "aarch64_be"
#else
# define ELF_PLATFORM    "aarch64"
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096
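
/*
 * AT_HWCAP and AT_HWCAP2 bits share this single enum.  Entries from
 * bit 31 upward are spelled 1UL/1ULL << n so that the enumerators (and
 * the HWCAP2 set, which get_elf_hwcap2() below returns as uint64_t) do
 * not overflow a 32-bit int.
 */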

enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
    ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
    ARM_HWCAP_A64_DIT           = 1 << 24,
    ARM_HWCAP_A64_USCAT         = 1 << 25,
    ARM_HWCAP_A64_ILRCPC        = 1 << 26,
    ARM_HWCAP_A64_FLAGM         = 1 << 27,
    ARM_HWCAP_A64_SSBS          = 1 << 28,
    ARM_HWCAP_A64_SB            = 1 << 29,
    ARM_HWCAP_A64_PACA          = 1 << 30,
    ARM_HWCAP_A64_PACG          = 1UL << 31,

    ARM_HWCAP2_A64_DCPODP       = 1 << 0,
    ARM_HWCAP2_A64_SVE2         = 1 << 1,
    ARM_HWCAP2_A64_SVEAES       = 1 << 2,
    ARM_HWCAP2_A64_SVEPMULL     = 1 << 3,
    ARM_HWCAP2_A64_SVEBITPERM   = 1 << 4,
    ARM_HWCAP2_A64_SVESHA3      = 1 << 5,
    ARM_HWCAP2_A64_SVESM4       = 1 << 6,
    ARM_HWCAP2_A64_FLAGM2       = 1 << 7,
    ARM_HWCAP2_A64_FRINT        = 1 << 8,
    ARM_HWCAP2_A64_SVEI8MM      = 1 << 9,
    ARM_HWCAP2_A64_SVEF32MM     = 1 << 10,
    ARM_HWCAP2_A64_SVEF64MM     = 1 << 11,
    ARM_HWCAP2_A64_SVEBF16      = 1 << 12,
    ARM_HWCAP2_A64_I8MM         = 1 << 13,
    ARM_HWCAP2_A64_BF16         = 1 << 14,
    ARM_HWCAP2_A64_DGH          = 1 << 15,
    ARM_HWCAP2_A64_RNG          = 1 << 16,
    ARM_HWCAP2_A64_BTI          = 1 << 17,
    ARM_HWCAP2_A64_MTE          = 1 << 18,
    ARM_HWCAP2_A64_ECV          = 1 << 19,
    ARM_HWCAP2_A64_AFP          = 1 << 20,
    ARM_HWCAP2_A64_RPRES        = 1 << 21,
    ARM_HWCAP2_A64_MTE3         = 1 << 22,
    ARM_HWCAP2_A64_SME          = 1 << 23,
    ARM_HWCAP2_A64_SME_I16I64   = 1 << 24,
    ARM_HWCAP2_A64_SME_F64F64   = 1 << 25,
    ARM_HWCAP2_A64_SME_I8I32    = 1 << 26,
    ARM_HWCAP2_A64_SME_F16F32   = 1 << 27,
    ARM_HWCAP2_A64_SME_B16F32   = 1 << 28,
    ARM_HWCAP2_A64_SME_F32F32   = 1 << 29,
    ARM_HWCAP2_A64_SME_FA64     = 1 << 30,
    ARM_HWCAP2_A64_WFXT         = 1ULL << 31,
    ARM_HWCAP2_A64_EBF16        = 1ULL << 32,
    ARM_HWCAP2_A64_SVE_EBF16    = 1ULL << 33,
    ARM_HWCAP2_A64_CSSC         = 1ULL << 34,
    ARM_HWCAP2_A64_RPRFM        = 1ULL << 35,
    ARM_HWCAP2_A64_SVE2P1       = 1ULL << 36,
    ARM_HWCAP2_A64_SME2         = 1ULL << 37,
    ARM_HWCAP2_A64_SME2P1       = 1ULL << 38,
    ARM_HWCAP2_A64_SME_I16I32   = 1ULL << 39,
    ARM_HWCAP2_A64_SME_BI32I32  = 1ULL << 40,
    ARM_HWCAP2_A64_SME_B16B16   = 1ULL << 41,
    ARM_HWCAP2_A64_SME_F16F16   = 1ULL << 42,
    ARM_HWCAP2_A64_MOPS         = 1ULL << 43,
    ARM_HWCAP2_A64_HBC          = 1ULL << 44,
};

#define ELF_HWCAP   get_elf_hwcap()
#define ELF_HWCAP2  get_elf_hwcap2()

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;
    hwcaps |= ARM_HWCAP_A64_CPUID;

    /* probe for the extra features */

    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
    GET_FEATURE_ID(aa64_lse2, ARM_HWCAP_A64_USCAT);
    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
    GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
    GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
    GET_FEATURE_ID(aa64_dit, ARM_HWCAP_A64_DIT);
    GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
    GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
    GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
    GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
    GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
    GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);

    return hwcaps;
}

uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
    GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
    GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
    GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
    GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
    GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
    GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
    GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
    GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
    GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
    GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
    GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
    GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
    GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
    GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
    GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
    GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
    GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
    GET_FEATURE_ID(aa64_mte3, ARM_HWCAP2_A64_MTE3);
    GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
                              ARM_HWCAP2_A64_SME_F32F32 |
                              ARM_HWCAP2_A64_SME_B16F32 |
                              ARM_HWCAP2_A64_SME_F16F32 |
                              ARM_HWCAP2_A64_SME_I8I32));
    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
    GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC);
    GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS);

    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP_A64_FP      )] = "fp",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMD   )] = "asimd",
    [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
    [__builtin_ctz(ARM_HWCAP_A64_AES     )] = "aes",
    [__builtin_ctz(ARM_HWCAP_A64_PMULL   )] = "pmull",
    [__builtin_ctz(ARM_HWCAP_A64_SHA1    )] = "sha1",
    [__builtin_ctz(ARM_HWCAP_A64_SHA2    )] = "sha2",
    [__builtin_ctz(ARM_HWCAP_A64_CRC32   )] = "crc32",
    [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics",
    [__builtin_ctz(ARM_HWCAP_A64_FPHP    )] = "fphp",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp",
    [__builtin_ctz(ARM_HWCAP_A64_CPUID   )] = "cpuid",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm",
    [__builtin_ctz(ARM_HWCAP_A64_JSCVT   )] = "jscvt",
    [__builtin_ctz(ARM_HWCAP_A64_FCMA    )] = "fcma",
    [__builtin_ctz(ARM_HWCAP_A64_LRCPC   )] = "lrcpc",
    [__builtin_ctz(ARM_HWCAP_A64_DCPOP   )] = "dcpop",
    [__builtin_ctz(ARM_HWCAP_A64_SHA3    )] = "sha3",
    [__builtin_ctz(ARM_HWCAP_A64_SM3     )] = "sm3",
    [__builtin_ctz(ARM_HWCAP_A64_SM4     )] = "sm4",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp",
    [__builtin_ctz(ARM_HWCAP_A64_SHA512  )] = "sha512",
    [__builtin_ctz(ARM_HWCAP_A64_SVE     )] = "sve",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm",
    [__builtin_ctz(ARM_HWCAP_A64_DIT     )] = "dit",
    [__builtin_ctz(ARM_HWCAP_A64_USCAT   )] = "uscat",
    [__builtin_ctz(ARM_HWCAP_A64_ILRCPC  )] = "ilrcpc",
    [__builtin_ctz(ARM_HWCAP_A64_FLAGM   )] = "flagm",
    [__builtin_ctz(ARM_HWCAP_A64_SSBS    )] = "ssbs",
    [__builtin_ctz(ARM_HWCAP_A64_SB      )] = "sb",
    [__builtin_ctz(ARM_HWCAP_A64_PACA    )] = "paca",
    [__builtin_ctz(ARM_HWCAP_A64_PACG    )] = "pacg",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP2_A64_DCPODP     )] = "dcpodp",
    [__builtin_ctz(ARM_HWCAP2_A64_SVE2       )] = "sve2",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEAES     )] = "sveaes",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL   )] = "svepmull",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM )] = "svebitperm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3    )] = "svesha3",
    [__builtin_ctz(ARM_HWCAP2_A64_SVESM4     )] = "svesm4",
    [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2     )] = "flagm2",
    [__builtin_ctz(ARM_HWCAP2_A64_FRINT      )] = "frint",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM    )] = "svei8mm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM   )] = "svef32mm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM   )] = "svef64mm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16    )] = "svebf16",
    [__builtin_ctz(ARM_HWCAP2_A64_I8MM       )] = "i8mm",
    [__builtin_ctz(ARM_HWCAP2_A64_BF16       )] = "bf16",
    [__builtin_ctz(ARM_HWCAP2_A64_DGH        )] = "dgh",
    [__builtin_ctz(ARM_HWCAP2_A64_RNG        )] = "rng",
    [__builtin_ctz(ARM_HWCAP2_A64_BTI        )] = "bti",
    [__builtin_ctz(ARM_HWCAP2_A64_MTE        )] = "mte",
    [__builtin_ctz(ARM_HWCAP2_A64_ECV        )] = "ecv",
    [__builtin_ctz(ARM_HWCAP2_A64_AFP        )] = "afp",
    [__builtin_ctz(ARM_HWCAP2_A64_RPRES      )] = "rpres",
    [__builtin_ctz(ARM_HWCAP2_A64_MTE3       )] = "mte3",
    [__builtin_ctz(ARM_HWCAP2_A64_SME        )] = "sme",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "smei16i64",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "smef64f64",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32  )] = "smei8i32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "smef16f32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64   )] = "smefa64",
    [__builtin_ctz(ARM_HWCAP2_A64_WFXT       )] = "wfxt",
    [__builtin_ctzll(ARM_HWCAP2_A64_EBF16      )] = "ebf16",
    [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16  )] = "sveebf16",
    [__builtin_ctzll(ARM_HWCAP2_A64_CSSC       )] = "cssc",
    [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM      )] = "rprfm",
    [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1     )] = "sve2p1",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME2       )] = "sme2",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1     )] = "sme2p1",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
    [__builtin_ctzll(ARM_HWCAP2_A64_MOPS       )] = "mops",
    [__builtin_ctzll(ARM_HWCAP2_A64_HBC        )] = "hbc",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

#undef GET_FEATURE_ID

#endif /* not TARGET_AARCH64 */

#if TARGET_BIG_ENDIAN
# define VDSO_HEADER  "vdso-be.c.inc"
#else
# define VDSO_HEADER  "vdso-le.c.inc"
#endif

#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_SPARCV9
#else
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV)
#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_SPARC
#endif /* TARGET_SPARC64 */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Note that target_cpu_copy_regs does not read psr/tstate. */
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong)
                        - TARGET_STACK_BIAS);
}
#endif /* TARGET_SPARC */

#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE

#if defined(TARGET_PPC64)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

#endif

#define ELF_ARCH        EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,

    /* Feature definitions in AT_HWCAP2.  */
    QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
    QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
    QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
    QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
    QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
    QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
    QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
    QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
    QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
    QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
    QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
    QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
    QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */
    QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature)                                    \
    do {                                                                \
        if ((cpu->env.insns_flags2 & flags) == flags) {                 \
            features |= feature;                                        \
        }                                                               \
    } while (0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
    GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
    GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
    GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
                  PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
                 QEMU_PPC_FEATURE_ARCH_2_06);
#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap2(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature)                                     \
    do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)

    GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
    GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
    GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                  PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
                  QEMU_PPC_FEATURE2_VEC_CRYPTO);
    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
                 QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128);
    GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 |
                 QEMU_PPC_FEATURE2_MMA);

#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}
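
/*
 * Note that get_elf_hwcap() above defines GET_FEATURE2 to require *all*
 * of the given insns_flags2 bits, which is why QEMU_PPC_FEATURE_ARCH_2_06
 * is only reported when every listed ISA 2.06 instruction group is
 * present, while get_elf_hwcap2() redefines it as a plain any-bit test.
 */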

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                               \
    do {                                                          \
        PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);                \
        /*                                                        \
         * Handle glibc compatibility: these magic entries must  \
         * be at the lowest addresses in the final auxv.          \
         */                                                       \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                  \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                  \
        NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size);   \
        NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size);   \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                           \
    } while (0)

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64)
    if (get_ppc64_abi(infop) < 2) {
        uint64_t val;
        get_user_u64(val, infop->entry + 8);
        _regs->gpr[2] = val + infop->load_bias;
        get_user_u64(val, infop->entry);
        infop->entry = val + infop->load_bias;
    } else {
        _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
    }
#endif
    _regs->nip = infop->entry;
}

/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
#define ELF_NREG 48
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapreg(env->gpr[i]);
    }

    (*regs)[32] = tswapreg(env->nip);
    (*regs)[33] = tswapreg(env->msr);
    (*regs)[35] = tswapreg(env->ctr);
    (*regs)[36] = tswapreg(env->lr);
    (*regs)[37] = tswapreg(cpu_read_xer(env));

    ccr = ppc_get_cr(env);
    (*regs)[38] = tswapreg(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#ifndef TARGET_PPC64
# define VDSO_HEADER  "vdso-32.c.inc"
#elif TARGET_BIG_ENDIAN
# define VDSO_HEADER  "vdso-64.c.inc"
#else
# define VDSO_HEADER  "vdso-64le.c.inc"
#endif

#endif

#ifdef TARGET_LOONGARCH64

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_LOONGARCH
#define EXSTACK_DEFAULT true

#define elf_check_arch(x) ((x) == EM_LOONGARCH)

#define VDSO_HEADER "vdso.c.inc"
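
/*
 * In init_thread() below, CSR.CRMD bit 3 is DA (direct address
 * translation enable) and bit 4 is PG (paged translation enable), so
 * 2 << 3 selects PG=1, DA=0 as the comment says: the new thread starts
 * with paged address translation active.
 */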

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Set crmd PG,DA = 1,0 */
    regs->csr.crmd = 2 << 3;
    regs->csr.era = infop->entry;
    regs->regs[3] = infop->start_stack;
}

/* See linux kernel: arch/loongarch/include/asm/elf.h */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_EF_R0 = 0,
    TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33,
    TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPULoongArchState *env)
{
    int i;

    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]);
    }

    (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc);
    (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#define ELF_HWCAP get_elf_hwcap()

/* See arch/loongarch/include/uapi/asm/hwcap.h */
enum {
    HWCAP_LOONGARCH_CPUCFG   = (1 << 0),
    HWCAP_LOONGARCH_LAM      = (1 << 1),
    HWCAP_LOONGARCH_UAL      = (1 << 2),
    HWCAP_LOONGARCH_FPU      = (1 << 3),
    HWCAP_LOONGARCH_LSX      = (1 << 4),
    HWCAP_LOONGARCH_LASX     = (1 << 5),
    HWCAP_LOONGARCH_CRC32    = (1 << 6),
    HWCAP_LOONGARCH_COMPLEX  = (1 << 7),
    HWCAP_LOONGARCH_CRYPTO   = (1 << 8),
    HWCAP_LOONGARCH_LVZ      = (1 << 9),
    HWCAP_LOONGARCH_LBT_X86  = (1 << 10),
    HWCAP_LOONGARCH_LBT_ARM  = (1 << 11),
    HWCAP_LOONGARCH_LBT_MIPS = (1 << 12),
};

static uint32_t get_elf_hwcap(void)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= HWCAP_LOONGARCH_CRC32;

    if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) {
        hwcaps |= HWCAP_LOONGARCH_UAL;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) {
        hwcaps |= HWCAP_LOONGARCH_FPU;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) {
        hwcaps |= HWCAP_LOONGARCH_LAM;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        hwcaps |= HWCAP_LOONGARCH_LSX;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        hwcaps |= HWCAP_LOONGARCH_LASX;
    }

    return hwcaps;
}

#define ELF_PLATFORM "loongarch"

#endif /* TARGET_LOONGARCH64 */

#ifdef TARGET_MIPS

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#define ELF_ARCH    EM_MIPS
#define EXSTACK_DEFAULT true

#ifdef TARGET_ABI_MIPSN32
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
#else
#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
#endif

#define ELF_BASE_PLATFORM get_elf_base_platform()

#define MATCH_PLATFORM_INSN(_flags, _base_platform)      \
    do { if ((cpu->env.insn_flags & (_flags)) == _flags) \
    { return _base_platform; } } while (0)

static const char *get_elf_base_platform(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);

    /* 64-bit ISAs go first */
    MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6");
    MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5");
    MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2");
    MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64");
    MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5");
    MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4");
    MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3");

    /* 32-bit ISAs */
    MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6");
    MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5");
    MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2");
    MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32");
    MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2");

    /* Fallback */
    return "mips";
}
#undef MATCH_PLATFORM_INSN

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

/* See linux kernel: arch/mips/include/asm/elf.h.  */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
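
/*
 * On 32-bit MIPS the kernel's register dump layout starts with six pad
 * words, hence TARGET_EF_R0 = 6 below rather than 0; the zeroing loop
 * in elf_core_copy_regs() fills exactly those pad slots.
 */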

/* See linux kernel: arch/mips/include/asm/reg.h.  */
enum {
#ifdef TARGET_MIPS64
    TARGET_EF_R0 = 0,
#else
    TARGET_EF_R0 = 6,
#endif
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
    TARGET_EF_LO = TARGET_EF_R0 + 32,
    TARGET_EF_HI = TARGET_EF_R0 + 33,
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
};

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
{
    int i;

    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
    }

    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

/* See arch/mips/include/uapi/asm/hwcap.h.  */
enum {
    HWCAP_MIPS_R6           = (1 << 0),
    HWCAP_MIPS_MSA          = (1 << 1),
    HWCAP_MIPS_CRC32        = (1 << 2),
    HWCAP_MIPS_MIPS16       = (1 << 3),
    HWCAP_MIPS_MDMX         = (1 << 4),
    HWCAP_MIPS_MIPS3D       = (1 << 5),
    HWCAP_MIPS_SMARTMIPS    = (1 << 6),
    HWCAP_MIPS_DSP          = (1 << 7),
    HWCAP_MIPS_DSP2         = (1 << 8),
    HWCAP_MIPS_DSP3         = (1 << 9),
    HWCAP_MIPS_MIPS16E2     = (1 << 10),
    HWCAP_LOONGSON_MMI      = (1 << 11),
    HWCAP_LOONGSON_EXT      = (1 << 12),
    HWCAP_LOONGSON_EXT2     = (1 << 13),
    HWCAP_LOONGSON_CPUCFG   = (1 << 14),
};

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE_INSN(_flag, _hwcap)                                   \
    do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_SET(_reg, _mask, _hwcap)                          \
    do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap)          \
    do {                                                                  \
        if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) {    \
            hwcaps |= _hwcap;                                             \
        }                                                                 \
    } while (0)

static uint32_t get_elf_hwcap(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH,
                        2, HWCAP_MIPS_R6);
    GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA);
    GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI);
    GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT);

    return hwcaps;
}

#undef GET_FEATURE_REG_EQU
#undef GET_FEATURE_REG_SET
#undef GET_FEATURE_INSN

#endif /* TARGET_MIPS */

#ifdef TARGET_MICROBLAZE

#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_MICROBLAZE

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 38
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
{
    int i, pos = 0;

    for (i = 0; i < 32; i++) {
        (*regs)[pos++] = tswapreg(env->regs[i]);
    }

    (*regs)[pos++] = tswapreg(env->pc);
    (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env));
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->ear);
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->esr);
}

#endif /* TARGET_MICROBLAZE */

#ifdef TARGET_OPENRISC

#define ELF_ARCH EM_OPENRISC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2MSB

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->gpr[1] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

/* See linux kernel arch/openrisc/include/asm/elf.h.  */
#define ELF_NREG 34 /* gprs and pc, sr */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUOpenRISCState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(cpu_get_sr(env));
}
#define ELF_HWCAP 0
#define ELF_PLATFORM NULL

#endif /* TARGET_OPENRISC */

#ifdef TARGET_SH4

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_SH

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

/* See linux kernel: arch/sh/include/asm/elf.h.  */
#define ELF_NREG 23
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
enum {
    TARGET_REG_PC = 16,
    TARGET_REG_PR = 17,
    TARGET_REG_SR = 18,
    TARGET_REG_GBR = 19,
    TARGET_REG_MACH = 20,
    TARGET_REG_MACL = 21,
    TARGET_REG_SYSCALL = 22
};

static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
                                      const CPUSH4State *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapreg(env->gregs[i]);
    }

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
    (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
    (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
    (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
    (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

enum {
    SH_CPU_HAS_FPU            = 0x0001, /* Hardware FPU support */
    SH_CPU_HAS_P2_FLUSH_BUG   = 0x0002, /* Need to flush the cache in P2 area */
    SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
    SH_CPU_HAS_DSP            = 0x0008, /* SH-DSP: DSP support */
    SH_CPU_HAS_PERF_COUNTER   = 0x0010, /* Hardware performance counters */
    SH_CPU_HAS_PTEA           = 0x0020, /* PTEA register */
    SH_CPU_HAS_LLSC           = 0x0040, /* movli.l/movco.l */
    SH_CPU_HAS_L2_CACHE       = 0x0080, /* Secondary cache / URAM */
    SH_CPU_HAS_OP32           = 0x0100, /* 32-bit instruction support */
    SH_CPU_HAS_PTEAEX         = 0x0200, /* PTE ASID Extension support */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
    uint32_t hwcap = 0;

    hwcap |= SH_CPU_HAS_FPU;

    if (cpu->env.features & SH_FEATURE_SH4A) {
        hwcap |= SH_CPU_HAS_LLSC;
    }

    return hwcap;
}

#endif

#ifdef TARGET_CRIS

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_CRIS

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define ELF_EXEC_PAGESIZE        8192

#endif

#ifdef TARGET_M68K

#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->sr = 0;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/m68k/include/asm/elf.h.  */
#define ELF_NREG 20
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
{
    (*regs)[0] = tswapreg(env->dregs[1]);
    (*regs)[1] = tswapreg(env->dregs[2]);
    (*regs)[2] = tswapreg(env->dregs[3]);
    (*regs)[3] = tswapreg(env->dregs[4]);
    (*regs)[4] = tswapreg(env->dregs[5]);
    (*regs)[5] = tswapreg(env->dregs[6]);
    (*regs)[6] = tswapreg(env->dregs[7]);
    (*regs)[7] = tswapreg(env->aregs[0]);
    (*regs)[8] = tswapreg(env->aregs[1]);
    (*regs)[9] = tswapreg(env->aregs[2]);
    (*regs)[10] = tswapreg(env->aregs[3]);
    (*regs)[11] = tswapreg(env->aregs[4]);
    (*regs)[12] = tswapreg(env->aregs[5]);
    (*regs)[13] = tswapreg(env->aregs[6]);
    (*regs)[14] = tswapreg(env->dregs[0]);
    (*regs)[15] = tswapreg(env->aregs[7]);
    (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapreg(env->sr);
    (*regs)[18] = tswapreg(env->pc);
    (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       8192

#endif

#ifdef TARGET_ALPHA

#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;
    regs->usp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */

#ifdef TARGET_S390X

#define ELF_CLASS       ELFCLASS64
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_S390

#include "elf.h"

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE(_feat, _hwcap) \
    do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    /*
     * Let's assume we always have esan3 and zarch.
     * 31-bit processes can use 64-bit registers (high gprs).
1726 */ 1727 uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS; 1728 1729 GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE); 1730 GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA); 1731 GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP); 1732 GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM); 1733 if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) && 1734 s390_has_feat(S390_FEAT_ETF3_ENH)) { 1735 hwcap |= HWCAP_S390_ETF3EH; 1736 } 1737 GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS); 1738 GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT); 1739 GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2); 1740 1741 return hwcap; 1742 } 1743 1744 const char *elf_hwcap_str(uint32_t bit) 1745 { 1746 static const char *hwcap_str[] = { 1747 [HWCAP_S390_NR_ESAN3] = "esan3", 1748 [HWCAP_S390_NR_ZARCH] = "zarch", 1749 [HWCAP_S390_NR_STFLE] = "stfle", 1750 [HWCAP_S390_NR_MSA] = "msa", 1751 [HWCAP_S390_NR_LDISP] = "ldisp", 1752 [HWCAP_S390_NR_EIMM] = "eimm", 1753 [HWCAP_S390_NR_DFP] = "dfp", 1754 [HWCAP_S390_NR_HPAGE] = "edat", 1755 [HWCAP_S390_NR_ETF3EH] = "etf3eh", 1756 [HWCAP_S390_NR_HIGH_GPRS] = "highgprs", 1757 [HWCAP_S390_NR_TE] = "te", 1758 [HWCAP_S390_NR_VXRS] = "vx", 1759 [HWCAP_S390_NR_VXRS_BCD] = "vxd", 1760 [HWCAP_S390_NR_VXRS_EXT] = "vxe", 1761 [HWCAP_S390_NR_GS] = "gs", 1762 [HWCAP_S390_NR_VXRS_EXT2] = "vxe2", 1763 [HWCAP_S390_NR_VXRS_PDE] = "vxp", 1764 [HWCAP_S390_NR_SORT] = "sort", 1765 [HWCAP_S390_NR_DFLT] = "dflt", 1766 [HWCAP_S390_NR_NNPA] = "nnpa", 1767 [HWCAP_S390_NR_PCI_MIO] = "pcimio", 1768 [HWCAP_S390_NR_SIE] = "sie", 1769 }; 1770 1771 return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; 1772 } 1773 1774 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 1775 { 1776 regs->psw.addr = infop->entry; 1777 regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ 1778 PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \ 1779 PSW_MASK_32; 1780 regs->gprs[15] = infop->start_stack; 1781 } 1782 1783 /* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs). 
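 *
 * s390_regs packs the two psw words, the 16 gprs, then the 16 32-bit
 * access registers two per register slot, and finally orig_gpr2;
 * 2 + 16 + 8 + 1 slots is where the ELF_NREG value of 27 comes from.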
*/ 1784 #define ELF_NREG 27 1785 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1786 1787 enum { 1788 TARGET_REG_PSWM = 0, 1789 TARGET_REG_PSWA = 1, 1790 TARGET_REG_GPRS = 2, 1791 TARGET_REG_ARS = 18, 1792 TARGET_REG_ORIG_R2 = 26, 1793 }; 1794 1795 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1796 const CPUS390XState *env) 1797 { 1798 int i; 1799 uint32_t *aregs; 1800 1801 (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask); 1802 (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr); 1803 for (i = 0; i < 16; i++) { 1804 (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]); 1805 } 1806 aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]); 1807 for (i = 0; i < 16; i++) { 1808 aregs[i] = tswap32(env->aregs[i]); 1809 } 1810 (*regs)[TARGET_REG_ORIG_R2] = 0; 1811 } 1812 1813 #define USE_ELF_CORE_DUMP 1814 #define ELF_EXEC_PAGESIZE 4096 1815 1816 #define VDSO_HEADER "vdso.c.inc" 1817 1818 #endif /* TARGET_S390X */ 1819 1820 #ifdef TARGET_RISCV 1821 1822 #define ELF_ARCH EM_RISCV 1823 1824 #ifdef TARGET_RISCV32 1825 #define ELF_CLASS ELFCLASS32 1826 #define VDSO_HEADER "vdso-32.c.inc" 1827 #else 1828 #define ELF_CLASS ELFCLASS64 1829 #define VDSO_HEADER "vdso-64.c.inc" 1830 #endif 1831 1832 #define ELF_HWCAP get_elf_hwcap() 1833 1834 static uint32_t get_elf_hwcap(void) 1835 { 1836 #define MISA_BIT(EXT) (1 << (EXT - 'A')) 1837 RISCVCPU *cpu = RISCV_CPU(thread_cpu); 1838 uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A') 1839 | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C') 1840 | MISA_BIT('V'); 1841 1842 return cpu->env.misa_ext & mask; 1843 #undef MISA_BIT 1844 } 1845 1846 static inline void init_thread(struct target_pt_regs *regs, 1847 struct image_info *infop) 1848 { 1849 regs->sepc = infop->entry; 1850 regs->sp = infop->start_stack; 1851 } 1852 1853 #define ELF_EXEC_PAGESIZE 4096 1854 1855 #endif /* TARGET_RISCV */ 1856 1857 #ifdef TARGET_HPPA 1858 1859 #define ELF_CLASS ELFCLASS32 1860 #define ELF_ARCH EM_PARISC 1861 #define ELF_PLATFORM "PARISC" 1862 #define STACK_GROWS_DOWN 0 1863 #define STACK_ALIGNMENT 64 1864 1865 #define VDSO_HEADER "vdso.c.inc" 1866 1867 static inline void init_thread(struct target_pt_regs *regs, 1868 struct image_info *infop) 1869 { 1870 regs->iaoq[0] = infop->entry; 1871 regs->iaoq[1] = infop->entry + 4; 1872 regs->gr[23] = 0; 1873 regs->gr[24] = infop->argv; 1874 regs->gr[25] = infop->argc; 1875 /* The top-of-stack contains a linkage buffer. */ 1876 regs->gr[30] = infop->start_stack + 64; 1877 regs->gr[31] = infop->entry; 1878 } 1879 1880 #define LO_COMMPAGE 0 1881 1882 static bool init_guest_commpage(void) 1883 { 1884 /* If reserved_va, then we have already mapped 0 page on the host. */ 1885 if (!reserved_va) { 1886 void *want, *addr; 1887 1888 want = g2h_untagged(LO_COMMPAGE); 1889 addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE, 1890 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0); 1891 if (addr == MAP_FAILED) { 1892 perror("Allocating guest commpage"); 1893 exit(EXIT_FAILURE); 1894 } 1895 if (addr != want) { 1896 return false; 1897 } 1898 } 1899 1900 /* 1901 * On Linux, page zero is normally marked execute only + gateway. 1902 * Normal read or write is supposed to fail (thus PROT_NONE above), 1903 * but specific offsets have kernel code mapped to raise permissions 1904 * and implement syscalls. Here, simply mark the page executable. 1905 * Special case the entry points during translation (see do_page_zero). 
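 *
 * As a hedged illustration (offsets from the parisc Linux ABI, not
 * something this file defines): a guest makes a syscall by branching
 * into this page, e.g.
 *
 *     ble 0x100(%sr2, %r0)
 *
 * with 0x100 being the syscall entry and 0xb0 the light-weight
 * syscall (atomic helper) entry recognized by do_page_zero().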
1906 */ 1907 page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, 1908 PAGE_EXEC | PAGE_VALID); 1909 return true; 1910 } 1911 1912 #endif /* TARGET_HPPA */ 1913 1914 #ifdef TARGET_XTENSA 1915 1916 #define ELF_CLASS ELFCLASS32 1917 #define ELF_ARCH EM_XTENSA 1918 1919 static inline void init_thread(struct target_pt_regs *regs, 1920 struct image_info *infop) 1921 { 1922 regs->windowbase = 0; 1923 regs->windowstart = 1; 1924 regs->areg[1] = infop->start_stack; 1925 regs->pc = infop->entry; 1926 if (info_is_fdpic(infop)) { 1927 regs->areg[4] = infop->loadmap_addr; 1928 regs->areg[5] = infop->interpreter_loadmap_addr; 1929 if (infop->interpreter_loadmap_addr) { 1930 regs->areg[6] = infop->interpreter_pt_dynamic_addr; 1931 } else { 1932 regs->areg[6] = infop->pt_dynamic_addr; 1933 } 1934 } 1935 } 1936 1937 /* See linux kernel: arch/xtensa/include/asm/elf.h. */ 1938 #define ELF_NREG 128 1939 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1940 1941 enum { 1942 TARGET_REG_PC, 1943 TARGET_REG_PS, 1944 TARGET_REG_LBEG, 1945 TARGET_REG_LEND, 1946 TARGET_REG_LCOUNT, 1947 TARGET_REG_SAR, 1948 TARGET_REG_WINDOWSTART, 1949 TARGET_REG_WINDOWBASE, 1950 TARGET_REG_THREADPTR, 1951 TARGET_REG_AR0 = 64, 1952 }; 1953 1954 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1955 const CPUXtensaState *env) 1956 { 1957 unsigned i; 1958 1959 (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 1960 (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM); 1961 (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]); 1962 (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]); 1963 (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]); 1964 (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]); 1965 (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]); 1966 (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]); 1967 (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]); 1968 xtensa_sync_phys_from_window((CPUXtensaState *)env); 1969 for (i = 0; i < env->config->nareg; ++i) { 1970 (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]); 1971 } 1972 } 1973 1974 #define USE_ELF_CORE_DUMP 1975 #define ELF_EXEC_PAGESIZE 4096 1976 1977 #endif /* TARGET_XTENSA */ 1978 1979 #ifdef TARGET_HEXAGON 1980 1981 #define ELF_CLASS ELFCLASS32 1982 #define ELF_ARCH EM_HEXAGON 1983 1984 static inline void init_thread(struct target_pt_regs *regs, 1985 struct image_info *infop) 1986 { 1987 regs->sepc = infop->entry; 1988 regs->sp = infop->start_stack; 1989 } 1990 1991 #endif /* TARGET_HEXAGON */ 1992 1993 #ifndef ELF_BASE_PLATFORM 1994 #define ELF_BASE_PLATFORM (NULL) 1995 #endif 1996 1997 #ifndef ELF_PLATFORM 1998 #define ELF_PLATFORM (NULL) 1999 #endif 2000 2001 #ifndef ELF_MACHINE 2002 #define ELF_MACHINE ELF_ARCH 2003 #endif 2004 2005 #ifndef elf_check_arch 2006 #define elf_check_arch(x) ((x) == ELF_ARCH) 2007 #endif 2008 2009 #ifndef elf_check_abi 2010 #define elf_check_abi(x) (1) 2011 #endif 2012 2013 #ifndef ELF_HWCAP 2014 #define ELF_HWCAP 0 2015 #endif 2016 2017 #ifndef STACK_GROWS_DOWN 2018 #define STACK_GROWS_DOWN 1 2019 #endif 2020 2021 #ifndef STACK_ALIGNMENT 2022 #define STACK_ALIGNMENT 16 2023 #endif 2024 2025 #ifdef TARGET_ABI32 2026 #undef ELF_CLASS 2027 #define ELF_CLASS ELFCLASS32 2028 #undef bswaptls 2029 #define bswaptls(ptr) bswap32s(ptr) 2030 #endif 2031 2032 #ifndef EXSTACK_DEFAULT 2033 #define EXSTACK_DEFAULT false 2034 #endif 2035 2036 #include "elf.h" 2037 2038 /* We must delay the following stanzas until after "elf.h". 
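 * (That header selects the Elf32 vs Elf64 flavors of elfhdr, elf_phdr
 * and friends from the ELF_CLASS chosen above, roughly
 *
 *     #if ELF_CLASS == ELFCLASS32
 *     #define elfhdr elf32_hdr
 *     ...
 *
 * so anything naming those types has to come after the include.)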
*/ 2039 #if defined(TARGET_AARCH64) 2040 2041 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2042 const uint32_t *data, 2043 struct image_info *info, 2044 Error **errp) 2045 { 2046 if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { 2047 if (pr_datasz != sizeof(uint32_t)) { 2048 error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND"); 2049 return false; 2050 } 2051 /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */ 2052 info->note_flags = *data; 2053 } 2054 return true; 2055 } 2056 #define ARCH_USE_GNU_PROPERTY 1 2057 2058 #else 2059 2060 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2061 const uint32_t *data, 2062 struct image_info *info, 2063 Error **errp) 2064 { 2065 g_assert_not_reached(); 2066 } 2067 #define ARCH_USE_GNU_PROPERTY 0 2068 2069 #endif 2070 2071 struct exec 2072 { 2073 unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 2074 unsigned int a_text; /* length of text, in bytes */ 2075 unsigned int a_data; /* length of data, in bytes */ 2076 unsigned int a_bss; /* length of uninitialized data area, in bytes */ 2077 unsigned int a_syms; /* length of symbol table data in file, in bytes */ 2078 unsigned int a_entry; /* start address */ 2079 unsigned int a_trsize; /* length of relocation info for text, in bytes */ 2080 unsigned int a_drsize; /* length of relocation info for data, in bytes */ 2081 }; 2082 2083 2084 #define N_MAGIC(exec) ((exec).a_info & 0xffff) 2085 #define OMAGIC 0407 2086 #define NMAGIC 0410 2087 #define ZMAGIC 0413 2088 #define QMAGIC 0314 2089 2090 #define DLINFO_ITEMS 16 2091 2092 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n) 2093 { 2094 memcpy(to, from, n); 2095 } 2096 2097 #ifdef BSWAP_NEEDED 2098 static void bswap_ehdr(struct elfhdr *ehdr) 2099 { 2100 bswap16s(&ehdr->e_type); /* Object file type */ 2101 bswap16s(&ehdr->e_machine); /* Architecture */ 2102 bswap32s(&ehdr->e_version); /* Object file version */ 2103 bswaptls(&ehdr->e_entry); /* Entry point virtual address */ 2104 bswaptls(&ehdr->e_phoff); /* Program header table file offset */ 2105 bswaptls(&ehdr->e_shoff); /* Section header table file offset */ 2106 bswap32s(&ehdr->e_flags); /* Processor-specific flags */ 2107 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ 2108 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ 2109 bswap16s(&ehdr->e_phnum); /* Program header table entry count */ 2110 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ 2111 bswap16s(&ehdr->e_shnum); /* Section header table entry count */ 2112 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ 2113 } 2114 2115 static void bswap_phdr(struct elf_phdr *phdr, int phnum) 2116 { 2117 int i; 2118 for (i = 0; i < phnum; ++i, ++phdr) { 2119 bswap32s(&phdr->p_type); /* Segment type */ 2120 bswap32s(&phdr->p_flags); /* Segment flags */ 2121 bswaptls(&phdr->p_offset); /* Segment file offset */ 2122 bswaptls(&phdr->p_vaddr); /* Segment virtual address */ 2123 bswaptls(&phdr->p_paddr); /* Segment physical address */ 2124 bswaptls(&phdr->p_filesz); /* Segment size in file */ 2125 bswaptls(&phdr->p_memsz); /* Segment size in memory */ 2126 bswaptls(&phdr->p_align); /* Segment alignment */ 2127 } 2128 } 2129 2130 static void bswap_shdr(struct elf_shdr *shdr, int shnum) 2131 { 2132 int i; 2133 for (i = 0; i < shnum; ++i, ++shdr) { 2134 bswap32s(&shdr->sh_name); 2135 bswap32s(&shdr->sh_type); 2136 bswaptls(&shdr->sh_flags); 2137 bswaptls(&shdr->sh_addr); 2138 
bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

#ifdef TARGET_MIPS
static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
{
    bswap16s(&abiflags->version);
    bswap32s(&abiflags->ases);
    bswap32s(&abiflags->isa_ext);
    bswap32s(&abiflags->flags1);
    bswap32s(&abiflags->flags2);
}
#endif
#else
static inline void bswap_ehdr(struct elfhdr *ehdr) { }
static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static inline void bswap_sym(struct elf_sym *sym) { }
#ifdef TARGET_MIPS
static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
#endif
#endif

#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUArchState *);
#endif /* USE_ELF_CORE_DUMP */
static void load_symbols(struct elfhdr *hdr, const ImageSource *src,
                         abi_ulong load_bias);

/* Verify the portions of EHDR within E_IDENT for the target.
   This can be performed before bswapping the entire header.  */
static bool elf_check_ident(struct elfhdr *ehdr)
{
    return (ehdr->e_ident[EI_MAG0] == ELFMAG0
            && ehdr->e_ident[EI_MAG1] == ELFMAG1
            && ehdr->e_ident[EI_MAG2] == ELFMAG2
            && ehdr->e_ident[EI_MAG3] == ELFMAG3
            && ehdr->e_ident[EI_CLASS] == ELF_CLASS
            && ehdr->e_ident[EI_DATA] == ELF_DATA
            && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
}

/* Verify the portions of EHDR outside of E_IDENT for the target.
   This has to wait until after bswapping the header.  */
static bool elf_check_ehdr(struct elfhdr *ehdr)
{
    return (elf_check_arch(ehdr->e_machine)
            && elf_check_abi(ehdr->e_flags)
            && ehdr->e_ehsize == sizeof(struct elfhdr)
            && ehdr->e_phentsize == sizeof(struct elf_phdr)
            && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
}

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
                                  abi_ulong p, abi_ulong stack_limit)
{
    char *tmp;
    int len, i;
    abi_ulong top = p;

    if (!p) {
        return 0;       /* bullet-proofing */
    }

    if (STACK_GROWS_DOWN) {
        int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
        for (i = argc - 1; i >= 0; --i) {
            tmp = argv[i];
            if (!tmp) {
                fprintf(stderr, "VFS: argc is wrong");
                exit(-1);
            }
            len = strlen(tmp) + 1;
            tmp += len;

            if (len > (p - stack_limit)) {
                return 0;
            }
            while (len) {
                int bytes_to_copy = (len > offset) ?
offset : len; 2238 tmp -= bytes_to_copy; 2239 p -= bytes_to_copy; 2240 offset -= bytes_to_copy; 2241 len -= bytes_to_copy; 2242 2243 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy); 2244 2245 if (offset == 0) { 2246 memcpy_to_target(p, scratch, top - p); 2247 top = p; 2248 offset = TARGET_PAGE_SIZE; 2249 } 2250 } 2251 } 2252 if (p != top) { 2253 memcpy_to_target(p, scratch + offset, top - p); 2254 } 2255 } else { 2256 int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE); 2257 for (i = 0; i < argc; ++i) { 2258 tmp = argv[i]; 2259 if (!tmp) { 2260 fprintf(stderr, "VFS: argc is wrong"); 2261 exit(-1); 2262 } 2263 len = strlen(tmp) + 1; 2264 if (len > (stack_limit - p)) { 2265 return 0; 2266 } 2267 while (len) { 2268 int bytes_to_copy = (len > remaining) ? remaining : len; 2269 2270 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy); 2271 2272 tmp += bytes_to_copy; 2273 remaining -= bytes_to_copy; 2274 p += bytes_to_copy; 2275 len -= bytes_to_copy; 2276 2277 if (remaining == 0) { 2278 memcpy_to_target(top, scratch, p - top); 2279 top = p; 2280 remaining = TARGET_PAGE_SIZE; 2281 } 2282 } 2283 } 2284 if (p != top) { 2285 memcpy_to_target(top, scratch, p - top); 2286 } 2287 } 2288 2289 return p; 2290 } 2291 2292 /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of 2293 * argument/environment space. Newer kernels (>2.6.33) allow more, 2294 * dependent on stack size, but guarantee at least 32 pages for 2295 * backwards compatibility. 2296 */ 2297 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE) 2298 2299 static abi_ulong setup_arg_pages(struct linux_binprm *bprm, 2300 struct image_info *info) 2301 { 2302 abi_ulong size, error, guard; 2303 int prot; 2304 2305 size = guest_stack_size; 2306 if (size < STACK_LOWER_LIMIT) { 2307 size = STACK_LOWER_LIMIT; 2308 } 2309 2310 if (STACK_GROWS_DOWN) { 2311 guard = TARGET_PAGE_SIZE; 2312 if (guard < qemu_real_host_page_size()) { 2313 guard = qemu_real_host_page_size(); 2314 } 2315 } else { 2316 /* no guard page for hppa target where stack grows upwards. */ 2317 guard = 0; 2318 } 2319 2320 prot = PROT_READ | PROT_WRITE; 2321 if (info->exec_stack) { 2322 prot |= PROT_EXEC; 2323 } 2324 error = target_mmap(0, size + guard, prot, 2325 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 2326 if (error == -1) { 2327 perror("mmap stack"); 2328 exit(-1); 2329 } 2330 2331 /* We reserve one extra page at the top of the stack as guard. */ 2332 if (STACK_GROWS_DOWN) { 2333 target_mprotect(error, guard, PROT_NONE); 2334 info->stack_limit = error + guard; 2335 return info->stack_limit + size - sizeof(void *); 2336 } else { 2337 info->stack_limit = error + size; 2338 return error; 2339 } 2340 } 2341 2342 /** 2343 * zero_bss: 2344 * 2345 * Map and zero the bss. We need to explicitly zero any fractional pages 2346 * after the data section (i.e. bss). Return false on mapping failure. 2347 */ 2348 static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss, 2349 int prot, Error **errp) 2350 { 2351 abi_ulong align_bss; 2352 2353 /* We only expect writable bss; the code segment shouldn't need this. */ 2354 if (!(prot & PROT_WRITE)) { 2355 error_setg(errp, "PT_LOAD with non-writable bss"); 2356 return false; 2357 } 2358 2359 align_bss = TARGET_PAGE_ALIGN(start_bss); 2360 end_bss = TARGET_PAGE_ALIGN(end_bss); 2361 2362 if (start_bss < align_bss) { 2363 int flags = page_get_flags(start_bss); 2364 2365 if (!(flags & PAGE_RWX)) { 2366 /* 2367 * The whole address space of the executable was reserved 2368 * at the start, therefore all pages will be VALID. 
2369 * But assuming there are no PROT_NONE PT_LOAD segments, 2370 * a PROT_NONE page means no data all bss, and we can 2371 * simply extend the new anon mapping back to the start 2372 * of the page of bss. 2373 */ 2374 align_bss -= TARGET_PAGE_SIZE; 2375 } else { 2376 /* 2377 * The start of the bss shares a page with something. 2378 * The only thing that we expect is the data section, 2379 * which would already be marked writable. 2380 * Overlapping the RX code segment seems malformed. 2381 */ 2382 if (!(flags & PAGE_WRITE)) { 2383 error_setg(errp, "PT_LOAD with bss overlapping " 2384 "non-writable page"); 2385 return false; 2386 } 2387 2388 /* The page is already mapped and writable. */ 2389 memset(g2h_untagged(start_bss), 0, align_bss - start_bss); 2390 } 2391 } 2392 2393 if (align_bss < end_bss && 2394 target_mmap(align_bss, end_bss - align_bss, prot, 2395 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) { 2396 error_setg_errno(errp, errno, "Error mapping bss"); 2397 return false; 2398 } 2399 return true; 2400 } 2401 2402 #if defined(TARGET_ARM) 2403 static int elf_is_fdpic(struct elfhdr *exec) 2404 { 2405 return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC; 2406 } 2407 #elif defined(TARGET_XTENSA) 2408 static int elf_is_fdpic(struct elfhdr *exec) 2409 { 2410 return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC; 2411 } 2412 #else 2413 /* Default implementation, always false. */ 2414 static int elf_is_fdpic(struct elfhdr *exec) 2415 { 2416 return 0; 2417 } 2418 #endif 2419 2420 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp) 2421 { 2422 uint16_t n; 2423 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs; 2424 2425 /* elf32_fdpic_loadseg */ 2426 n = info->nsegs; 2427 while (n--) { 2428 sp -= 12; 2429 put_user_u32(loadsegs[n].addr, sp+0); 2430 put_user_u32(loadsegs[n].p_vaddr, sp+4); 2431 put_user_u32(loadsegs[n].p_memsz, sp+8); 2432 } 2433 2434 /* elf32_fdpic_loadmap */ 2435 sp -= 4; 2436 put_user_u16(0, sp+0); /* version */ 2437 put_user_u16(info->nsegs, sp+2); /* nsegs */ 2438 2439 info->personality = PER_LINUX_FDPIC; 2440 info->loadmap_addr = sp; 2441 2442 return sp; 2443 } 2444 2445 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, 2446 struct elfhdr *exec, 2447 struct image_info *info, 2448 struct image_info *interp_info, 2449 struct image_info *vdso_info) 2450 { 2451 abi_ulong sp; 2452 abi_ulong u_argc, u_argv, u_envp, u_auxv; 2453 int size; 2454 int i; 2455 abi_ulong u_rand_bytes; 2456 uint8_t k_rand_bytes[16]; 2457 abi_ulong u_platform, u_base_platform; 2458 const char *k_platform, *k_base_platform; 2459 const int n = sizeof(elf_addr_t); 2460 2461 sp = p; 2462 2463 /* Needs to be before we load the env/argc/... 
*/ 2464 if (elf_is_fdpic(exec)) { 2465 /* Need 4 byte alignment for these structs */ 2466 sp &= ~3; 2467 sp = loader_build_fdpic_loadmap(info, sp); 2468 info->other_info = interp_info; 2469 if (interp_info) { 2470 interp_info->other_info = info; 2471 sp = loader_build_fdpic_loadmap(interp_info, sp); 2472 info->interpreter_loadmap_addr = interp_info->loadmap_addr; 2473 info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr; 2474 } else { 2475 info->interpreter_loadmap_addr = 0; 2476 info->interpreter_pt_dynamic_addr = 0; 2477 } 2478 } 2479 2480 u_base_platform = 0; 2481 k_base_platform = ELF_BASE_PLATFORM; 2482 if (k_base_platform) { 2483 size_t len = strlen(k_base_platform) + 1; 2484 if (STACK_GROWS_DOWN) { 2485 sp -= (len + n - 1) & ~(n - 1); 2486 u_base_platform = sp; 2487 /* FIXME - check return value of memcpy_to_target() for failure */ 2488 memcpy_to_target(sp, k_base_platform, len); 2489 } else { 2490 memcpy_to_target(sp, k_base_platform, len); 2491 u_base_platform = sp; 2492 sp += len + 1; 2493 } 2494 } 2495 2496 u_platform = 0; 2497 k_platform = ELF_PLATFORM; 2498 if (k_platform) { 2499 size_t len = strlen(k_platform) + 1; 2500 if (STACK_GROWS_DOWN) { 2501 sp -= (len + n - 1) & ~(n - 1); 2502 u_platform = sp; 2503 /* FIXME - check return value of memcpy_to_target() for failure */ 2504 memcpy_to_target(sp, k_platform, len); 2505 } else { 2506 memcpy_to_target(sp, k_platform, len); 2507 u_platform = sp; 2508 sp += len + 1; 2509 } 2510 } 2511 2512 /* Provide 16 byte alignment for the PRNG, and basic alignment for 2513 * the argv and envp pointers. 2514 */ 2515 if (STACK_GROWS_DOWN) { 2516 sp = QEMU_ALIGN_DOWN(sp, 16); 2517 } else { 2518 sp = QEMU_ALIGN_UP(sp, 16); 2519 } 2520 2521 /* 2522 * Generate 16 random bytes for userspace PRNG seeding. 2523 */ 2524 qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes)); 2525 if (STACK_GROWS_DOWN) { 2526 sp -= 16; 2527 u_rand_bytes = sp; 2528 /* FIXME - check return value of memcpy_to_target() for failure */ 2529 memcpy_to_target(sp, k_rand_bytes, 16); 2530 } else { 2531 memcpy_to_target(sp, k_rand_bytes, 16); 2532 u_rand_bytes = sp; 2533 sp += 16; 2534 } 2535 2536 size = (DLINFO_ITEMS + 1) * 2; 2537 if (k_base_platform) { 2538 size += 2; 2539 } 2540 if (k_platform) { 2541 size += 2; 2542 } 2543 if (vdso_info) { 2544 size += 2; 2545 } 2546 #ifdef DLINFO_ARCH_ITEMS 2547 size += DLINFO_ARCH_ITEMS * 2; 2548 #endif 2549 #ifdef ELF_HWCAP2 2550 size += 2; 2551 #endif 2552 info->auxv_len = size * n; 2553 2554 size += envc + argc + 2; 2555 size += 1; /* argc itself */ 2556 size *= n; 2557 2558 /* Allocate space and finalize stack alignment for entry now. */ 2559 if (STACK_GROWS_DOWN) { 2560 u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT); 2561 sp = u_argc; 2562 } else { 2563 u_argc = sp; 2564 sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT); 2565 } 2566 2567 u_argv = u_argc + n; 2568 u_envp = u_argv + (argc + 1) * n; 2569 u_auxv = u_envp + (envc + 1) * n; 2570 info->saved_auxv = u_auxv; 2571 info->argc = argc; 2572 info->envc = envc; 2573 info->argv = u_argv; 2574 info->envp = u_envp; 2575 2576 /* This is correct because Linux defines 2577 * elf_addr_t as Elf32_Off / Elf64_Off 2578 */ 2579 #define NEW_AUX_ENT(id, val) do { \ 2580 put_user_ual(id, u_auxv); u_auxv += n; \ 2581 put_user_ual(val, u_auxv); u_auxv += n; \ 2582 } while(0) 2583 2584 #ifdef ARCH_DLINFO 2585 /* 2586 * ARCH_DLINFO must come first so platform specific code can enforce 2587 * special alignment requirements on the AUXV if necessary (eg. PPC). 
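 * For example (a sketch of the PPC case, not a definitive list), its
 * ARCH_DLINFO block is just a series of NEW_AUX_ENT(...) pairs adding
 * cache block-size entries such as AT_DCACHEBSIZE, and its
 * DLINFO_ARCH_ITEMS must match the number of pairs emitted so the
 * auxv length assertion below holds.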
 */
    ARCH_DLINFO;
#endif
    /* There must be exactly DLINFO_ITEMS entries here, or the assert
     * on info->auxv_len will trigger.
     */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, info->entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
    NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));
    NEW_AUX_ENT(AT_EXECFN, info->file_string);

#ifdef ELF_HWCAP2
    NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
#endif

    if (u_base_platform) {
        NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform);
    }
    if (u_platform) {
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
    }
    if (vdso_info) {
        NEW_AUX_ENT(AT_SYSINFO_EHDR, vdso_info->load_addr);
    }
    NEW_AUX_ENT(AT_NULL, 0);
#undef NEW_AUX_ENT

    /* Check that our initial calculation of the auxv length matches how much
     * we actually put into it.
     */
    assert(info->auxv_len == u_auxv - info->saved_auxv);

    put_user_ual(argc, u_argc);

    p = info->arg_strings;
    for (i = 0; i < argc; ++i) {
        put_user_ual(p, u_argv);
        u_argv += n;
        p += target_strlen(p) + 1;
    }
    put_user_ual(0, u_argv);

    p = info->env_strings;
    for (i = 0; i < envc; ++i) {
        put_user_ual(p, u_envp);
        u_envp += n;
        p += target_strlen(p) + 1;
    }
    put_user_ual(0, u_envp);

    return sp;
}

#if defined(HI_COMMPAGE)
#define LO_COMMPAGE -1
#elif defined(LO_COMMPAGE)
#define HI_COMMPAGE 0
#else
#define HI_COMMPAGE 0
#define LO_COMMPAGE -1
#ifndef INIT_GUEST_COMMPAGE
#define init_guest_commpage() true
#endif
#endif

/**
 * pgb_try_mmap:
 * @addr: host start address
 * @addr_last: host last address
 * @keep: do not unmap the probe region
 *
 * Return 1 if [@addr, @addr_last] is not mapped in the host,
 * return 0 if it is not available to map, and -1 on mmap error.
 * If @keep, the region is left mapped on success, otherwise unmapped.
 */
static int pgb_try_mmap(uintptr_t addr, uintptr_t addr_last, bool keep)
{
    size_t size = addr_last - addr + 1;
    void *p = mmap((void *)addr, size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE |
                   MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0);
    int ret;

    if (p == MAP_FAILED) {
        return errno == EEXIST ? 0 : -1;
    }
    ret = p == (void *)addr;
    if (!keep || !ret) {
        munmap(p, size);
    }
    return ret;
}

/**
 * pgb_try_mmap_skip_brk:
 * @addr: host start address
 * @addr_last: host last address
 * @brk: host brk
 * @keep: do not unmap the probe region
 *
 * Like pgb_try_mmap, but additionally reserve some memory following brk.
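 * The host libc may still grow the heap via brk/sbrk on behalf of
 * malloc; a guest mapping dropped just above the current break would
 * make those later calls fail, so a 16 MiB window past @brk is kept
 * clear (see brk_last below).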
 */
static int pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t addr_last,
                                 uintptr_t brk, bool keep)
{
    uintptr_t brk_last = brk + 16 * MiB - 1;

    /* Do not map anything close to the host brk. */
    if (addr <= brk_last && brk <= addr_last) {
        return 0;
    }
    return pgb_try_mmap(addr, addr_last, keep);
}

/**
 * pgb_try_mmap_set:
 * @ga: set of guest addrs
 * @base: guest_base
 * @brk: host brk
 *
 * Return true if all @ga can be mapped by the host at @base.
 * On success, retain the mapping at index 0 for reserved_va.
 */

typedef struct PGBAddrs {
    uintptr_t bounds[3][2]; /* start/last pairs */
    int nbounds;
} PGBAddrs;

static bool pgb_try_mmap_set(const PGBAddrs *ga, uintptr_t base, uintptr_t brk)
{
    for (int i = ga->nbounds - 1; i >= 0; --i) {
        if (pgb_try_mmap_skip_brk(ga->bounds[i][0] + base,
                                  ga->bounds[i][1] + base,
                                  brk, i == 0 && reserved_va) <= 0) {
            return false;
        }
    }
    return true;
}

/**
 * pgb_addr_set:
 * @ga: output set of guest addrs
 * @guest_loaddr: guest image low address
 * @guest_hiaddr: guest image high address
 * @try_identity: create for identity mapping
 *
 * Fill in @ga with the image, COMMPAGE and NULL page.
 */
static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr,
                         abi_ulong guest_hiaddr, bool try_identity)
{
    int n;

    /*
     * With a low commpage, or a guest mapped very low,
     * we may not be able to use the identity map.
     */
    if (try_identity) {
        if (LO_COMMPAGE != -1 && LO_COMMPAGE < mmap_min_addr) {
            return false;
        }
        if (guest_loaddr != 0 && guest_loaddr < mmap_min_addr) {
            return false;
        }
    }

    memset(ga, 0, sizeof(*ga));
    n = 0;

    if (reserved_va) {
        ga->bounds[n][0] = try_identity ? mmap_min_addr : 0;
        ga->bounds[n][1] = reserved_va;
        n++;
        /* LO_COMMPAGE and NULL handled by reserving from 0. */
    } else {
        /* Add any LO_COMMPAGE or NULL page. */
        if (LO_COMMPAGE != -1) {
            ga->bounds[n][0] = 0;
            ga->bounds[n][1] = LO_COMMPAGE + TARGET_PAGE_SIZE - 1;
            n++;
        } else if (!try_identity) {
            ga->bounds[n][0] = 0;
            ga->bounds[n][1] = TARGET_PAGE_SIZE - 1;
            n++;
        }

        /* Add the guest image for ET_EXEC. */
        if (guest_loaddr) {
            ga->bounds[n][0] = guest_loaddr;
            ga->bounds[n][1] = guest_hiaddr;
            n++;
        }
    }

    /*
     * Temporarily disable
     * "comparison is always false due to limited range of data type"
     * due to comparison between unsigned and (possible) 0.
     */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wtype-limits"

    /* Add any HI_COMMPAGE not covered by reserved_va. */
    if (reserved_va < HI_COMMPAGE) {
        ga->bounds[n][0] = HI_COMMPAGE & qemu_real_host_page_mask();
        ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1;
        n++;
    }

#pragma GCC diagnostic pop

    ga->nbounds = n;
    return true;
}

static void pgb_fail_in_use(const char *image_name)
{
    error_report("%s: requires virtual address space that is in use "
                 "(omit the -B option or choose a different value)",
                 image_name);
    exit(EXIT_FAILURE);
}

static void pgb_fixed(const char *image_name, uintptr_t guest_loaddr,
                      uintptr_t guest_hiaddr, uintptr_t align)
{
    PGBAddrs ga;
    uintptr_t brk = (uintptr_t)sbrk(0);

    if (!QEMU_IS_ALIGNED(guest_base, align)) {
        fprintf(stderr, "Requested guest base %p does not satisfy "
                "host minimum alignment (0x%" PRIxPTR ")\n",
                (void *)guest_base, align);
        exit(EXIT_FAILURE);
    }

    if (!pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, !guest_base)
        || !pgb_try_mmap_set(&ga, guest_base, brk)) {
        pgb_fail_in_use(image_name);
    }
}

/**
 * pgb_find_fallback:
 *
 * This is a fallback method for finding holes in the host address space
 * if we don't have the benefit of being able to access /proc/self/maps.
 * It can potentially take a very long time as we can only dumbly iterate
 * up the host address space seeing if the allocation would work.
 */
static uintptr_t pgb_find_fallback(const PGBAddrs *ga, uintptr_t align,
                                   uintptr_t brk)
{
    /* TODO: come up with a better estimate of how much to skip. */
    uintptr_t skip = sizeof(uintptr_t) == 4 ? MiB : GiB;

    for (uintptr_t base = skip; ; base += skip) {
        base = ROUND_UP(base, align);
        if (pgb_try_mmap_set(ga, base, brk)) {
            return base;
        }
        if (base >= -skip) {
            return -1;
        }
    }
}

static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base,
                               IntervalTreeRoot *root)
{
    for (int i = ga->nbounds - 1; i >= 0; --i) {
        uintptr_t s = base + ga->bounds[i][0];
        uintptr_t l = base + ga->bounds[i][1];
        IntervalTreeNode *n;

        if (l < s) {
            /* Wraparound.  Skip to advance S to mmap_min_addr. */
            return mmap_min_addr - s;
        }

        n = interval_tree_iter_first(root, s, l);
        if (n != NULL) {
            /* Conflict.  Skip to advance S to LAST + 1. */
            return n->last - s + 1;
        }
    }
    return 0;  /* success */
}

static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root,
                                uintptr_t align, uintptr_t brk)
{
    uintptr_t last = mmap_min_addr;
    uintptr_t base, skip;

    while (true) {
        base = ROUND_UP(last, align);
        if (base < last) {
            return -1;
        }

        skip = pgb_try_itree(ga, base, root);
        if (skip == 0) {
            break;
        }

        last = base + skip;
        if (last < base) {
            return -1;
        }
    }

    /*
     * We've chosen 'base' based on holes in the interval tree,
     * but we don't yet know if it is a valid host address.
     * Because it is the first matching hole, if the host addresses
     * are invalid we know there are no further matches.
     */
    return pgb_try_mmap_set(ga, base, brk) ?
base : -1; 2920 } 2921 2922 static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr, 2923 uintptr_t guest_hiaddr, uintptr_t align) 2924 { 2925 IntervalTreeRoot *root; 2926 uintptr_t brk, ret; 2927 PGBAddrs ga; 2928 2929 /* Try the identity map first. */ 2930 if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) { 2931 brk = (uintptr_t)sbrk(0); 2932 if (pgb_try_mmap_set(&ga, 0, brk)) { 2933 guest_base = 0; 2934 return; 2935 } 2936 } 2937 2938 /* 2939 * Rebuild the address set for non-identity map. 2940 * This differs in the mapping of the guest NULL page. 2941 */ 2942 pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, false); 2943 2944 root = read_self_maps(); 2945 2946 /* Read brk after we've read the maps, which will malloc. */ 2947 brk = (uintptr_t)sbrk(0); 2948 2949 if (!root) { 2950 ret = pgb_find_fallback(&ga, align, brk); 2951 } else { 2952 /* 2953 * Reserve the area close to the host brk. 2954 * This will be freed with the rest of the tree. 2955 */ 2956 IntervalTreeNode *b = g_new0(IntervalTreeNode, 1); 2957 b->start = brk; 2958 b->last = brk + 16 * MiB - 1; 2959 interval_tree_insert(b, root); 2960 2961 ret = pgb_find_itree(&ga, root, align, brk); 2962 free_self_maps(root); 2963 } 2964 2965 if (ret == -1) { 2966 int w = TARGET_LONG_BITS / 4; 2967 2968 error_report("%s: Unable to find a guest_base to satisfy all " 2969 "guest address mapping requirements", image_name); 2970 2971 for (int i = 0; i < ga.nbounds; ++i) { 2972 error_printf(" %0*" PRIx64 "-%0*" PRIx64 "\n", 2973 w, (uint64_t)ga.bounds[i][0], 2974 w, (uint64_t)ga.bounds[i][1]); 2975 } 2976 exit(EXIT_FAILURE); 2977 } 2978 guest_base = ret; 2979 } 2980 2981 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, 2982 abi_ulong guest_hiaddr) 2983 { 2984 /* In order to use host shmat, we must be able to honor SHMLBA. */ 2985 uintptr_t align = MAX(SHMLBA, TARGET_PAGE_SIZE); 2986 2987 /* Sanity check the guest binary. */ 2988 if (reserved_va) { 2989 if (guest_hiaddr > reserved_va) { 2990 error_report("%s: requires more than reserved virtual " 2991 "address space (0x%" PRIx64 " > 0x%lx)", 2992 image_name, (uint64_t)guest_hiaddr, reserved_va); 2993 exit(EXIT_FAILURE); 2994 } 2995 } else { 2996 if (guest_hiaddr != (uintptr_t)guest_hiaddr) { 2997 error_report("%s: requires more virtual address space " 2998 "than the host can provide (0x%" PRIx64 ")", 2999 image_name, (uint64_t)guest_hiaddr + 1); 3000 exit(EXIT_FAILURE); 3001 } 3002 } 3003 3004 if (have_guest_base) { 3005 pgb_fixed(image_name, guest_loaddr, guest_hiaddr, align); 3006 } else { 3007 pgb_dynamic(image_name, guest_loaddr, guest_hiaddr, align); 3008 } 3009 3010 /* Reserve and initialize the commpage. */ 3011 if (!init_guest_commpage()) { 3012 /* We have already probed for the commpage being free. */ 3013 g_assert_not_reached(); 3014 } 3015 3016 assert(QEMU_IS_ALIGNED(guest_base, align)); 3017 qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space " 3018 "@ 0x%" PRIx64 "\n", (uint64_t)guest_base); 3019 } 3020 3021 enum { 3022 /* The string "GNU\0" as a magic number. */ 3023 GNU0_MAGIC = const_le32('G' | 'N' << 8 | 'U' << 16), 3024 NOTE_DATA_SZ = 1 * KiB, 3025 NOTE_NAME_SZ = 4, 3026 ELF_GNU_PROPERTY_ALIGN = ELF_CLASS == ELFCLASS32 ? 4 : 8, 3027 }; 3028 3029 /* 3030 * Process a single gnu_property entry. 3031 * Return false for error. 
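 *
 * Each entry in the note descriptor is laid out as
 *
 *     uint32_t pr_type;
 *     uint32_t pr_datasz;
 *     uint8_t  pr_data[pr_datasz];
 *     ...padding to ELF_GNU_PROPERTY_ALIGN (4 or 8, by ELF_CLASS)...
 *
 * which is the walk the pr_type/pr_datasz/step bookkeeping below does.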
3032 */ 3033 static bool parse_elf_property(const uint32_t *data, int *off, int datasz, 3034 struct image_info *info, bool have_prev_type, 3035 uint32_t *prev_type, Error **errp) 3036 { 3037 uint32_t pr_type, pr_datasz, step; 3038 3039 if (*off > datasz || !QEMU_IS_ALIGNED(*off, ELF_GNU_PROPERTY_ALIGN)) { 3040 goto error_data; 3041 } 3042 datasz -= *off; 3043 data += *off / sizeof(uint32_t); 3044 3045 if (datasz < 2 * sizeof(uint32_t)) { 3046 goto error_data; 3047 } 3048 pr_type = data[0]; 3049 pr_datasz = data[1]; 3050 data += 2; 3051 datasz -= 2 * sizeof(uint32_t); 3052 step = ROUND_UP(pr_datasz, ELF_GNU_PROPERTY_ALIGN); 3053 if (step > datasz) { 3054 goto error_data; 3055 } 3056 3057 /* Properties are supposed to be unique and sorted on pr_type. */ 3058 if (have_prev_type && pr_type <= *prev_type) { 3059 if (pr_type == *prev_type) { 3060 error_setg(errp, "Duplicate property in PT_GNU_PROPERTY"); 3061 } else { 3062 error_setg(errp, "Unsorted property in PT_GNU_PROPERTY"); 3063 } 3064 return false; 3065 } 3066 *prev_type = pr_type; 3067 3068 if (!arch_parse_elf_property(pr_type, pr_datasz, data, info, errp)) { 3069 return false; 3070 } 3071 3072 *off += 2 * sizeof(uint32_t) + step; 3073 return true; 3074 3075 error_data: 3076 error_setg(errp, "Ill-formed property in PT_GNU_PROPERTY"); 3077 return false; 3078 } 3079 3080 /* Process NT_GNU_PROPERTY_TYPE_0. */ 3081 static bool parse_elf_properties(const ImageSource *src, 3082 struct image_info *info, 3083 const struct elf_phdr *phdr, 3084 Error **errp) 3085 { 3086 union { 3087 struct elf_note nhdr; 3088 uint32_t data[NOTE_DATA_SZ / sizeof(uint32_t)]; 3089 } note; 3090 3091 int n, off, datasz; 3092 bool have_prev_type; 3093 uint32_t prev_type; 3094 3095 /* Unless the arch requires properties, ignore them. */ 3096 if (!ARCH_USE_GNU_PROPERTY) { 3097 return true; 3098 } 3099 3100 /* If the properties are crazy large, that's too bad. */ 3101 n = phdr->p_filesz; 3102 if (n > sizeof(note)) { 3103 error_setg(errp, "PT_GNU_PROPERTY too large"); 3104 return false; 3105 } 3106 if (n < sizeof(note.nhdr)) { 3107 error_setg(errp, "PT_GNU_PROPERTY too small"); 3108 return false; 3109 } 3110 3111 if (!imgsrc_read(¬e, phdr->p_offset, n, src, errp)) { 3112 return false; 3113 } 3114 3115 /* 3116 * The contents of a valid PT_GNU_PROPERTY is a sequence 3117 * of uint32_t -- swap them all now. 3118 */ 3119 #ifdef BSWAP_NEEDED 3120 for (int i = 0; i < n / 4; i++) { 3121 bswap32s(note.data + i); 3122 } 3123 #endif 3124 3125 /* 3126 * Note that nhdr is 3 words, and that the "name" described by namesz 3127 * immediately follows nhdr and is thus at the 4th word. Further, all 3128 * of the inputs to the kernel's round_up are multiples of 4. 3129 */ 3130 if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 || 3131 note.nhdr.n_namesz != NOTE_NAME_SZ || 3132 note.data[3] != GNU0_MAGIC) { 3133 error_setg(errp, "Invalid note in PT_GNU_PROPERTY"); 3134 return false; 3135 } 3136 off = sizeof(note.nhdr) + NOTE_NAME_SZ; 3137 3138 datasz = note.nhdr.n_descsz + off; 3139 if (datasz > n) { 3140 error_setg(errp, "Invalid note size in PT_GNU_PROPERTY"); 3141 return false; 3142 } 3143 3144 have_prev_type = false; 3145 prev_type = 0; 3146 while (1) { 3147 if (off == datasz) { 3148 return true; /* end, exit ok */ 3149 } 3150 if (!parse_elf_property(note.data, &off, datasz, info, 3151 have_prev_type, &prev_type, errp)) { 3152 return false; 3153 } 3154 have_prev_type = true; 3155 } 3156 } 3157 3158 /** 3159 * load_elf_image: Load an ELF image into the address space. 
3160 * @image_name: the filename of the image, to use in error messages. 3161 * @src: the ImageSource from which to read. 3162 * @info: info collected from the loaded image. 3163 * @ehdr: the ELF header, not yet bswapped. 3164 * @pinterp_name: record any PT_INTERP string found. 3165 * 3166 * On return: @info values will be filled in, as necessary or available. 3167 */ 3168 3169 static void load_elf_image(const char *image_name, const ImageSource *src, 3170 struct image_info *info, struct elfhdr *ehdr, 3171 char **pinterp_name) 3172 { 3173 g_autofree struct elf_phdr *phdr = NULL; 3174 abi_ulong load_addr, load_bias, loaddr, hiaddr, error; 3175 int i, prot_exec; 3176 Error *err = NULL; 3177 3178 /* 3179 * First of all, some simple consistency checks. 3180 * Note that we rely on the bswapped ehdr staying in bprm_buf, 3181 * for later use by load_elf_binary and create_elf_tables. 3182 */ 3183 if (!imgsrc_read(ehdr, 0, sizeof(*ehdr), src, &err)) { 3184 goto exit_errmsg; 3185 } 3186 if (!elf_check_ident(ehdr)) { 3187 error_setg(&err, "Invalid ELF image for this architecture"); 3188 goto exit_errmsg; 3189 } 3190 bswap_ehdr(ehdr); 3191 if (!elf_check_ehdr(ehdr)) { 3192 error_setg(&err, "Invalid ELF image for this architecture"); 3193 goto exit_errmsg; 3194 } 3195 3196 phdr = imgsrc_read_alloc(ehdr->e_phoff, 3197 ehdr->e_phnum * sizeof(struct elf_phdr), 3198 src, &err); 3199 if (phdr == NULL) { 3200 goto exit_errmsg; 3201 } 3202 bswap_phdr(phdr, ehdr->e_phnum); 3203 3204 info->nsegs = 0; 3205 info->pt_dynamic_addr = 0; 3206 3207 mmap_lock(); 3208 3209 /* 3210 * Find the maximum size of the image and allocate an appropriate 3211 * amount of memory to handle that. Locate the interpreter, if any. 3212 */ 3213 loaddr = -1, hiaddr = 0; 3214 info->alignment = 0; 3215 info->exec_stack = EXSTACK_DEFAULT; 3216 for (i = 0; i < ehdr->e_phnum; ++i) { 3217 struct elf_phdr *eppnt = phdr + i; 3218 if (eppnt->p_type == PT_LOAD) { 3219 abi_ulong a = eppnt->p_vaddr & TARGET_PAGE_MASK; 3220 if (a < loaddr) { 3221 loaddr = a; 3222 } 3223 a = eppnt->p_vaddr + eppnt->p_memsz - 1; 3224 if (a > hiaddr) { 3225 hiaddr = a; 3226 } 3227 ++info->nsegs; 3228 info->alignment |= eppnt->p_align; 3229 } else if (eppnt->p_type == PT_INTERP && pinterp_name) { 3230 g_autofree char *interp_name = NULL; 3231 3232 if (*pinterp_name) { 3233 error_setg(&err, "Multiple PT_INTERP entries"); 3234 goto exit_errmsg; 3235 } 3236 3237 interp_name = imgsrc_read_alloc(eppnt->p_offset, eppnt->p_filesz, 3238 src, &err); 3239 if (interp_name == NULL) { 3240 goto exit_errmsg; 3241 } 3242 if (interp_name[eppnt->p_filesz - 1] != 0) { 3243 error_setg(&err, "Invalid PT_INTERP entry"); 3244 goto exit_errmsg; 3245 } 3246 *pinterp_name = g_steal_pointer(&interp_name); 3247 } else if (eppnt->p_type == PT_GNU_PROPERTY) { 3248 if (!parse_elf_properties(src, info, eppnt, &err)) { 3249 goto exit_errmsg; 3250 } 3251 } else if (eppnt->p_type == PT_GNU_STACK) { 3252 info->exec_stack = eppnt->p_flags & PF_X; 3253 } 3254 } 3255 3256 load_addr = loaddr; 3257 3258 if (pinterp_name != NULL) { 3259 if (ehdr->e_type == ET_EXEC) { 3260 /* 3261 * Make sure that the low address does not conflict with 3262 * MMAP_MIN_ADDR or the QEMU application itself. 3263 */ 3264 probe_guest_base(image_name, loaddr, hiaddr); 3265 } else { 3266 abi_ulong align; 3267 3268 /* 3269 * The binary is dynamic, but we still need to 3270 * select guest_base. In this case we pass a size. 
3271 */ 3272 probe_guest_base(image_name, 0, hiaddr - loaddr); 3273 3274 /* 3275 * Avoid collision with the loader by providing a different 3276 * default load address. 3277 */ 3278 load_addr += elf_et_dyn_base; 3279 3280 /* 3281 * TODO: Better support for mmap alignment is desirable. 3282 * Since we do not have complete control over the guest 3283 * address space, we prefer the kernel to choose some address 3284 * rather than force the use of LOAD_ADDR via MAP_FIXED. 3285 * But without MAP_FIXED we cannot guarantee alignment, 3286 * only suggest it. 3287 */ 3288 align = pow2ceil(info->alignment); 3289 if (align) { 3290 load_addr &= -align; 3291 } 3292 } 3293 } 3294 3295 /* 3296 * Reserve address space for all of this. 3297 * 3298 * In the case of ET_EXEC, we supply MAP_FIXED_NOREPLACE so that we get 3299 * exactly the address range that is required. Without reserved_va, 3300 * the guest address space is not isolated. We have attempted to avoid 3301 * conflict with the host program itself via probe_guest_base, but using 3302 * MAP_FIXED_NOREPLACE instead of MAP_FIXED provides an extra check. 3303 * 3304 * Otherwise this is ET_DYN, and we are searching for a location 3305 * that can hold the memory space required. If the image is 3306 * pre-linked, LOAD_ADDR will be non-zero, and the kernel should 3307 * honor that address if it happens to be free. 3308 * 3309 * In both cases, we will overwrite pages in this range with mappings 3310 * from the executable. 3311 */ 3312 load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE, 3313 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | 3314 (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0), 3315 -1, 0); 3316 if (load_addr == -1) { 3317 goto exit_mmap; 3318 } 3319 load_bias = load_addr - loaddr; 3320 3321 if (elf_is_fdpic(ehdr)) { 3322 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs = 3323 g_malloc(sizeof(*loadsegs) * info->nsegs); 3324 3325 for (i = 0; i < ehdr->e_phnum; ++i) { 3326 switch (phdr[i].p_type) { 3327 case PT_DYNAMIC: 3328 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias; 3329 break; 3330 case PT_LOAD: 3331 loadsegs->addr = phdr[i].p_vaddr + load_bias; 3332 loadsegs->p_vaddr = phdr[i].p_vaddr; 3333 loadsegs->p_memsz = phdr[i].p_memsz; 3334 ++loadsegs; 3335 break; 3336 } 3337 } 3338 } 3339 3340 info->load_bias = load_bias; 3341 info->code_offset = load_bias; 3342 info->data_offset = load_bias; 3343 info->load_addr = load_addr; 3344 info->entry = ehdr->e_entry + load_bias; 3345 info->start_code = -1; 3346 info->end_code = 0; 3347 info->start_data = -1; 3348 info->end_data = 0; 3349 /* Usual start for brk is after all sections of the main executable. */ 3350 info->brk = TARGET_PAGE_ALIGN(hiaddr + load_bias); 3351 info->elf_flags = ehdr->e_flags; 3352 3353 prot_exec = PROT_EXEC; 3354 #ifdef TARGET_AARCH64 3355 /* 3356 * If the BTI feature is present, this indicates that the executable 3357 * pages of the startup binary should be mapped with PROT_BTI, so that 3358 * branch targets are enforced. 3359 * 3360 * The startup binary is either the interpreter or the static executable. 3361 * The interpreter is responsible for all pages of a dynamic executable. 3362 * 3363 * Elf notes are backward compatible to older cpus. 3364 * Do not enable BTI unless it is supported. 
3365 */ 3366 if ((info->note_flags & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) 3367 && (pinterp_name == NULL || *pinterp_name == 0) 3368 && cpu_isar_feature(aa64_bti, ARM_CPU(thread_cpu))) { 3369 prot_exec |= TARGET_PROT_BTI; 3370 } 3371 #endif 3372 3373 for (i = 0; i < ehdr->e_phnum; i++) { 3374 struct elf_phdr *eppnt = phdr + i; 3375 if (eppnt->p_type == PT_LOAD) { 3376 abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em; 3377 int elf_prot = 0; 3378 3379 if (eppnt->p_flags & PF_R) { 3380 elf_prot |= PROT_READ; 3381 } 3382 if (eppnt->p_flags & PF_W) { 3383 elf_prot |= PROT_WRITE; 3384 } 3385 if (eppnt->p_flags & PF_X) { 3386 elf_prot |= prot_exec; 3387 } 3388 3389 vaddr = load_bias + eppnt->p_vaddr; 3390 vaddr_po = vaddr & ~TARGET_PAGE_MASK; 3391 vaddr_ps = vaddr & TARGET_PAGE_MASK; 3392 3393 vaddr_ef = vaddr + eppnt->p_filesz; 3394 vaddr_em = vaddr + eppnt->p_memsz; 3395 3396 /* 3397 * Some segments may be completely empty, with a non-zero p_memsz 3398 * but no backing file segment. 3399 */ 3400 if (eppnt->p_filesz != 0) { 3401 error = imgsrc_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po, 3402 elf_prot, MAP_PRIVATE | MAP_FIXED, 3403 src, eppnt->p_offset - vaddr_po); 3404 if (error == -1) { 3405 goto exit_mmap; 3406 } 3407 } 3408 3409 /* If the load segment requests extra zeros (e.g. bss), map it. */ 3410 if (vaddr_ef < vaddr_em && 3411 !zero_bss(vaddr_ef, vaddr_em, elf_prot, &err)) { 3412 goto exit_errmsg; 3413 } 3414 3415 /* Find the full program boundaries. */ 3416 if (elf_prot & PROT_EXEC) { 3417 if (vaddr < info->start_code) { 3418 info->start_code = vaddr; 3419 } 3420 if (vaddr_ef > info->end_code) { 3421 info->end_code = vaddr_ef; 3422 } 3423 } 3424 if (elf_prot & PROT_WRITE) { 3425 if (vaddr < info->start_data) { 3426 info->start_data = vaddr; 3427 } 3428 if (vaddr_ef > info->end_data) { 3429 info->end_data = vaddr_ef; 3430 } 3431 } 3432 #ifdef TARGET_MIPS 3433 } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) { 3434 Mips_elf_abiflags_v0 abiflags; 3435 3436 if (!imgsrc_read(&abiflags, eppnt->p_offset, sizeof(abiflags), 3437 src, &err)) { 3438 goto exit_errmsg; 3439 } 3440 bswap_mips_abiflags(&abiflags); 3441 info->fp_abi = abiflags.fp_abi; 3442 #endif 3443 } 3444 } 3445 3446 if (info->end_data == 0) { 3447 info->start_data = info->end_code; 3448 info->end_data = info->end_code; 3449 } 3450 3451 if (qemu_log_enabled()) { 3452 load_symbols(ehdr, src, load_bias); 3453 } 3454 3455 debuginfo_report_elf(image_name, src->fd, load_bias); 3456 3457 mmap_unlock(); 3458 3459 close(src->fd); 3460 return; 3461 3462 exit_mmap: 3463 error_setg_errno(&err, errno, "Error mapping file"); 3464 goto exit_errmsg; 3465 exit_errmsg: 3466 error_reportf_err(err, "%s: ", image_name); 3467 exit(-1); 3468 } 3469 3470 static void load_elf_interp(const char *filename, struct image_info *info, 3471 char bprm_buf[BPRM_BUF_SIZE]) 3472 { 3473 struct elfhdr ehdr; 3474 ImageSource src; 3475 int fd, retval; 3476 Error *err = NULL; 3477 3478 fd = open(path(filename), O_RDONLY); 3479 if (fd < 0) { 3480 error_setg_file_open(&err, errno, filename); 3481 error_report_err(err); 3482 exit(-1); 3483 } 3484 3485 retval = read(fd, bprm_buf, BPRM_BUF_SIZE); 3486 if (retval < 0) { 3487 error_setg_errno(&err, errno, "Error reading file header"); 3488 error_reportf_err(err, "%s: ", filename); 3489 exit(-1); 3490 } 3491 3492 src.fd = fd; 3493 src.cache = bprm_buf; 3494 src.cache_size = retval; 3495 3496 load_elf_image(filename, &src, info, &ehdr, NULL); 3497 } 3498 3499 #ifdef VDSO_HEADER 3500 #include VDSO_HEADER 3501 #define 
vdso_image_info() &vdso_image_info 3502 #else 3503 #define vdso_image_info() NULL 3504 #endif 3505 3506 static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso) 3507 { 3508 ImageSource src; 3509 struct elfhdr ehdr; 3510 abi_ulong load_bias, load_addr; 3511 3512 src.fd = -1; 3513 src.cache = vdso->image; 3514 src.cache_size = vdso->image_size; 3515 3516 load_elf_image("<internal-vdso>", &src, info, &ehdr, NULL); 3517 load_addr = info->load_addr; 3518 load_bias = info->load_bias; 3519 3520 /* 3521 * We need to relocate the VDSO image. The one built into the kernel 3522 * is built for a fixed address. The one built for QEMU is not, since 3523 * that requires close control of the guest address space. 3524 * We pre-processed the image to locate all of the addresses that need 3525 * to be updated. 3526 */ 3527 for (unsigned i = 0, n = vdso->reloc_count; i < n; i++) { 3528 abi_ulong *addr = g2h_untagged(load_addr + vdso->relocs[i]); 3529 *addr = tswapal(tswapal(*addr) + load_bias); 3530 } 3531 3532 /* Install signal trampolines, if present. */ 3533 if (vdso->sigreturn_ofs) { 3534 default_sigreturn = load_addr + vdso->sigreturn_ofs; 3535 } 3536 if (vdso->rt_sigreturn_ofs) { 3537 default_rt_sigreturn = load_addr + vdso->rt_sigreturn_ofs; 3538 } 3539 3540 /* Remove write from VDSO segment. */ 3541 target_mprotect(info->start_data, info->end_data - info->start_data, 3542 PROT_READ | PROT_EXEC); 3543 } 3544 3545 static int symfind(const void *s0, const void *s1) 3546 { 3547 struct elf_sym *sym = (struct elf_sym *)s1; 3548 __typeof(sym->st_value) addr = *(uint64_t *)s0; 3549 int result = 0; 3550 3551 if (addr < sym->st_value) { 3552 result = -1; 3553 } else if (addr >= sym->st_value + sym->st_size) { 3554 result = 1; 3555 } 3556 return result; 3557 } 3558 3559 static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr) 3560 { 3561 #if ELF_CLASS == ELFCLASS32 3562 struct elf_sym *syms = s->disas_symtab.elf32; 3563 #else 3564 struct elf_sym *syms = s->disas_symtab.elf64; 3565 #endif 3566 3567 // binary search 3568 struct elf_sym *sym; 3569 3570 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind); 3571 if (sym != NULL) { 3572 return s->disas_strtab + sym->st_name; 3573 } 3574 3575 return ""; 3576 } 3577 3578 /* FIXME: This should use elf_ops.h.inc */ 3579 static int symcmp(const void *s0, const void *s1) 3580 { 3581 struct elf_sym *sym0 = (struct elf_sym *)s0; 3582 struct elf_sym *sym1 = (struct elf_sym *)s1; 3583 return (sym0->st_value < sym1->st_value) 3584 ? -1 3585 : ((sym0->st_value > sym1->st_value) ? 1 : 0); 3586 } 3587 3588 /* Best attempt to load symbols from this ELF object. */ 3589 static void load_symbols(struct elfhdr *hdr, const ImageSource *src, 3590 abi_ulong load_bias) 3591 { 3592 int i, shnum, nsyms, sym_idx = 0, str_idx = 0; 3593 g_autofree struct elf_shdr *shdr = NULL; 3594 char *strings = NULL; 3595 struct elf_sym *syms = NULL; 3596 struct elf_sym *new_syms; 3597 uint64_t segsz; 3598 3599 shnum = hdr->e_shnum; 3600 shdr = imgsrc_read_alloc(hdr->e_shoff, shnum * sizeof(struct elf_shdr), 3601 src, NULL); 3602 if (shdr == NULL) { 3603 return; 3604 } 3605 3606 bswap_shdr(shdr, shnum); 3607 for (i = 0; i < shnum; ++i) { 3608 if (shdr[i].sh_type == SHT_SYMTAB) { 3609 sym_idx = i; 3610 str_idx = shdr[i].sh_link; 3611 goto found; 3612 } 3613 } 3614 3615 /* There will be no symbol table if the file was stripped. */ 3616 return; 3617 3618 found: 3619 /* Now know where the strtab and symtab are. Snarf them. 
*/ 3620 3621 segsz = shdr[str_idx].sh_size; 3622 strings = g_try_malloc(segsz); 3623 if (!strings) { 3624 goto give_up; 3625 } 3626 if (!imgsrc_read(strings, shdr[str_idx].sh_offset, segsz, src, NULL)) { 3627 goto give_up; 3628 } 3629 3630 segsz = shdr[sym_idx].sh_size; 3631 if (segsz / sizeof(struct elf_sym) > INT_MAX) { 3632 /* 3633 * Implausibly large symbol table: give up rather than ploughing 3634 * on with the number of symbols calculation overflowing. 3635 */ 3636 goto give_up; 3637 } 3638 nsyms = segsz / sizeof(struct elf_sym); 3639 syms = g_try_malloc(segsz); 3640 if (!syms) { 3641 goto give_up; 3642 } 3643 if (!imgsrc_read(syms, shdr[sym_idx].sh_offset, segsz, src, NULL)) { 3644 goto give_up; 3645 } 3646 3647 for (i = 0; i < nsyms; ) { 3648 bswap_sym(syms + i); 3649 /* Throw away entries which we do not need. */ 3650 if (syms[i].st_shndx == SHN_UNDEF 3651 || syms[i].st_shndx >= SHN_LORESERVE 3652 || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { 3653 if (i < --nsyms) { 3654 syms[i] = syms[nsyms]; 3655 } 3656 } else { 3657 #if defined(TARGET_ARM) || defined (TARGET_MIPS) 3658 /* The bottom address bit marks a Thumb or MIPS16 symbol. */ 3659 syms[i].st_value &= ~(target_ulong)1; 3660 #endif 3661 syms[i].st_value += load_bias; 3662 i++; 3663 } 3664 } 3665 3666 /* No "useful" symbol. */ 3667 if (nsyms == 0) { 3668 goto give_up; 3669 } 3670 3671 /* 3672 * Attempt to free the storage associated with the local symbols 3673 * that we threw away. Whether or not this has any effect on the 3674 * memory allocation depends on the malloc implementation and how 3675 * many symbols we managed to discard. 3676 */ 3677 new_syms = g_try_renew(struct elf_sym, syms, nsyms); 3678 if (new_syms == NULL) { 3679 goto give_up; 3680 } 3681 syms = new_syms; 3682 3683 qsort(syms, nsyms, sizeof(*syms), symcmp); 3684 3685 { 3686 struct syminfo *s = g_new(struct syminfo, 1); 3687 3688 s->disas_strtab = strings; 3689 s->disas_num_syms = nsyms; 3690 #if ELF_CLASS == ELFCLASS32 3691 s->disas_symtab.elf32 = syms; 3692 #else 3693 s->disas_symtab.elf64 = syms; 3694 #endif 3695 s->lookup_symbol = lookup_symbolxx; 3696 s->next = syminfos; 3697 syminfos = s; 3698 } 3699 return; 3700 3701 give_up: 3702 g_free(strings); 3703 g_free(syms); 3704 } 3705 3706 uint32_t get_elf_eflags(int fd) 3707 { 3708 struct elfhdr ehdr; 3709 off_t offset; 3710 int ret; 3711 3712 /* Read ELF header */ 3713 offset = lseek(fd, 0, SEEK_SET); 3714 if (offset == (off_t) -1) { 3715 return 0; 3716 } 3717 ret = read(fd, &ehdr, sizeof(ehdr)); 3718 if (ret < sizeof(ehdr)) { 3719 return 0; 3720 } 3721 offset = lseek(fd, offset, SEEK_SET); 3722 if (offset == (off_t) -1) { 3723 return 0; 3724 } 3725 3726 /* Check ELF signature */ 3727 if (!elf_check_ident(&ehdr)) { 3728 return 0; 3729 } 3730 3731 /* check header */ 3732 bswap_ehdr(&ehdr); 3733 if (!elf_check_ehdr(&ehdr)) { 3734 return 0; 3735 } 3736 3737 /* return architecture id */ 3738 return ehdr.e_flags; 3739 } 3740 3741 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) 3742 { 3743 /* 3744 * We need a copy of the elf header for passing to create_elf_tables. 3745 * We will have overwritten the original when we re-use bprm->buf 3746 * while loading the interpreter. Allocate the storage for this now 3747 * and let elf_load_image do any swapping that may be required. 
uint32_t get_elf_eflags(int fd)
{
    struct elfhdr ehdr;
    off_t offset;
    int ret;

    /* Read ELF header */
    offset = lseek(fd, 0, SEEK_SET);
    if (offset == (off_t) -1) {
        return 0;
    }
    ret = read(fd, &ehdr, sizeof(ehdr));
    if (ret < sizeof(ehdr)) {
        return 0;
    }
    offset = lseek(fd, offset, SEEK_SET);
    if (offset == (off_t) -1) {
        return 0;
    }

    /* Check ELF signature */
    if (!elf_check_ident(&ehdr)) {
        return 0;
    }

    /* check header */
    bswap_ehdr(&ehdr);
    if (!elf_check_ehdr(&ehdr)) {
        return 0;
    }

    /* return architecture id */
    return ehdr.e_flags;
}

int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
{
    /*
     * We need a copy of the elf header for passing to create_elf_tables.
     * We will have overwritten the original when we re-use bprm->buf
     * while loading the interpreter.  Allocate the storage for this now
     * and let elf_load_image do any swapping that may be required.
     */
    struct elfhdr ehdr;
    struct image_info interp_info, vdso_info;
    char *elf_interpreter = NULL;
    char *scratch;

    memset(&interp_info, 0, sizeof(interp_info));
#ifdef TARGET_MIPS
    interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
#endif

    load_elf_image(bprm->filename, &bprm->src, info, &ehdr, &elf_interpreter);

    /*
     * Do this so that we can load the interpreter, if need be.
     * We will change some of these later.
     */
    bprm->p = setup_arg_pages(bprm, info);

    scratch = g_new0(char, TARGET_PAGE_SIZE);
    if (STACK_GROWS_DOWN) {
        bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
                                   bprm->p, info->stack_limit);
        info->file_string = bprm->p;
        bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
                                   bprm->p, info->stack_limit);
        info->env_strings = bprm->p;
        bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
                                   bprm->p, info->stack_limit);
        info->arg_strings = bprm->p;
    } else {
        info->arg_strings = bprm->p;
        bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
                                   bprm->p, info->stack_limit);
        info->env_strings = bprm->p;
        bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
                                   bprm->p, info->stack_limit);
        info->file_string = bprm->p;
        bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
                                   bprm->p, info->stack_limit);
    }

    g_free(scratch);

    if (!bprm->p) {
        fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
        exit(-1);
    }

    if (elf_interpreter) {
        load_elf_interp(elf_interpreter, &interp_info, bprm->buf);

        /*
         * While unusual because of ELF_ET_DYN_BASE, if we are unlucky
         * with the mappings the interpreter can be loaded above but
         * near the main executable, which can leave very little room
         * for the heap.
         * If the current brk has less than 16 MiB of headroom below
         * the interpreter, move it to the end of the interpreter.
         */
        if (interp_info.brk > info->brk &&
            interp_info.load_bias - info->brk < 16 * MiB) {
            info->brk = interp_info.brk;
        }

        /*
         * If the program interpreter is one of these two, then assume
         * an iBCS2 image.  Otherwise assume a native linux image.
         */
        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
            || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
            info->personality = PER_SVR4;

            /*
             * Why this, you ask???  Well SVr4 maps page 0 as read-only,
             * and some applications "depend" upon this behavior.  Since
             * we do not have the power to recompile these, we emulate
             * the SVr4 behavior.  Sigh.
             */
            target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC,
                        MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
        }
#ifdef TARGET_MIPS
        info->interp_fp_abi = interp_info.fp_abi;
#endif
    }

    /*
     * Load a vdso if available, which will amongst other things contain
     * the signal trampolines.  Otherwise, allocate a separate page for
     * them.
     */
    const VdsoImageInfo *vdso = vdso_image_info();
    if (vdso) {
        load_elf_vdso(&vdso_info, vdso);
        info->vdso = vdso_info.load_bias;
    } else if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) {
        abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE,
                                          PROT_READ | PROT_WRITE,
                                          MAP_PRIVATE | MAP_ANON, -1, 0);
        if (tramp_page == -1) {
            return -errno;
        }

        setup_sigtramp(tramp_page);
        target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC);
    }

    bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &ehdr, info,
                                elf_interpreter ? &interp_info : NULL,
                                vdso ? &vdso_info : NULL);
    info->start_stack = bprm->p;

    /*
     * If we have an interpreter, set that as the program's entry point.
     * Copy the load_bias as well, to help PPC64 interpret the entry
     * point as a function descriptor.  Do this after creating elf tables
     * so that we copy the original program entry point into the AUXV.
     */
    if (elf_interpreter) {
        info->load_bias = interp_info.load_bias;
        info->entry = interp_info.entry;
        g_free(elf_interpreter);
    }

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#endif

    return 0;
}
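/*
 * A worked example of the brk heuristic above, with invented addresses:
 * if the executable's brk starts at 0x00500000 and the interpreter
 * happens to be mapped with load_bias 0x00900000, only 4 MiB separate
 * the heap from the interpreter.  Since 4 MiB < 16 MiB, brk is moved up
 * to interp_info.brk, past the interpreter's mappings, so the heap has
 * room to grow.
 */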
#ifdef USE_ELF_CORE_DUMP
#include "exec/translate-all.h"

/*
 * Definitions to generate Intel SVR4-like core files.
 * These mostly have the same names as the SVR4 types with "target_elf_"
 * tacked on the front to prevent clashes with linux definitions,
 * and the typedef forms have been avoided.  This is mostly like
 * the SVR4 structure, but more Linuxy, with things that Linux does
 * not support and which gdb doesn't really use excluded.
 *
 * Fields we don't dump (their contents are zero) in linux-user qemu
 * are marked with XXX.
 *
 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
 *
 * Porting ELF coredump to a new target is a (quite) simple process.
 * First you define USE_ELF_CORE_DUMP in the target's ELF code (where
 * init_thread() for the target resides):
 *
 * #define USE_ELF_CORE_DUMP
 *
 * Next you define the type of register set used for dumping.  The ELF
 * specification says that it needs to be an array of elf_greg_t with
 * ELF_NREG elements:
 *
 * typedef <target_regtype> target_elf_greg_t;
 * #define ELF_NREG <number of registers>
 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 * The last step is to implement the target-specific function that
 * copies registers from the given cpu into the register set defined
 * above.  The prototype is:
 *
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                const CPUArchState *env);
 *
 * Parameters:
 *     regs - copy register values into here (allocated and zeroed by caller)
 *     env - copy registers from here
 *
 * An example for the ARM target is provided in this file.
 */
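/*
 * For illustration, the three porting steps above might look like the
 * sketch below for a hypothetical target with 16 general-purpose
 * registers plus a program counter.  The env->regs and env->pc fields
 * are invented; this block is not compiled.
 */
#if 0
#define USE_ELF_CORE_DUMP

typedef abi_ulong target_elf_greg_t;
#define ELF_NREG 17
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUArchState *env)
{
    for (int i = 0; i < 16; i++) {
        (*regs)[i] = tswapreg(env->regs[i]);   /* hypothetical field */
    }
    (*regs)[16] = tswapreg(env->pc);           /* hypothetical field */
}
#endif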
struct target_elf_siginfo {
    abi_int  si_signo; /* signal number */
    abi_int  si_code;  /* extra code */
    abi_int  si_errno; /* errno */
};

struct target_elf_prstatus {
    struct target_elf_siginfo pr_info;  /* Info associated with signal */
    abi_short          pr_cursig;       /* Current signal */
    abi_ulong          pr_sigpend;      /* XXX */
    abi_ulong          pr_sighold;      /* XXX */
    target_pid_t       pr_pid;
    target_pid_t       pr_ppid;
    target_pid_t       pr_pgrp;
    target_pid_t       pr_sid;
    struct target_timeval pr_utime;     /* XXX User time */
    struct target_timeval pr_stime;     /* XXX System time */
    struct target_timeval pr_cutime;    /* XXX Cumulative user time */
    struct target_timeval pr_cstime;    /* XXX Cumulative system time */
    target_elf_gregset_t pr_reg;        /* GP registers */
    abi_int            pr_fpvalid;      /* XXX */
};

#define ELF_PRARGSZ (80) /* Number of chars for args */

struct target_elf_prpsinfo {
    char         pr_state;       /* numeric process state */
    char         pr_sname;       /* char for pr_state */
    char         pr_zomb;        /* zombie */
    char         pr_nice;        /* nice val */
    abi_ulong    pr_flag;        /* flags */
    target_uid_t pr_uid;
    target_gid_t pr_gid;
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
    /* Lots missing */
    char pr_fname[16] QEMU_NONSTRING; /* filename of executable */
    char pr_psargs[ELF_PRARGSZ];      /* initial part of arg list */
};

#ifdef BSWAP_NEEDED
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
    prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
    prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
    prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
    prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
    prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
    prstatus->pr_pid = tswap32(prstatus->pr_pid);
    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
    prstatus->pr_sid = tswap32(prstatus->pr_sid);
    /* cpu times are not filled, so we skip them */
    /* regs should be in correct format already */
    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
}

static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
    psinfo->pr_flag = tswapal(psinfo->pr_flag);
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
}

static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
#else
static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
static inline void bswap_psinfo(struct target_elf_prpsinfo *p) { }
static inline void bswap_note(struct elf_note *en) { }
#endif /* BSWAP_NEEDED */

/*
 * Calculate file (dump) size of given memory region.
 */
static size_t vma_dump_size(target_ulong start, target_ulong end,
                            unsigned long flags)
{
    /* The area must be readable. */
    if (!(flags & PAGE_READ)) {
        return 0;
    }

    /*
     * Usually we don't dump executable pages, as they contain
     * non-writable code that the debugger can read directly from the
     * target's libraries etc.  If the region does not start with an
     * ELF header, we dump it anyway.
     */
    if (!(flags & PAGE_WRITE_ORG) &&
        (flags & PAGE_EXEC) &&
        memcmp(g2h_untagged(start), ELFMAG, SELFMAG) == 0) {
        return 0;
    }

    return end - start;
}
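/*
 * Concretely, the rules above play out like this (flag combinations
 * shown as r/w/x, with "w" meaning PAGE_WRITE_ORG):
 *
 *   ---   (no read)                     -> 0 bytes, skipped
 *   rw-   heap/stack                    -> end - start, dumped
 *   r-x   code starting with "\177ELF"  -> 0 bytes, skipped
 *   r-x   code without an ELF header    -> end - start, dumped
 *
 * The debugger can recover the skipped segments from the executable
 * and library files themselves.
 */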
static size_t size_note(const char *name, size_t datasz)
{
    size_t namesz = strlen(name) + 1;

    namesz = ROUND_UP(namesz, 4);
    datasz = ROUND_UP(datasz, 4);

    return sizeof(struct elf_note) + namesz + datasz;
}

static void *fill_note(void **pptr, int type, const char *name, size_t datasz)
{
    void *ptr = *pptr;
    struct elf_note *n = ptr;
    size_t namesz = strlen(name) + 1;

    n->n_namesz = namesz;
    n->n_descsz = datasz;
    n->n_type = type;
    bswap_note(n);

    ptr += sizeof(*n);
    memcpy(ptr, name, namesz);

    namesz = ROUND_UP(namesz, 4);
    datasz = ROUND_UP(datasz, 4);

    *pptr = ptr + namesz + datasz;
    return ptr + namesz;
}

static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
                            uint32_t flags)
{
    memcpy(elf->e_ident, ELFMAG, SELFMAG);

    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;

    elf->e_type = ET_CORE;
    elf->e_machine = machine;
    elf->e_version = EV_CURRENT;
    elf->e_phoff = sizeof(struct elfhdr);
    elf->e_flags = flags;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;

    bswap_ehdr(elf);
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_filesz = sz;

    bswap_phdr(phdr, 1);
}

static void fill_prstatus_note(void *data, const TaskState *ts,
                               CPUState *cpu, int signr)
{
    /*
     * Because note memory is only aligned to 4, and target_elf_prstatus
     * may well have higher alignment requirements, fill locally and
     * memcpy to the destination afterward.
     */
    struct target_elf_prstatus prstatus = {
        .pr_info.si_signo = signr,
        .pr_cursig = signr,
        .pr_pid = ts->ts_tid,
        .pr_ppid = getppid(),
        .pr_pgrp = getpgrp(),
        .pr_sid = getsid(0),
    };

    elf_core_copy_regs(&prstatus.pr_reg, cpu_env(cpu));
    bswap_prstatus(&prstatus);
    memcpy(data, &prstatus, sizeof(prstatus));
}
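/*
 * A worked example of the note layout produced by size_note() and
 * fill_note(): struct elf_note is three 32-bit words (12 bytes) for
 * both ELF classes, so a "CORE" note (namesz = 5, padded to 8) with a
 * 5-byte payload (padded to 8) occupies 12 + 8 + 8 = 28 bytes:
 *
 *   +0   n_namesz = 5
 *   +4   n_descsz = 5
 *   +8   n_type
 *   +12  "CORE\0" + 3 bytes of padding
 *   +20  payload  + 3 bytes of padding
 *   +28  next note
 *
 * fill_note() returns the +20 payload address and advances *pptr to +28.
 */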
static void fill_prpsinfo_note(void *data, const TaskState *ts)
{
    /*
     * Because note memory is only aligned to 4, and target_elf_prpsinfo
     * may well have higher alignment requirements, fill locally and
     * memcpy to the destination afterward.
     */
    struct target_elf_prpsinfo psinfo = {
        .pr_pid = getpid(),
        .pr_ppid = getppid(),
        .pr_pgrp = getpgrp(),
        .pr_sid = getsid(0),
        .pr_uid = getuid(),
        .pr_gid = getgid(),
    };
    char *base_filename;
    size_t len;

    len = ts->info->env_strings - ts->info->arg_strings;
    len = MIN(len, ELF_PRARGSZ);
    memcpy(&psinfo.pr_psargs, g2h_untagged(ts->info->arg_strings), len);
    for (size_t i = 0; i < len; i++) {
        if (psinfo.pr_psargs[i] == 0) {
            psinfo.pr_psargs[i] = ' ';
        }
    }

    base_filename = g_path_get_basename(ts->bprm->filename);
    /*
     * Using strncpy here is fine: at max-length,
     * this field is not NUL-terminated.
     */
    strncpy(psinfo.pr_fname, base_filename, sizeof(psinfo.pr_fname));
    g_free(base_filename);

    bswap_psinfo(&psinfo);
    memcpy(data, &psinfo, sizeof(psinfo));
}

static void fill_auxv_note(void *data, const TaskState *ts)
{
    memcpy(data, g2h_untagged(ts->info->saved_auxv), ts->info->auxv_len);
}

/*
 * Construct the name of the coredump file.  The convention for the
 * name is:
 *
 *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
 *
 * Returns the filename.
 */
static char *core_dump_filename(const TaskState *ts)
{
    g_autoptr(GDateTime) now = g_date_time_new_now_local();
    g_autofree char *nowstr = g_date_time_format(now, "%Y%m%d-%H%M%S");
    g_autofree char *base_filename = g_path_get_basename(ts->bprm->filename);

    return g_strdup_printf("qemu_%s_%s_%d.core",
                           base_filename, nowstr, (int)getpid());
}

static int dump_write(int fd, const void *ptr, size_t size)
{
    const char *bufp = (const char *)ptr;
    ssize_t bytes_written, bytes_left;

    bytes_written = 0;
    bytes_left = size;

    /*
     * In normal conditions a single write(2) would suffice, but this
     * retry loop is more portable, e.g. when writing to a socket.
     */
    do {
        bytes_written = write(fd, bufp, bytes_left);
        if (bytes_written < 0) {
            if (errno == EINTR) {
                continue;
            }
            return -1;
        } else if (bytes_written == 0) { /* eof */
            return -1;
        }
        bufp += bytes_written;
        bytes_left -= bytes_written;
    } while (bytes_left > 0);

    return 0;
}
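/*
 * Example of the transformations above, with invented values: for a
 * guest started as "ls -l /tmp", the packed argument strings
 * "ls\0-l\0/tmp\0" become the pr_psargs text "ls -l /tmp ", and a
 * process with pid 12345 dumping on 2024-01-02 at 03:04:05 would
 * produce the file "qemu_ls_20240102-030405_12345.core".
 */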
static int wmr_page_unprotect_regions(void *opaque, target_ulong start,
                                      target_ulong end, unsigned long flags)
{
    if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) {
        size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size());

        while (1) {
            page_unprotect(start, 0);
            if (end - start <= step) {
                break;
            }
            start += step;
        }
    }
    return 0;
}

typedef struct {
    unsigned count;
    size_t size;
} CountAndSizeRegions;

static int wmr_count_and_size_regions(void *opaque, target_ulong start,
                                      target_ulong end, unsigned long flags)
{
    CountAndSizeRegions *css = opaque;

    css->count++;
    css->size += vma_dump_size(start, end, flags);
    return 0;
}

typedef struct {
    struct elf_phdr *phdr;
    off_t offset;
} FillRegionPhdr;

static int wmr_fill_region_phdr(void *opaque, target_ulong start,
                                target_ulong end, unsigned long flags)
{
    FillRegionPhdr *d = opaque;
    struct elf_phdr *phdr = d->phdr;

    phdr->p_type = PT_LOAD;
    phdr->p_vaddr = start;
    phdr->p_paddr = 0;
    phdr->p_filesz = vma_dump_size(start, end, flags);
    phdr->p_offset = d->offset;
    d->offset += phdr->p_filesz;
    phdr->p_memsz = end - start;
    phdr->p_flags = (flags & PAGE_READ ? PF_R : 0)
                  | (flags & PAGE_WRITE_ORG ? PF_W : 0)
                  | (flags & PAGE_EXEC ? PF_X : 0);
    phdr->p_align = ELF_EXEC_PAGESIZE;

    bswap_phdr(phdr, 1);
    d->phdr = phdr + 1;
    return 0;
}

static int wmr_write_region(void *opaque, target_ulong start,
                            target_ulong end, unsigned long flags)
{
    int fd = *(int *)opaque;
    size_t size = vma_dump_size(start, end, flags);

    if (!size) {
        return 0;
    }
    return dump_write(fd, g2h_untagged(start), size);
}
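/*
 * The wmr_* functions above all follow the walk_memory_regions()
 * callback protocol: each is invoked once per contiguous mapping with
 * (opaque, start, end, flags), and returning non-zero stops the walk.
 * A minimal sketch of the pattern, assuming only the callback contract
 * used in this file:
 *
 *   CountAndSizeRegions css = { 0 };
 *   walk_memory_regions(&css, wmr_count_and_size_regions);
 *   // css.count mappings seen, css.size dumpable bytes total
 *
 * elf_core_dump() below runs such passes back to back: count/size,
 * fill the program headers, then write the region data.
 */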
/*
 * Write out ELF coredump.
 *
 * See documentation of ELF object file format in:
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
 *
 * The Linux coredump format is as follows:
 *
 * 0   +----------------------+ \
 *     | ELF header           | ET_CORE      |
 *     +----------------------+              |
 *     | ELF program headers  |              |--- headers
 *     | - NOTE section       |              |
 *     | - PT_LOAD sections   |              |
 *     +----------------------+ /
 *     | NOTEs:               |
 *     | - NT_PRSTATUS        |
 *     | - NT_PRPSINFO        |
 *     | - NT_AUXV            |
 *     +----------------------+ <-- aligned to target page
 *     | Process memory dump  |
 *     :                      :
 *     .                      .
 *     :                      :
 *     |                      |
 *     +----------------------+
 *
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
 *
 * The format follows the System V format as closely as possible.
 * Current version limitations are as follows:
 * - no floating point registers are dumped
 *
 * Returns 0 in case of success, negative errno otherwise.
 *
 * TODO: make this work also during runtime: it should be possible to
 * force a coredump from a running process and then continue processing.
 * For example, qemu could set up a SIGUSR2 handler (provided that the
 * target process hasn't registered a handler for that signal) that
 * performs the dump when the signal is received.
 */
static int elf_core_dump(int signr, const CPUArchState *env)
{
    const CPUState *cpu = env_cpu((CPUArchState *)env);
    const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
    struct rlimit dumpsize;
    CountAndSizeRegions css;
    off_t offset, note_offset, data_offset;
    size_t note_size;
    int cpus, ret;
    int fd = -1;
    CPUState *cpu_iter;

    if (prctl(PR_GET_DUMPABLE) == 0) {
        return 0;
    }

    if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) {
        return 0;
    }

    cpu_list_lock();
    mmap_lock();

    /* By unprotecting, we merge vmas that might be split. */
    walk_memory_regions(NULL, wmr_page_unprotect_regions);

    /*
     * Walk through target process memory mappings and
     * set up structure containing this information.
     */
    memset(&css, 0, sizeof(css));
    walk_memory_regions(&css, wmr_count_and_size_regions);

    cpus = 0;
    CPU_FOREACH(cpu_iter) {
        cpus++;
    }

    offset = sizeof(struct elfhdr);
    offset += (css.count + 1) * sizeof(struct elf_phdr);
    note_offset = offset;

    offset += size_note("CORE", ts->info->auxv_len);
    offset += size_note("CORE", sizeof(struct target_elf_prpsinfo));
    offset += size_note("CORE", sizeof(struct target_elf_prstatus)) * cpus;
    note_size = offset - note_offset;
    data_offset = ROUND_UP(offset, ELF_EXEC_PAGESIZE);

    /* Do not dump if the corefile size exceeds the limit. */
    if (dumpsize.rlim_cur != RLIM_INFINITY
        && dumpsize.rlim_cur < data_offset + css.size) {
        errno = 0;
        goto out;
    }

    {
        g_autofree char *corefile = core_dump_filename(ts);
        fd = open(corefile, O_WRONLY | O_CREAT | O_TRUNC,
                  S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
    }
    if (fd < 0) {
        goto out;
    }

    /*
     * There is a fair amount of alignment padding within the notes
     * as well as preceding the process memory.  Allocate a zeroed
     * block to hold it all.  Write all of the headers directly into
     * this buffer and then write it out as a block.
     */
    {
        g_autofree void *header = g_malloc0(data_offset);
        FillRegionPhdr frp;
        void *hptr, *dptr;

        /* Create elf file header. */
        hptr = header;
        fill_elf_header(hptr, css.count + 1, ELF_MACHINE, 0);
        hptr += sizeof(struct elfhdr);

        /* Create elf program headers. */
        fill_elf_note_phdr(hptr, note_size, note_offset);
        hptr += sizeof(struct elf_phdr);

        frp.phdr = hptr;
        frp.offset = data_offset;
        walk_memory_regions(&frp, wmr_fill_region_phdr);
        hptr = frp.phdr;

        /* Create the notes. */
        dptr = fill_note(&hptr, NT_AUXV, "CORE", ts->info->auxv_len);
        fill_auxv_note(dptr, ts);

        dptr = fill_note(&hptr, NT_PRPSINFO, "CORE",
                         sizeof(struct target_elf_prpsinfo));
        fill_prpsinfo_note(dptr, ts);

        CPU_FOREACH(cpu_iter) {
            dptr = fill_note(&hptr, NT_PRSTATUS, "CORE",
                             sizeof(struct target_elf_prstatus));
            fill_prstatus_note(dptr, ts, cpu_iter,
                               cpu_iter == cpu ? signr : 0);
        }

        if (dump_write(fd, header, data_offset) < 0) {
            goto out;
        }
    }

    /* Finally write process memory into the corefile as well. */
    if (walk_memory_regions(&fd, wmr_write_region) < 0) {
        goto out;
    }
    errno = 0;

 out:
    ret = -errno;
    mmap_unlock();
    cpu_list_unlock();
    if (fd >= 0) {
        close(fd);
    }
    return ret;
}
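/*
 * The resulting file can be inspected with standard binutils, for
 * example (filename invented, matching core_dump_filename() above):
 *
 *   readelf -l qemu_ls_20240102-030405_12345.core   # PT_NOTE + PT_LOADs
 *   readelf -n qemu_ls_20240102-030405_12345.core   # CORE notes
 *
 * and loaded into gdb together with the guest binary via
 * "gdb <binary> <corefile>".
 */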
#endif /* USE_ELF_CORE_DUMP */

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}