/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>

#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/shm.h>

#include "qemu.h"
#include "user/tswap-target.h"
#include "exec/page-protection.h"
#include "user/guest-base.h"
#include "user-internals.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "disas/disas.h"
#include "qemu/bitops.h"
#include "qemu/path.h"
#include "qemu/queue.h"
#include "qemu/guest-random.h"
#include "qemu/units.h"
#include "qemu/selfmap.h"
#include "qemu/lockable.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target_signal.h"
#include "tcg/debuginfo.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

#ifndef TARGET_ARCH_HAS_SIGTRAMP_PAGE
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0
#endif

typedef struct {
    const uint8_t *image;
    const uint32_t *relocs;
    unsigned image_size;
    unsigned reloc_count;
    unsigned sigreturn_ofs;
    unsigned rt_sigreturn_ofs;
} VdsoImageInfo;

#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to
                                        descriptors (signal handling) */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX       = 0x0000,
    PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4        = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3        = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3     = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5        = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386    = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4       = 0x0005 | STICKY_TIMEOUTS,
    PER_BSD         = 0x0006,
    PER_SUNOS       = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX       = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32     = 0x0008,
    PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32      = 0x0009 | STICKY_TIMEOUTS,  /* IRIX5 32-bit */
    PER_IRIXN32     = 0x000a | STICKY_TIMEOUTS,  /* IRIX6 new 32-bit */
    PER_IRIX64      = 0x000b | STICKY_TIMEOUTS,  /* IRIX6 64-bit */
    PER_RISCOS      = 0x000c,
    PER_SOLARIS     = 0x000d | STICKY_TIMEOUTS,
    PER_UW7         = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4        = 0x000f,                    /* OSF/1 v4 */
    PER_HPUX        = 0x0010,
    PER_MASK        = 0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)
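
/*
 * Illustrative examples (editor's addition, derived from the values above):
 *
 *     personality(PER_LINUX32_3GB)  == PER_LINUX32      (0x0008)
 *     personality(PER_SVR4)         == 0x0001
 *     PER_LINUX32_3GB & ~PER_MASK   == ADDR_LIMIT_3GB   (flag bits only)
 */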

int info_is_fdpic(struct image_info *info)
{
    return info->personality == PER_LINUX_FDPIC;
}

/* this flag is ineffective under linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#if TARGET_BIG_ENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif

#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong      target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong       target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif

#ifdef USE_UID16
typedef abi_ushort      target_uid_t;
typedef abi_ushort      target_gid_t;
#else
typedef abi_uint        target_uid_t;
typedef abi_uint        target_gid_t;
#endif
typedef abi_int         target_pid_t;

#ifdef TARGET_I386

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64

#define ELF_PLATFORM   "x86_64"

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29 as there should be room for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[15]);
    (*regs)[1] = tswapreg(env->regs[14]);
    (*regs)[2] = tswapreg(env->regs[13]);
    (*regs)[3] = tswapreg(env->regs[12]);
    (*regs)[4] = tswapreg(env->regs[R_EBP]);
    (*regs)[5] = tswapreg(env->regs[R_EBX]);
    (*regs)[6] = tswapreg(env->regs[11]);
    (*regs)[7] = tswapreg(env->regs[10]);
    (*regs)[8] = tswapreg(env->regs[9]);
    (*regs)[9] = tswapreg(env->regs[8]);
    (*regs)[10] = tswapreg(env->regs[R_EAX]);
    (*regs)[11] = tswapreg(env->regs[R_ECX]);
    (*regs)[12] = tswapreg(env->regs[R_EDX]);
    (*regs)[13] = tswapreg(env->regs[R_ESI]);
    (*regs)[14] = tswapreg(env->regs[R_EDI]);
    (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[16] = tswapreg(env->eip);
    (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[18] = tswapreg(env->eflags);
    (*regs)[19] = tswapreg(env->regs[R_ESP]);
    (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff);
    (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
}

#if ULONG_MAX > UINT32_MAX
#define INIT_GUEST_COMMPAGE
static bool init_guest_commpage(void)
{
    /*
     * The vsyscall page is at a high negative address aka kernel space,
     * which means that we cannot actually allocate it with target_mmap.
     * We still should be able to use page_set_flags, unless the user
     * has specified -R reserved_va, which would trigger an assert().
     */
    if (reserved_va != 0 &&
        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
        error_report("Cannot allocate vsyscall page");
        exit(EXIT_FAILURE);
    }
    page_set_flags(TARGET_VSYSCALL_PAGE,
                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}
#endif
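
/*
 * Editor's sketch (assumptions: the legacy x86-64 vsyscall ABI, and that
 * the translator special-cases execution in this page): no host memory
 * backs the page, only PAGE_EXEC is recorded, and the three fixed entry
 * points are recognised at translation time, e.g.
 *
 *     TARGET_VSYSCALL_PAGE + 0x000     vgettimeofday
 *     TARGET_VSYSCALL_PAGE + 0x400     vtime
 *     TARGET_VSYSCALL_PAGE + 0x800     vgetcpu
 */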
#else

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_386

#define ELF_PLATFORM    get_elf_platform()
#define EXSTACK_DEFAULT true

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6) {
        family = 6;
    }
    if (family >= 3) {
        elf_platform[1] = '0' + family;
    }
    return elf_platform;
}

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 means we have no such handler.  */
    regs->edx = 0;
}

#define ELF_NREG    17
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19 as there should be room for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[R_EBX]);
    (*regs)[1] = tswapreg(env->regs[R_ECX]);
    (*regs)[2] = tswapreg(env->regs[R_EDX]);
    (*regs)[3] = tswapreg(env->regs[R_ESI]);
    (*regs)[4] = tswapreg(env->regs[R_EDI]);
    (*regs)[5] = tswapreg(env->regs[R_EBP]);
    (*regs)[6] = tswapreg(env->regs[R_EAX]);
    (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[12] = tswapreg(env->eip);
    (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[14] = tswapreg(env->eflags);
    (*regs)[15] = tswapreg(env->regs[R_ESP]);
    (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff);
}

/*
 * i386 is the only target which supplies AT_SYSINFO for the vdso.
 * All others only supply AT_SYSINFO_EHDR.
 */
#define DLINFO_ARCH_ITEMS (vdso_info != NULL)
#define ARCH_DLINFO                                     \
    do {                                                \
        if (vdso_info) {                                \
            NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry);  \
        }                                               \
    } while (0)
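
/*
 * Editor's sketch (assumption): for an i386 guest with a vdso loaded, the
 * final auxv therefore carries both entries, e.g.
 *
 *     AT_SYSINFO       -> vdso_info->entry   (the __kernel_vsyscall stub)
 *     AT_SYSINFO_EHDR  -> vdso load address
 *
 * whereas every other target supplies AT_SYSINFO_EHDR alone.
 */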
320 */ 321 #define DLINFO_ARCH_ITEMS (vdso_info != NULL) 322 #define ARCH_DLINFO \ 323 do { \ 324 if (vdso_info) { \ 325 NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry); \ 326 } \ 327 } while (0) 328 329 #endif /* TARGET_X86_64 */ 330 331 #define VDSO_HEADER "vdso.c.inc" 332 333 #define USE_ELF_CORE_DUMP 334 #define ELF_EXEC_PAGESIZE 4096 335 336 #endif /* TARGET_I386 */ 337 338 #ifdef TARGET_ARM 339 340 #ifndef TARGET_AARCH64 341 /* 32 bit ARM definitions */ 342 343 #define ELF_ARCH EM_ARM 344 #define ELF_CLASS ELFCLASS32 345 #define EXSTACK_DEFAULT true 346 347 static inline void init_thread(struct target_pt_regs *regs, 348 struct image_info *infop) 349 { 350 abi_long stack = infop->start_stack; 351 memset(regs, 0, sizeof(*regs)); 352 353 regs->uregs[16] = ARM_CPU_MODE_USR; 354 if (infop->entry & 1) { 355 regs->uregs[16] |= CPSR_T; 356 } 357 regs->uregs[15] = infop->entry & 0xfffffffe; 358 regs->uregs[13] = infop->start_stack; 359 /* FIXME - what to for failure of get_user()? */ 360 get_user_ual(regs->uregs[2], stack + 8); /* envp */ 361 get_user_ual(regs->uregs[1], stack + 4); /* envp */ 362 /* XXX: it seems that r0 is zeroed after ! */ 363 regs->uregs[0] = 0; 364 /* For uClinux PIC binaries. */ 365 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */ 366 regs->uregs[10] = infop->start_data; 367 368 /* Support ARM FDPIC. */ 369 if (info_is_fdpic(infop)) { 370 /* As described in the ABI document, r7 points to the loadmap info 371 * prepared by the kernel. If an interpreter is needed, r8 points 372 * to the interpreter loadmap and r9 points to the interpreter 373 * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and 374 * r9 points to the main program PT_DYNAMIC info. 375 */ 376 regs->uregs[7] = infop->loadmap_addr; 377 if (infop->interpreter_loadmap_addr) { 378 /* Executable is dynamically loaded. 

#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);

    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
    ARM_HWCAP_ARM_FPHP      = 1 << 22,
    ARM_HWCAP_ARM_ASIMDHP   = 1 << 23,
    ARM_HWCAP_ARM_ASIMDDP   = 1 << 24,
    ARM_HWCAP_ARM_ASIMDFHM  = 1 << 25,
    ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26,
    ARM_HWCAP_ARM_I8MM      = 1 << 27,
};

enum {
    ARM_HWCAP2_ARM_AES   = 1 << 0,
    ARM_HWCAP2_ARM_PMULL = 1 << 1,
    ARM_HWCAP2_ARM_SHA1  = 1 << 2,
    ARM_HWCAP2_ARM_SHA2  = 1 << 3,
    ARM_HWCAP2_ARM_CRC32 = 1 << 4,
    ARM_HWCAP2_ARM_SB    = 1 << 5,
    ARM_HWCAP2_ARM_SSBS  = 1 << 6,
};

/* The commpage only exists for 32 bit kernels */

#define HI_COMMPAGE (intptr_t)0xffff0f00u

static bool init_guest_commpage(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    int host_page_size = qemu_real_host_page_size();
    abi_ptr commpage;
    void *want;
    void *addr;

    /*
     * M-profile allocates a maximum of 2GB of address space, so can
     * never allocate the commpage.  Skip it.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        return true;
    }

    commpage = HI_COMMPAGE & -host_page_size;
    want = g2h_untagged(commpage);
    addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_PRIVATE |
                (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE),
                -1, 0);

    if (addr == MAP_FAILED) {
        perror("Allocating guest commpage");
        exit(EXIT_FAILURE);
    }
    if (addr != want) {
        return false;
    }

    /* Set kernel helper versions; rest of page is 0.  */
    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));

    if (mprotect(addr, host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(EXIT_FAILURE);
    }

    page_set_flags(commpage, commpage | (host_page_size - 1),
                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
    return true;
}
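
/*
 * Editor's illustration (assumption, mirroring the kernel's kuser helper
 * ABI): the 5 stored at 0xffff0ffc is the helper version word; guest code
 * probes it, then calls the fixed entry points, e.g. the cmpxchg helper:
 *
 *     typedef int (*kuser_cmpxchg_fn)(int oldval, int newval, int *ptr);
 *     #define __kuser_cmpxchg ((kuser_cmpxchg_fn)0xffff0fc0)
 *
 *     int done = (__kuser_cmpxchg(old, new, &word) == 0); // 0 on success
 */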

#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_ARM_SWP;
    hwcaps |= ARM_HWCAP_ARM_HALF;
    hwcaps |= ARM_HWCAP_ARM_THUMB;
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
    GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
    GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
    GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
    GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
    GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);

    if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
        cpu_isar_feature(aa32_fpdp_v3, cpu)) {
        hwcaps |= ARM_HWCAP_ARM_VFPv3;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            hwcaps |= ARM_HWCAP_ARM_VFPD32;
        } else {
            hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
        }
    }
    GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
    /*
     * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same
     * isar_feature function for both. The kernel reports them as two hwcaps.
     */
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP);
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP);
    GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP);
    GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM);
    GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16);
    GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM);

    return hwcaps;
}

uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB);
    GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS);
    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP_ARM_SWP      )] = "swp",
        [__builtin_ctz(ARM_HWCAP_ARM_HALF     )] = "half",
        [__builtin_ctz(ARM_HWCAP_ARM_THUMB    )] = "thumb",
        [__builtin_ctz(ARM_HWCAP_ARM_26BIT    )] = "26bit",
        [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult",
        [__builtin_ctz(ARM_HWCAP_ARM_FPA      )] = "fpa",
        [__builtin_ctz(ARM_HWCAP_ARM_VFP      )] = "vfp",
        [__builtin_ctz(ARM_HWCAP_ARM_EDSP     )] = "edsp",
        [__builtin_ctz(ARM_HWCAP_ARM_JAVA     )] = "java",
        [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT   )] = "iwmmxt",
        [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH   )] = "crunch",
        [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE  )] = "thumbee",
        [__builtin_ctz(ARM_HWCAP_ARM_NEON     )] = "neon",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPv3    )] = "vfpv3",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16",
        [__builtin_ctz(ARM_HWCAP_ARM_TLS      )] = "tls",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPv4    )] = "vfpv4",
        [__builtin_ctz(ARM_HWCAP_ARM_IDIVA    )] = "idiva",
        [__builtin_ctz(ARM_HWCAP_ARM_IDIVT    )] = "idivt",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPD32   )] = "vfpd32",
        [__builtin_ctz(ARM_HWCAP_ARM_LPAE     )] = "lpae",
        [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM  )] = "evtstrm",
        [__builtin_ctz(ARM_HWCAP_ARM_FPHP     )] = "fphp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP  )] = "asimdhp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP  )] = "asimddp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16",
        [__builtin_ctz(ARM_HWCAP_ARM_I8MM     )] = "i8mm",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP2_ARM_AES  )] = "aes",
        [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull",
        [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1",
        [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2",
        [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32",
        [__builtin_ctz(ARM_HWCAP2_ARM_SB   )] = "sb",
        [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}
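
/*
 * Editor's sketch (assumption: a /proc/cpuinfo-style consumer): the two
 * string tables above are meant to be walked bit by bit, e.g.
 *
 *     for (uint32_t bit = 0; bit < 32; bit++) {
 *         const char *name = elf_hwcap_str(bit);
 *         if (name && (get_elf_hwcap() & (1u << bit))) {
 *             append_feature(buf, name);   // hypothetical sink
 *         }
 *     }
 */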

#undef GET_FEATURE
#undef GET_FEATURE_ID

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    CPUARMState *env = cpu_env(thread_cpu);

#if TARGET_BIG_ENDIAN
# define END  "b"
#else
# define END  "l"
#endif

    if (arm_feature(env, ARM_FEATURE_V8)) {
        return "v8" END;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            return "v7m" END;
        } else {
            return "v7" END;
        }
    } else if (arm_feature(env, ARM_FEATURE_V6)) {
        return "v6" END;
    } else if (arm_feature(env, ARM_FEATURE_V5)) {
        return "v5" END;
    } else {
        return "v4" END;
    }

#undef END
}

#else
/* 64 bit ARM definitions */

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#if TARGET_BIG_ENDIAN
# define ELF_PLATFORM    "aarch64_be"
#else
# define ELF_PLATFORM    "aarch64"
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
    ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
    ARM_HWCAP_A64_DIT           = 1 << 24,
    ARM_HWCAP_A64_USCAT         = 1 << 25,
    ARM_HWCAP_A64_ILRCPC        = 1 << 26,
    ARM_HWCAP_A64_FLAGM         = 1 << 27,
    ARM_HWCAP_A64_SSBS          = 1 << 28,
    ARM_HWCAP_A64_SB            = 1 << 29,
    ARM_HWCAP_A64_PACA          = 1 << 30,
    ARM_HWCAP_A64_PACG          = 1UL << 31,

    ARM_HWCAP2_A64_DCPODP       = 1 << 0,
    ARM_HWCAP2_A64_SVE2         = 1 << 1,
    ARM_HWCAP2_A64_SVEAES       = 1 << 2,
    ARM_HWCAP2_A64_SVEPMULL     = 1 << 3,
    ARM_HWCAP2_A64_SVEBITPERM   = 1 << 4,
    ARM_HWCAP2_A64_SVESHA3      = 1 << 5,
    ARM_HWCAP2_A64_SVESM4       = 1 << 6,
    ARM_HWCAP2_A64_FLAGM2       = 1 << 7,
    ARM_HWCAP2_A64_FRINT        = 1 << 8,
    ARM_HWCAP2_A64_SVEI8MM      = 1 << 9,
    ARM_HWCAP2_A64_SVEF32MM     = 1 << 10,
    ARM_HWCAP2_A64_SVEF64MM     = 1 << 11,
    ARM_HWCAP2_A64_SVEBF16      = 1 << 12,
    ARM_HWCAP2_A64_I8MM         = 1 << 13,
    ARM_HWCAP2_A64_BF16         = 1 << 14,
    ARM_HWCAP2_A64_DGH          = 1 << 15,
    ARM_HWCAP2_A64_RNG          = 1 << 16,
    ARM_HWCAP2_A64_BTI          = 1 << 17,
    ARM_HWCAP2_A64_MTE          = 1 << 18,
    ARM_HWCAP2_A64_ECV          = 1 << 19,
    ARM_HWCAP2_A64_AFP          = 1 << 20,
    ARM_HWCAP2_A64_RPRES        = 1 << 21,
    ARM_HWCAP2_A64_MTE3         = 1 << 22,
    ARM_HWCAP2_A64_SME          = 1 << 23,
    ARM_HWCAP2_A64_SME_I16I64   = 1 << 24,
    ARM_HWCAP2_A64_SME_F64F64   = 1 << 25,
    ARM_HWCAP2_A64_SME_I8I32    = 1 << 26,
    ARM_HWCAP2_A64_SME_F16F32   = 1 << 27,
    ARM_HWCAP2_A64_SME_B16F32   = 1 << 28,
    ARM_HWCAP2_A64_SME_F32F32   = 1 << 29,
    ARM_HWCAP2_A64_SME_FA64     = 1 << 30,
    ARM_HWCAP2_A64_WFXT         = 1ULL << 31,
    ARM_HWCAP2_A64_EBF16        = 1ULL << 32,
    ARM_HWCAP2_A64_SVE_EBF16    = 1ULL << 33,
    ARM_HWCAP2_A64_CSSC         = 1ULL << 34,
    ARM_HWCAP2_A64_RPRFM        = 1ULL << 35,
    ARM_HWCAP2_A64_SVE2P1       = 1ULL << 36,
    ARM_HWCAP2_A64_SME2         = 1ULL << 37,
    ARM_HWCAP2_A64_SME2P1       = 1ULL << 38,
    ARM_HWCAP2_A64_SME_I16I32   = 1ULL << 39,
    ARM_HWCAP2_A64_SME_BI32I32  = 1ULL << 40,
    ARM_HWCAP2_A64_SME_B16B16   = 1ULL << 41,
    ARM_HWCAP2_A64_SME_F16F16   = 1ULL << 42,
    ARM_HWCAP2_A64_MOPS         = 1ULL << 43,
    ARM_HWCAP2_A64_HBC          = 1ULL << 44,
};
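
/*
 * Editor's note: from ARM_HWCAP_A64_PACG and ARM_HWCAP2_A64_WFXT onward
 * the enumerators are spelled with 1UL/1ULL because a plain "1 << 31"
 * would overflow int, and the HWCAP2 bits from 32 upwards cannot be
 * represented in 32 bits at all.
 */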

#define ELF_HWCAP   get_elf_hwcap()
#define ELF_HWCAP2  get_elf_hwcap2()

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;
    hwcaps |= ARM_HWCAP_A64_CPUID;

    /* probe for the extra features */

    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
    GET_FEATURE_ID(aa64_lse2, ARM_HWCAP_A64_USCAT);
    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
    GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
    GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
    GET_FEATURE_ID(aa64_dit, ARM_HWCAP_A64_DIT);
    GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
    GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
    GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
    GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
    GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
    GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);

    return hwcaps;
}

uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
    GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
    GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
    GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
    GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
    GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
    GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
    GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
    GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
    GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
    GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
    GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
    GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
    GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
    GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
    GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
    GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
    GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
    GET_FEATURE_ID(aa64_mte3, ARM_HWCAP2_A64_MTE3);
    GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
                              ARM_HWCAP2_A64_SME_F32F32 |
                              ARM_HWCAP2_A64_SME_B16F32 |
                              ARM_HWCAP2_A64_SME_F16F32 |
                              ARM_HWCAP2_A64_SME_I8I32));
    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
    GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC);
    GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS);

    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP_A64_FP      )] = "fp",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMD   )] = "asimd",
        [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
        [__builtin_ctz(ARM_HWCAP_A64_AES     )] = "aes",
        [__builtin_ctz(ARM_HWCAP_A64_PMULL   )] = "pmull",
        [__builtin_ctz(ARM_HWCAP_A64_SHA1    )] = "sha1",
        [__builtin_ctz(ARM_HWCAP_A64_SHA2    )] = "sha2",
        [__builtin_ctz(ARM_HWCAP_A64_CRC32   )] = "crc32",
        [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics",
        [__builtin_ctz(ARM_HWCAP_A64_FPHP    )] = "fphp",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp",
        [__builtin_ctz(ARM_HWCAP_A64_CPUID   )] = "cpuid",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm",
        [__builtin_ctz(ARM_HWCAP_A64_JSCVT   )] = "jscvt",
        [__builtin_ctz(ARM_HWCAP_A64_FCMA    )] = "fcma",
        [__builtin_ctz(ARM_HWCAP_A64_LRCPC   )] = "lrcpc",
        [__builtin_ctz(ARM_HWCAP_A64_DCPOP   )] = "dcpop",
        [__builtin_ctz(ARM_HWCAP_A64_SHA3    )] = "sha3",
        [__builtin_ctz(ARM_HWCAP_A64_SM3     )] = "sm3",
        [__builtin_ctz(ARM_HWCAP_A64_SM4     )] = "sm4",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp",
        [__builtin_ctz(ARM_HWCAP_A64_SHA512  )] = "sha512",
        [__builtin_ctz(ARM_HWCAP_A64_SVE     )] = "sve",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm",
        [__builtin_ctz(ARM_HWCAP_A64_DIT     )] = "dit",
        [__builtin_ctz(ARM_HWCAP_A64_USCAT   )] = "uscat",
        [__builtin_ctz(ARM_HWCAP_A64_ILRCPC  )] = "ilrcpc",
        [__builtin_ctz(ARM_HWCAP_A64_FLAGM   )] = "flagm",
        [__builtin_ctz(ARM_HWCAP_A64_SSBS    )] = "ssbs",
        [__builtin_ctz(ARM_HWCAP_A64_SB      )] = "sb",
        [__builtin_ctz(ARM_HWCAP_A64_PACA    )] = "paca",
        [__builtin_ctz(ARM_HWCAP_A64_PACG    )] = "pacg",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP2_A64_DCPODP       )] = "dcpodp",
        [__builtin_ctz(ARM_HWCAP2_A64_SVE2         )] = "sve2",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEAES       )] = "sveaes",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL     )] = "svepmull",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM   )] = "svebitperm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3      )] = "svesha3",
        [__builtin_ctz(ARM_HWCAP2_A64_SVESM4       )] = "svesm4",
        [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2       )] = "flagm2",
        [__builtin_ctz(ARM_HWCAP2_A64_FRINT        )] = "frint",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM      )] = "svei8mm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM     )] = "svef32mm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM     )] = "svef64mm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16      )] = "svebf16",
        [__builtin_ctz(ARM_HWCAP2_A64_I8MM         )] = "i8mm",
        [__builtin_ctz(ARM_HWCAP2_A64_BF16         )] = "bf16",
        [__builtin_ctz(ARM_HWCAP2_A64_DGH          )] = "dgh",
        [__builtin_ctz(ARM_HWCAP2_A64_RNG          )] = "rng",
        [__builtin_ctz(ARM_HWCAP2_A64_BTI          )] = "bti",
        [__builtin_ctz(ARM_HWCAP2_A64_MTE          )] = "mte",
        [__builtin_ctz(ARM_HWCAP2_A64_ECV          )] = "ecv",
        [__builtin_ctz(ARM_HWCAP2_A64_AFP          )] = "afp",
        [__builtin_ctz(ARM_HWCAP2_A64_RPRES        )] = "rpres",
        [__builtin_ctz(ARM_HWCAP2_A64_MTE3         )] = "mte3",
        [__builtin_ctz(ARM_HWCAP2_A64_SME          )] = "sme",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64   )] = "smei16i64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64   )] = "smef64f64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32    )] = "smei8i32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32   )] = "smef16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32   )] = "smeb16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32   )] = "smef32f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64     )] = "smefa64",
        [__builtin_ctz(ARM_HWCAP2_A64_WFXT         )] = "wfxt",
        [__builtin_ctzll(ARM_HWCAP2_A64_EBF16      )] = "ebf16",
        [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16  )] = "sveebf16",
        [__builtin_ctzll(ARM_HWCAP2_A64_CSSC       )] = "cssc",
        [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM      )] = "rprfm",
        [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1     )] = "sve2p1",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME2       )] = "sme2",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1     )] = "sme2p1",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
        [__builtin_ctzll(ARM_HWCAP2_A64_MOPS       )] = "mops",
        [__builtin_ctzll(ARM_HWCAP2_A64_HBC        )] = "hbc",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

#undef GET_FEATURE_ID

#endif /* not TARGET_AARCH64 */

#if TARGET_BIG_ENDIAN
# define VDSO_HEADER  "vdso-be.c.inc"
#else
# define VDSO_HEADER  "vdso-le.c.inc"
#endif

#endif /* TARGET_ARM */

#ifdef TARGET_SPARC

#ifndef TARGET_SPARC64
# define ELF_CLASS  ELFCLASS32
# define ELF_ARCH   EM_SPARC
#elif defined(TARGET_ABI32)
# define ELF_CLASS  ELFCLASS32
# define elf_check_arch(x) ((x) == EM_SPARC32PLUS || (x) == EM_SPARC)
#else
# define ELF_CLASS  ELFCLASS64
# define ELF_ARCH   EM_SPARCV9
#endif

#include "elf.h"

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    /* There are not many sparc32 hwcap bits -- we have all of them. */
    uint32_t r = HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
                 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV;

#ifdef TARGET_SPARC64
    CPUSPARCState *env = cpu_env(thread_cpu);
    uint32_t features = env->def.features;

    r |= HWCAP_SPARC_V9 | HWCAP_SPARC_V8PLUS;
    /* 32x32 multiply and divide are efficient. */
    r |= HWCAP_SPARC_MUL32 | HWCAP_SPARC_DIV32;
    /* We don't have an internal feature bit for this. */
    r |= HWCAP_SPARC_POPC;
    r |= features & CPU_FEATURE_FSMULD ? HWCAP_SPARC_FSMULD : 0;
    r |= features & CPU_FEATURE_VIS1 ? HWCAP_SPARC_VIS : 0;
    r |= features & CPU_FEATURE_VIS2 ? HWCAP_SPARC_VIS2 : 0;
    r |= features & CPU_FEATURE_FMAF ? HWCAP_SPARC_FMAF : 0;
    r |= features & CPU_FEATURE_VIS3 ? HWCAP_SPARC_VIS3 : 0;
    r |= features & CPU_FEATURE_IMA ? HWCAP_SPARC_IMA : 0;
#endif

    return r;
}

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Note that target_cpu_copy_regs does not read psr/tstate. */
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong)
                        - TARGET_STACK_BIAS);
}
#endif /* TARGET_SPARC */

#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE

#if defined(TARGET_PPC64)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

#endif

#define ELF_ARCH        EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,

    /* Feature definitions in AT_HWCAP2.  */
    QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
    QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
    QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
    QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
    QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
    QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
    QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
    QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
    QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
    QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
    QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
    QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
    QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */
    QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature)                                    \
    do {                                                                \
        if ((cpu->env.insns_flags2 & flags) == flags) {                 \
            features |= feature;                                        \
        }                                                               \
    } while (0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
    GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
    GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
    GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
                  PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
                 QEMU_PPC_FEATURE_ARCH_2_06);
#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}
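
/*
 * Editor's note: the GET_FEATURE2 used above is an all-of test,
 * (insns_flags2 & flags) == flags, so a composite bit such as
 * QEMU_PPC_FEATURE_ARCH_2_06 is only advertised when every listed
 * ISA 2.06 instruction group is implemented by the CPU model.
 */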

#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap2(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature)                                     \
    do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)

    GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
    GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
    GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                  PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
                  QEMU_PPC_FEATURE2_VEC_CRYPTO);
    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
                 QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128);
    GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 |
                 QEMU_PPC_FEATURE2_MMA);

#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                               \
    do {                                                          \
        PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);                \
        /*                                                        \
         * Handle glibc compatibility: these magic entries must  \
         * be at the lowest addresses in the final auxv.          \
         */                                                       \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                  \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                  \
        NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size);   \
        NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size);   \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                           \
    } while (0)

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64)
    if (get_ppc64_abi(infop) < 2) {
        uint64_t val;
        get_user_u64(val, infop->entry + 8);
        _regs->gpr[2] = val + infop->load_bias;
        get_user_u64(val, infop->entry);
        infop->entry = val + infop->load_bias;
    } else {
        _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
    }
#endif
    _regs->nip = infop->entry;
}

/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
#define ELF_NREG 48
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapreg(env->gpr[i]);
    }

    (*regs)[32] = tswapreg(env->nip);
    (*regs)[33] = tswapreg(env->msr);
    (*regs)[35] = tswapreg(env->ctr);
    (*regs)[36] = tswapreg(env->lr);
    (*regs)[37] = tswapreg(cpu_read_xer(env));

    ccr = ppc_get_cr(env);
    (*regs)[38] = tswapreg(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#ifndef TARGET_PPC64
# define VDSO_HEADER "vdso-32.c.inc"
#elif TARGET_BIG_ENDIAN
# define VDSO_HEADER "vdso-64.c.inc"
#else
# define VDSO_HEADER "vdso-64le.c.inc"
#endif

#endif

#ifdef TARGET_LOONGARCH64

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_LOONGARCH
#define EXSTACK_DEFAULT true

#define elf_check_arch(x) ((x) == EM_LOONGARCH)

#define VDSO_HEADER "vdso.c.inc"

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Set crmd PG,DA = 1,0 */
    regs->csr.crmd = 2 << 3;
    regs->csr.era = infop->entry;
    regs->regs[3] = infop->start_stack;
}
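
/*
 * Editor's note: in CSR.CRMD the DA (direct address) bit is bit 3 and PG
 * (paged mapping) is bit 4, so "2 << 3" (== 1 << 4) yields PG=1, DA=0,
 * i.e. mapped address translation for the new user thread.
 */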

/* See linux kernel: arch/loongarch/include/asm/elf.h */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_EF_R0 = 0,
    TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33,
    TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPULoongArchState *env)
{
    int i;

    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]);
    }

    (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc);
    (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#define ELF_HWCAP get_elf_hwcap()

/* See arch/loongarch/include/uapi/asm/hwcap.h */
enum {
    HWCAP_LOONGARCH_CPUCFG   = (1 << 0),
    HWCAP_LOONGARCH_LAM      = (1 << 1),
    HWCAP_LOONGARCH_UAL      = (1 << 2),
    HWCAP_LOONGARCH_FPU      = (1 << 3),
    HWCAP_LOONGARCH_LSX      = (1 << 4),
    HWCAP_LOONGARCH_LASX     = (1 << 5),
    HWCAP_LOONGARCH_CRC32    = (1 << 6),
    HWCAP_LOONGARCH_COMPLEX  = (1 << 7),
    HWCAP_LOONGARCH_CRYPTO   = (1 << 8),
    HWCAP_LOONGARCH_LVZ      = (1 << 9),
    HWCAP_LOONGARCH_LBT_X86  = (1 << 10),
    HWCAP_LOONGARCH_LBT_ARM  = (1 << 11),
    HWCAP_LOONGARCH_LBT_MIPS = (1 << 12),
};

static uint32_t get_elf_hwcap(void)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= HWCAP_LOONGARCH_CRC32;

    if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) {
        hwcaps |= HWCAP_LOONGARCH_UAL;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) {
        hwcaps |= HWCAP_LOONGARCH_FPU;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) {
        hwcaps |= HWCAP_LOONGARCH_LAM;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        hwcaps |= HWCAP_LOONGARCH_LSX;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        hwcaps |= HWCAP_LOONGARCH_LASX;
    }

    return hwcaps;
}

#define ELF_PLATFORM "loongarch"

#endif /* TARGET_LOONGARCH64 */

#ifdef TARGET_MIPS

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#define ELF_ARCH    EM_MIPS
#define EXSTACK_DEFAULT true

#ifdef TARGET_ABI_MIPSN32
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
#else
#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
#endif

#define ELF_BASE_PLATFORM get_elf_base_platform()

#define MATCH_PLATFORM_INSN(_flags, _base_platform)      \
    do { if ((cpu->env.insn_flags & (_flags)) == _flags) \
    { return _base_platform; } } while (0)

static const char *get_elf_base_platform(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);

    /* 64 bit ISAs go first */
    MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6");
    MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5");
    MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2");
    MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64");
    MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5");
    MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4");
    MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3");

    /* 32 bit ISAs */
    MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6");
    MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5");
    MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2");
    MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32");
    MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2");

    /* Fallback */
    return "mips";
}
#undef MATCH_PLATFORM_INSN

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

/* See linux kernel: arch/mips/include/asm/elf.h.  */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/include/asm/reg.h.  */
enum {
#ifdef TARGET_MIPS64
    TARGET_EF_R0 = 0,
#else
    TARGET_EF_R0 = 6,
#endif
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
    TARGET_EF_LO = TARGET_EF_R0 + 32,
    TARGET_EF_HI = TARGET_EF_R0 + 33,
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
};

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
{
    int i;

    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
    }

    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

/* See arch/mips/include/uapi/asm/hwcap.h.  */
enum {
    HWCAP_MIPS_R6         = (1 << 0),
    HWCAP_MIPS_MSA        = (1 << 1),
    HWCAP_MIPS_CRC32      = (1 << 2),
    HWCAP_MIPS_MIPS16     = (1 << 3),
    HWCAP_MIPS_MDMX       = (1 << 4),
    HWCAP_MIPS_MIPS3D     = (1 << 5),
    HWCAP_MIPS_SMARTMIPS  = (1 << 6),
    HWCAP_MIPS_DSP        = (1 << 7),
    HWCAP_MIPS_DSP2       = (1 << 8),
    HWCAP_MIPS_DSP3       = (1 << 9),
    HWCAP_MIPS_MIPS16E2   = (1 << 10),
    HWCAP_LOONGSON_MMI    = (1 << 11),
    HWCAP_LOONGSON_EXT    = (1 << 12),
    HWCAP_LOONGSON_EXT2   = (1 << 13),
    HWCAP_LOONGSON_CPUCFG = (1 << 14),
};

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE_INSN(_flag, _hwcap)                             \
    do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_SET(_reg, _mask, _hwcap)                    \
    do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap)        \
    do {                                                                \
        if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) {  \
            hwcaps |= _hwcap;                                           \
        }                                                               \
    } while (0)

static uint32_t get_elf_hwcap(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH,
                        2, HWCAP_MIPS_R6);
    GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA);
    GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI);
    GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT);

    return hwcaps;
}

#undef GET_FEATURE_REG_EQU
#undef GET_FEATURE_REG_SET
#undef GET_FEATURE_INSN
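
/*
 * Editor's note: Config0.AR is the architecture-revision field; it reads
 * 0 for Release 1, 1 for Releases 2-5 and 2 for Release 6, which is why
 * GET_FEATURE_REG_EQU above compares it against 2 before advertising
 * HWCAP_MIPS_R6.
 */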

#endif /* TARGET_MIPS */

#ifdef TARGET_MICROBLAZE

#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_MICROBLAZE

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 38
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
{
    int i, pos = 0;

    for (i = 0; i < 32; i++) {
        (*regs)[pos++] = tswapreg(env->regs[i]);
    }

    (*regs)[pos++] = tswapreg(env->pc);
    (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env));
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->ear);
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->esr);
}

#endif /* TARGET_MICROBLAZE */

#ifdef TARGET_OPENRISC

#define ELF_ARCH  EM_OPENRISC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2MSB

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->gpr[1] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

/* See linux kernel arch/openrisc/include/asm/elf.h.  */
#define ELF_NREG 34 /* gprs and pc, sr */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUOpenRISCState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(cpu_get_sr(env));
}
#define ELF_HWCAP 0
#define ELF_PLATFORM NULL

#endif /* TARGET_OPENRISC */

#ifdef TARGET_SH4

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_SH

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

/* See linux kernel: arch/sh/include/asm/elf.h.  */
#define ELF_NREG 23
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
enum {
    TARGET_REG_PC = 16,
    TARGET_REG_PR = 17,
    TARGET_REG_SR = 18,
    TARGET_REG_GBR = 19,
    TARGET_REG_MACH = 20,
    TARGET_REG_MACL = 21,
    TARGET_REG_SYSCALL = 22
};

static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
                                      const CPUSH4State *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapreg(env->gregs[i]);
    }

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
    (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
    (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
    (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
    (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

enum {
    SH_CPU_HAS_FPU            = 0x0001, /* Hardware FPU support */
    SH_CPU_HAS_P2_FLUSH_BUG   = 0x0002, /* Need to flush the cache in P2 area */
    SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
    SH_CPU_HAS_DSP            = 0x0008, /* SH-DSP: DSP support */
    SH_CPU_HAS_PERF_COUNTER   = 0x0010, /* Hardware performance counters */
    SH_CPU_HAS_PTEA           = 0x0020, /* PTEA register */
    SH_CPU_HAS_LLSC           = 0x0040, /* movli.l/movco.l */
    SH_CPU_HAS_L2_CACHE       = 0x0080, /* Secondary cache / URAM */
    SH_CPU_HAS_OP32           = 0x0100, /* 32-bit instruction support */
    SH_CPU_HAS_PTEAEX         = 0x0200, /* PTE ASID Extension support */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
    uint32_t hwcap = 0;

    hwcap |= SH_CPU_HAS_FPU;

    if (cpu->env.features & SH_FEATURE_SH4A) {
        hwcap |= SH_CPU_HAS_LLSC;
    }

    return hwcap;
}

#endif

#ifdef TARGET_CRIS

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_CRIS

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define ELF_EXEC_PAGESIZE        8192

#endif

#ifdef TARGET_M68K

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->sr = 0;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/m68k/include/asm/elf.h.  */
#define ELF_NREG 20
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
{
    (*regs)[0] = tswapreg(env->dregs[1]);
    (*regs)[1] = tswapreg(env->dregs[2]);
    (*regs)[2] = tswapreg(env->dregs[3]);
    (*regs)[3] = tswapreg(env->dregs[4]);
    (*regs)[4] = tswapreg(env->dregs[5]);
    (*regs)[5] = tswapreg(env->dregs[6]);
    (*regs)[6] = tswapreg(env->dregs[7]);
    (*regs)[7] = tswapreg(env->aregs[0]);
    (*regs)[8] = tswapreg(env->aregs[1]);
    (*regs)[9] = tswapreg(env->aregs[2]);
    (*regs)[10] = tswapreg(env->aregs[3]);
    (*regs)[11] = tswapreg(env->aregs[4]);
    (*regs)[12] = tswapreg(env->aregs[5]);
    (*regs)[13] = tswapreg(env->aregs[6]);
    (*regs)[14] = tswapreg(env->dregs[0]);
    (*regs)[15] = tswapreg(env->aregs[7]);
    (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapreg(env->sr);
    (*regs)[18] = tswapreg(env->pc);
    (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

#endif

#ifdef TARGET_ALPHA

#define ELF_CLASS ELFCLASS64
#define ELF_ARCH  EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;
    regs->usp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE 8192

#endif /* TARGET_ALPHA */

#ifdef TARGET_S390X

#define ELF_CLASS  ELFCLASS64
#define ELF_DATA   ELFDATA2MSB
#define ELF_ARCH   EM_S390

#include "elf.h"

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE(_feat, _hwcap) \
    do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    /*
     * Let's assume we always have esan3 and zarch.
     * 31-bit processes can use 64-bit registers (high gprs).
     */
1749 */ 1750 uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS; 1751 1752 GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE); 1753 GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA); 1754 GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP); 1755 GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM); 1756 if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) && 1757 s390_has_feat(S390_FEAT_ETF3_ENH)) { 1758 hwcap |= HWCAP_S390_ETF3EH; 1759 } 1760 GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS); 1761 GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT); 1762 GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2); 1763 1764 return hwcap; 1765 } 1766 1767 const char *elf_hwcap_str(uint32_t bit) 1768 { 1769 static const char *hwcap_str[] = { 1770 [HWCAP_S390_NR_ESAN3] = "esan3", 1771 [HWCAP_S390_NR_ZARCH] = "zarch", 1772 [HWCAP_S390_NR_STFLE] = "stfle", 1773 [HWCAP_S390_NR_MSA] = "msa", 1774 [HWCAP_S390_NR_LDISP] = "ldisp", 1775 [HWCAP_S390_NR_EIMM] = "eimm", 1776 [HWCAP_S390_NR_DFP] = "dfp", 1777 [HWCAP_S390_NR_HPAGE] = "edat", 1778 [HWCAP_S390_NR_ETF3EH] = "etf3eh", 1779 [HWCAP_S390_NR_HIGH_GPRS] = "highgprs", 1780 [HWCAP_S390_NR_TE] = "te", 1781 [HWCAP_S390_NR_VXRS] = "vx", 1782 [HWCAP_S390_NR_VXRS_BCD] = "vxd", 1783 [HWCAP_S390_NR_VXRS_EXT] = "vxe", 1784 [HWCAP_S390_NR_GS] = "gs", 1785 [HWCAP_S390_NR_VXRS_EXT2] = "vxe2", 1786 [HWCAP_S390_NR_VXRS_PDE] = "vxp", 1787 [HWCAP_S390_NR_SORT] = "sort", 1788 [HWCAP_S390_NR_DFLT] = "dflt", 1789 [HWCAP_S390_NR_NNPA] = "nnpa", 1790 [HWCAP_S390_NR_PCI_MIO] = "pcimio", 1791 [HWCAP_S390_NR_SIE] = "sie", 1792 }; 1793 1794 return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; 1795 } 1796 1797 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 1798 { 1799 regs->psw.addr = infop->entry; 1800 regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ 1801 PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \ 1802 PSW_MASK_32; 1803 regs->gprs[15] = infop->start_stack; 1804 } 1805 1806 /* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs). 
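 *
 * The 27 slots below decompose as two PSW words, 16 gprs, the 16
 * 32-bit access registers packed two per 64-bit slot, and orig_gpr2:
 *
 *     2 + 16 + 8 + 1 = 27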
*/ 1807 #define ELF_NREG 27 1808 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1809 1810 enum { 1811 TARGET_REG_PSWM = 0, 1812 TARGET_REG_PSWA = 1, 1813 TARGET_REG_GPRS = 2, 1814 TARGET_REG_ARS = 18, 1815 TARGET_REG_ORIG_R2 = 26, 1816 }; 1817 1818 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1819 const CPUS390XState *env) 1820 { 1821 int i; 1822 uint32_t *aregs; 1823 1824 (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask); 1825 (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr); 1826 for (i = 0; i < 16; i++) { 1827 (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]); 1828 } 1829 aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]); 1830 for (i = 0; i < 16; i++) { 1831 aregs[i] = tswap32(env->aregs[i]); 1832 } 1833 (*regs)[TARGET_REG_ORIG_R2] = 0; 1834 } 1835 1836 #define USE_ELF_CORE_DUMP 1837 #define ELF_EXEC_PAGESIZE 4096 1838 1839 #define VDSO_HEADER "vdso.c.inc" 1840 1841 #endif /* TARGET_S390X */ 1842 1843 #ifdef TARGET_RISCV 1844 1845 #define ELF_ARCH EM_RISCV 1846 1847 #ifdef TARGET_RISCV32 1848 #define ELF_CLASS ELFCLASS32 1849 #define VDSO_HEADER "vdso-32.c.inc" 1850 #else 1851 #define ELF_CLASS ELFCLASS64 1852 #define VDSO_HEADER "vdso-64.c.inc" 1853 #endif 1854 1855 #define ELF_HWCAP get_elf_hwcap() 1856 1857 static uint32_t get_elf_hwcap(void) 1858 { 1859 #define MISA_BIT(EXT) (1 << (EXT - 'A')) 1860 RISCVCPU *cpu = RISCV_CPU(thread_cpu); 1861 uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A') 1862 | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C') 1863 | MISA_BIT('V'); 1864 1865 return cpu->env.misa_ext & mask; 1866 #undef MISA_BIT 1867 } 1868 1869 static inline void init_thread(struct target_pt_regs *regs, 1870 struct image_info *infop) 1871 { 1872 regs->sepc = infop->entry; 1873 regs->sp = infop->start_stack; 1874 } 1875 1876 #define ELF_EXEC_PAGESIZE 4096 1877 1878 #endif /* TARGET_RISCV */ 1879 1880 #ifdef TARGET_HPPA 1881 1882 #define ELF_CLASS ELFCLASS32 1883 #define ELF_ARCH EM_PARISC 1884 #define ELF_PLATFORM "PARISC" 1885 #define STACK_GROWS_DOWN 0 1886 #define STACK_ALIGNMENT 64 1887 1888 #define VDSO_HEADER "vdso.c.inc" 1889 1890 static inline void init_thread(struct target_pt_regs *regs, 1891 struct image_info *infop) 1892 { 1893 regs->iaoq[0] = infop->entry | PRIV_USER; 1894 regs->iaoq[1] = regs->iaoq[0] + 4; 1895 regs->gr[23] = 0; 1896 regs->gr[24] = infop->argv; 1897 regs->gr[25] = infop->argc; 1898 /* The top-of-stack contains a linkage buffer. */ 1899 regs->gr[30] = infop->start_stack + 64; 1900 regs->gr[31] = infop->entry; 1901 } 1902 1903 #define LO_COMMPAGE 0 1904 1905 static bool init_guest_commpage(void) 1906 { 1907 /* If reserved_va, then we have already mapped 0 page on the host. */ 1908 if (!reserved_va) { 1909 void *want, *addr; 1910 1911 want = g2h_untagged(LO_COMMPAGE); 1912 addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE, 1913 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0); 1914 if (addr == MAP_FAILED) { 1915 perror("Allocating guest commpage"); 1916 exit(EXIT_FAILURE); 1917 } 1918 if (addr != want) { 1919 return false; 1920 } 1921 } 1922 1923 /* 1924 * On Linux, page zero is normally marked execute only + gateway. 1925 * Normal read or write is supposed to fail (thus PROT_NONE above), 1926 * but specific offsets have kernel code mapped to raise permissions 1927 * and implement syscalls. Here, simply mark the page executable. 1928 * Special case the entry points during translation (see do_page_zero). 
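 *
 * For reference, a guest enters the gateway page with the usual
 * Linux/parisc calling sequence (a sketch only, not code in this file):
 *
 *     ble  0x100(%sr2, %r0)     ; branch to the syscall entry point
 *     ldi  __NR_write, %r20     ; delay slot: syscall number in %r20
 *
 * so only the well-known entry offsets matter, not the page contents.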
1929 */ 1930 page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, 1931 PAGE_EXEC | PAGE_VALID); 1932 return true; 1933 } 1934 1935 #endif /* TARGET_HPPA */ 1936 1937 #ifdef TARGET_XTENSA 1938 1939 #define ELF_CLASS ELFCLASS32 1940 #define ELF_ARCH EM_XTENSA 1941 1942 static inline void init_thread(struct target_pt_regs *regs, 1943 struct image_info *infop) 1944 { 1945 regs->windowbase = 0; 1946 regs->windowstart = 1; 1947 regs->areg[1] = infop->start_stack; 1948 regs->pc = infop->entry; 1949 if (info_is_fdpic(infop)) { 1950 regs->areg[4] = infop->loadmap_addr; 1951 regs->areg[5] = infop->interpreter_loadmap_addr; 1952 if (infop->interpreter_loadmap_addr) { 1953 regs->areg[6] = infop->interpreter_pt_dynamic_addr; 1954 } else { 1955 regs->areg[6] = infop->pt_dynamic_addr; 1956 } 1957 } 1958 } 1959 1960 /* See linux kernel: arch/xtensa/include/asm/elf.h. */ 1961 #define ELF_NREG 128 1962 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1963 1964 enum { 1965 TARGET_REG_PC, 1966 TARGET_REG_PS, 1967 TARGET_REG_LBEG, 1968 TARGET_REG_LEND, 1969 TARGET_REG_LCOUNT, 1970 TARGET_REG_SAR, 1971 TARGET_REG_WINDOWSTART, 1972 TARGET_REG_WINDOWBASE, 1973 TARGET_REG_THREADPTR, 1974 TARGET_REG_AR0 = 64, 1975 }; 1976 1977 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1978 const CPUXtensaState *env) 1979 { 1980 unsigned i; 1981 1982 (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 1983 (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM); 1984 (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]); 1985 (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]); 1986 (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]); 1987 (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]); 1988 (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]); 1989 (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]); 1990 (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]); 1991 xtensa_sync_phys_from_window((CPUXtensaState *)env); 1992 for (i = 0; i < env->config->nareg; ++i) { 1993 (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]); 1994 } 1995 } 1996 1997 #define USE_ELF_CORE_DUMP 1998 #define ELF_EXEC_PAGESIZE 4096 1999 2000 #endif /* TARGET_XTENSA */ 2001 2002 #ifdef TARGET_HEXAGON 2003 2004 #define ELF_CLASS ELFCLASS32 2005 #define ELF_ARCH EM_HEXAGON 2006 2007 static inline void init_thread(struct target_pt_regs *regs, 2008 struct image_info *infop) 2009 { 2010 regs->sepc = infop->entry; 2011 regs->sp = infop->start_stack; 2012 } 2013 2014 #endif /* TARGET_HEXAGON */ 2015 2016 #ifndef ELF_BASE_PLATFORM 2017 #define ELF_BASE_PLATFORM (NULL) 2018 #endif 2019 2020 #ifndef ELF_PLATFORM 2021 #define ELF_PLATFORM (NULL) 2022 #endif 2023 2024 #ifndef ELF_MACHINE 2025 #define ELF_MACHINE ELF_ARCH 2026 #endif 2027 2028 #ifndef elf_check_arch 2029 #define elf_check_arch(x) ((x) == ELF_ARCH) 2030 #endif 2031 2032 #ifndef elf_check_abi 2033 #define elf_check_abi(x) (1) 2034 #endif 2035 2036 #ifndef ELF_HWCAP 2037 #define ELF_HWCAP 0 2038 #endif 2039 2040 #ifndef STACK_GROWS_DOWN 2041 #define STACK_GROWS_DOWN 1 2042 #endif 2043 2044 #ifndef STACK_ALIGNMENT 2045 #define STACK_ALIGNMENT 16 2046 #endif 2047 2048 #ifdef TARGET_ABI32 2049 #undef ELF_CLASS 2050 #define ELF_CLASS ELFCLASS32 2051 #undef bswaptls 2052 #define bswaptls(ptr) bswap32s(ptr) 2053 #endif 2054 2055 #ifndef EXSTACK_DEFAULT 2056 #define EXSTACK_DEFAULT false 2057 #endif 2058 2059 #include "elf.h" 2060 2061 /* We must delay the following stanzas until after "elf.h". 
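 * ("elf.h" supplies, among other things, the GNU_PROPERTY_* constants
 * and note types that arch_parse_elf_property() below relies on.)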
*/ 2062 #if defined(TARGET_AARCH64) 2063 2064 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2065 const uint32_t *data, 2066 struct image_info *info, 2067 Error **errp) 2068 { 2069 if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { 2070 if (pr_datasz != sizeof(uint32_t)) { 2071 error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND"); 2072 return false; 2073 } 2074 /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */ 2075 info->note_flags = *data; 2076 } 2077 return true; 2078 } 2079 #define ARCH_USE_GNU_PROPERTY 1 2080 2081 #else 2082 2083 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2084 const uint32_t *data, 2085 struct image_info *info, 2086 Error **errp) 2087 { 2088 g_assert_not_reached(); 2089 } 2090 #define ARCH_USE_GNU_PROPERTY 0 2091 2092 #endif 2093 2094 struct exec 2095 { 2096 unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 2097 unsigned int a_text; /* length of text, in bytes */ 2098 unsigned int a_data; /* length of data, in bytes */ 2099 unsigned int a_bss; /* length of uninitialized data area, in bytes */ 2100 unsigned int a_syms; /* length of symbol table data in file, in bytes */ 2101 unsigned int a_entry; /* start address */ 2102 unsigned int a_trsize; /* length of relocation info for text, in bytes */ 2103 unsigned int a_drsize; /* length of relocation info for data, in bytes */ 2104 }; 2105 2106 2107 #define N_MAGIC(exec) ((exec).a_info & 0xffff) 2108 #define OMAGIC 0407 2109 #define NMAGIC 0410 2110 #define ZMAGIC 0413 2111 #define QMAGIC 0314 2112 2113 #define DLINFO_ITEMS 16 2114 2115 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n) 2116 { 2117 memcpy(to, from, n); 2118 } 2119 2120 #ifdef BSWAP_NEEDED 2121 static void bswap_ehdr(struct elfhdr *ehdr) 2122 { 2123 bswap16s(&ehdr->e_type); /* Object file type */ 2124 bswap16s(&ehdr->e_machine); /* Architecture */ 2125 bswap32s(&ehdr->e_version); /* Object file version */ 2126 bswaptls(&ehdr->e_entry); /* Entry point virtual address */ 2127 bswaptls(&ehdr->e_phoff); /* Program header table file offset */ 2128 bswaptls(&ehdr->e_shoff); /* Section header table file offset */ 2129 bswap32s(&ehdr->e_flags); /* Processor-specific flags */ 2130 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ 2131 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ 2132 bswap16s(&ehdr->e_phnum); /* Program header table entry count */ 2133 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ 2134 bswap16s(&ehdr->e_shnum); /* Section header table entry count */ 2135 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ 2136 } 2137 2138 static void bswap_phdr(struct elf_phdr *phdr, int phnum) 2139 { 2140 int i; 2141 for (i = 0; i < phnum; ++i, ++phdr) { 2142 bswap32s(&phdr->p_type); /* Segment type */ 2143 bswap32s(&phdr->p_flags); /* Segment flags */ 2144 bswaptls(&phdr->p_offset); /* Segment file offset */ 2145 bswaptls(&phdr->p_vaddr); /* Segment virtual address */ 2146 bswaptls(&phdr->p_paddr); /* Segment physical address */ 2147 bswaptls(&phdr->p_filesz); /* Segment size in file */ 2148 bswaptls(&phdr->p_memsz); /* Segment size in memory */ 2149 bswaptls(&phdr->p_align); /* Segment alignment */ 2150 } 2151 } 2152 2153 static void bswap_shdr(struct elf_shdr *shdr, int shnum) 2154 { 2155 int i; 2156 for (i = 0; i < shnum; ++i, ++shdr) { 2157 bswap32s(&shdr->sh_name); 2158 bswap32s(&shdr->sh_type); 2159 bswaptls(&shdr->sh_flags); 2160 bswaptls(&shdr->sh_addr); 2161 
bswaptls(&shdr->sh_offset);
2162         bswaptls(&shdr->sh_size);
2163         bswap32s(&shdr->sh_link);
2164         bswap32s(&shdr->sh_info);
2165         bswaptls(&shdr->sh_addralign);
2166         bswaptls(&shdr->sh_entsize);
2167     }
2168 }
2169
2170 static void bswap_sym(struct elf_sym *sym)
2171 {
2172     bswap32s(&sym->st_name);
2173     bswaptls(&sym->st_value);
2174     bswaptls(&sym->st_size);
2175     bswap16s(&sym->st_shndx);
2176 }
2177
2178 #ifdef TARGET_MIPS
2179 static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
2180 {
2181     bswap16s(&abiflags->version);
2182     bswap32s(&abiflags->ases);
2183     bswap32s(&abiflags->isa_ext);
2184     bswap32s(&abiflags->flags1);
2185     bswap32s(&abiflags->flags2);
2186 }
2187 #endif
2188 #else
2189 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
2190 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
2191 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
2192 static inline void bswap_sym(struct elf_sym *sym) { }
2193 #ifdef TARGET_MIPS
2194 static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
2195 #endif
2196 #endif
2197
2198 #ifdef USE_ELF_CORE_DUMP
2199 static int elf_core_dump(int, const CPUArchState *);
2200 #endif /* USE_ELF_CORE_DUMP */
2201 static void load_symbols(struct elfhdr *hdr, const ImageSource *src,
2202                          abi_ulong load_bias);
2203
2204 /* Verify the portions of EHDR within E_IDENT for the target.
2205    This can be performed before bswapping the entire header.  */
2206 static bool elf_check_ident(struct elfhdr *ehdr)
2207 {
2208     return (ehdr->e_ident[EI_MAG0] == ELFMAG0
2209             && ehdr->e_ident[EI_MAG1] == ELFMAG1
2210             && ehdr->e_ident[EI_MAG2] == ELFMAG2
2211             && ehdr->e_ident[EI_MAG3] == ELFMAG3
2212             && ehdr->e_ident[EI_CLASS] == ELF_CLASS
2213             && ehdr->e_ident[EI_DATA] == ELF_DATA
2214             && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
2215 }
2216
2217 /* Verify the portions of EHDR outside of E_IDENT for the target.
2218    This has to wait until after bswapping the header.  */
2219 static bool elf_check_ehdr(struct elfhdr *ehdr)
2220 {
2221     return (elf_check_arch(ehdr->e_machine)
2222             && elf_check_abi(ehdr->e_flags)
2223             && ehdr->e_ehsize == sizeof(struct elfhdr)
2224             && ehdr->e_phentsize == sizeof(struct elf_phdr)
2225             && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
2226 }
2227
2228 /*
2229  * 'copy_elf_strings()' copies argument/environment strings from user
2230  * memory to free pages in kernel memory. These are in a format ready
2231  * to be put directly into the top of new user memory.
2232  *
2233  */
2234 static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
2235                                   abi_ulong p, abi_ulong stack_limit)
2236 {
2237     char *tmp;
2238     int len, i;
2239     abi_ulong top = p;
2240
2241     if (!p) {
2242         return 0; /* bullet-proofing */
2243     }
2244
2245     if (STACK_GROWS_DOWN) {
2246         int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
2247         for (i = argc - 1; i >= 0; --i) {
2248             tmp = argv[i];
2249             if (!tmp) {
2250                 fprintf(stderr, "VFS: argc is wrong");
2251                 exit(-1);
2252             }
2253             len = strlen(tmp) + 1;
2254             tmp += len;
2255
2256             if (len > (p - stack_limit)) {
2257                 return 0;
2258             }
2259             while (len) {
2260                 int bytes_to_copy = (len > offset) ?
offset : len; 2261 tmp -= bytes_to_copy; 2262 p -= bytes_to_copy; 2263 offset -= bytes_to_copy; 2264 len -= bytes_to_copy; 2265 2266 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy); 2267 2268 if (offset == 0) { 2269 memcpy_to_target(p, scratch, top - p); 2270 top = p; 2271 offset = TARGET_PAGE_SIZE; 2272 } 2273 } 2274 } 2275 if (p != top) { 2276 memcpy_to_target(p, scratch + offset, top - p); 2277 } 2278 } else { 2279 int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE); 2280 for (i = 0; i < argc; ++i) { 2281 tmp = argv[i]; 2282 if (!tmp) { 2283 fprintf(stderr, "VFS: argc is wrong"); 2284 exit(-1); 2285 } 2286 len = strlen(tmp) + 1; 2287 if (len > (stack_limit - p)) { 2288 return 0; 2289 } 2290 while (len) { 2291 int bytes_to_copy = (len > remaining) ? remaining : len; 2292 2293 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy); 2294 2295 tmp += bytes_to_copy; 2296 remaining -= bytes_to_copy; 2297 p += bytes_to_copy; 2298 len -= bytes_to_copy; 2299 2300 if (remaining == 0) { 2301 memcpy_to_target(top, scratch, p - top); 2302 top = p; 2303 remaining = TARGET_PAGE_SIZE; 2304 } 2305 } 2306 } 2307 if (p != top) { 2308 memcpy_to_target(top, scratch, p - top); 2309 } 2310 } 2311 2312 return p; 2313 } 2314 2315 /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of 2316 * argument/environment space. Newer kernels (>2.6.33) allow more, 2317 * dependent on stack size, but guarantee at least 32 pages for 2318 * backwards compatibility. 2319 */ 2320 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE) 2321 2322 static abi_ulong setup_arg_pages(struct linux_binprm *bprm, 2323 struct image_info *info) 2324 { 2325 abi_ulong size, error, guard; 2326 int prot; 2327 2328 size = guest_stack_size; 2329 if (size < STACK_LOWER_LIMIT) { 2330 size = STACK_LOWER_LIMIT; 2331 } 2332 2333 if (STACK_GROWS_DOWN) { 2334 guard = TARGET_PAGE_SIZE; 2335 if (guard < qemu_real_host_page_size()) { 2336 guard = qemu_real_host_page_size(); 2337 } 2338 } else { 2339 /* no guard page for hppa target where stack grows upwards. */ 2340 guard = 0; 2341 } 2342 2343 prot = PROT_READ | PROT_WRITE; 2344 if (info->exec_stack) { 2345 prot |= PROT_EXEC; 2346 } 2347 error = target_mmap(0, size + guard, prot, 2348 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 2349 if (error == -1) { 2350 perror("mmap stack"); 2351 exit(-1); 2352 } 2353 2354 /* We reserve one extra page at the top of the stack as guard. */ 2355 if (STACK_GROWS_DOWN) { 2356 target_mprotect(error, guard, PROT_NONE); 2357 info->stack_limit = error + guard; 2358 return info->stack_limit + size - sizeof(void *); 2359 } else { 2360 info->stack_limit = error + size; 2361 return error; 2362 } 2363 } 2364 2365 /** 2366 * zero_bss: 2367 * 2368 * Map and zero the bss. We need to explicitly zero any fractional pages 2369 * after the data section (i.e. bss). Return false on mapping failure. 2370 */ 2371 static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss, 2372 int prot, Error **errp) 2373 { 2374 abi_ulong align_bss; 2375 2376 /* We only expect writable bss; the code segment shouldn't need this. */ 2377 if (!(prot & PROT_WRITE)) { 2378 error_setg(errp, "PT_LOAD with non-writable bss"); 2379 return false; 2380 } 2381 2382 align_bss = TARGET_PAGE_ALIGN(start_bss); 2383 end_bss = TARGET_PAGE_ALIGN(end_bss); 2384 2385 if (start_bss < align_bss) { 2386 int flags = page_get_flags(start_bss); 2387 2388 if (!(flags & PAGE_RWX)) { 2389 /* 2390 * The whole address space of the executable was reserved 2391 * at the start, therefore all pages will be VALID. 
2392              * But assuming there are no PROT_NONE PT_LOAD segments,
2393              * a PROT_NONE page means no data, only bss, and we can
2394              * simply extend the new anon mapping back to the start
2395              * of the page of bss.
2396              */
2397             align_bss -= TARGET_PAGE_SIZE;
2398         } else {
2399             /*
2400              * The start of the bss shares a page with something.
2401              * The only thing that we expect is the data section,
2402              * which would already be marked writable.
2403              * Overlapping the RX code segment seems malformed.
2404              */
2405             if (!(flags & PAGE_WRITE)) {
2406                 error_setg(errp, "PT_LOAD with bss overlapping "
2407                            "non-writable page");
2408                 return false;
2409             }
2410
2411             /* The page is already mapped and writable. */
2412             memset(g2h_untagged(start_bss), 0, align_bss - start_bss);
2413         }
2414     }
2415
2416     if (align_bss < end_bss &&
2417         target_mmap(align_bss, end_bss - align_bss, prot,
2418                     MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
2419         error_setg_errno(errp, errno, "Error mapping bss");
2420         return false;
2421     }
2422     return true;
2423 }
2424
2425 #if defined(TARGET_ARM)
2426 static int elf_is_fdpic(struct elfhdr *exec)
2427 {
2428     return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
2429 }
2430 #elif defined(TARGET_XTENSA)
2431 static int elf_is_fdpic(struct elfhdr *exec)
2432 {
2433     return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC;
2434 }
2435 #else
2436 /* Default implementation, always false. */
2437 static int elf_is_fdpic(struct elfhdr *exec)
2438 {
2439     return 0;
2440 }
2441 #endif
2442
2443 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
2444 {
2445     uint16_t n;
2446     struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
2447
2448     /* elf32_fdpic_loadseg */
2449     n = info->nsegs;
2450     while (n--) {
2451         sp -= 12;
2452         put_user_u32(loadsegs[n].addr, sp+0);
2453         put_user_u32(loadsegs[n].p_vaddr, sp+4);
2454         put_user_u32(loadsegs[n].p_memsz, sp+8);
2455     }
2456
2457     /* elf32_fdpic_loadmap */
2458     sp -= 4;
2459     put_user_u16(0, sp+0); /* version */
2460     put_user_u16(info->nsegs, sp+2); /* nsegs */
2461
2462     info->personality = PER_LINUX_FDPIC;
2463     info->loadmap_addr = sp;
2464
2465     return sp;
2466 }
2467
2468 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
2469                                    struct elfhdr *exec,
2470                                    struct image_info *info,
2471                                    struct image_info *interp_info,
2472                                    struct image_info *vdso_info)
2473 {
2474     abi_ulong sp;
2475     abi_ulong u_argc, u_argv, u_envp, u_auxv;
2476     int size;
2477     int i;
2478     abi_ulong u_rand_bytes;
2479     uint8_t k_rand_bytes[16];
2480     abi_ulong u_platform, u_base_platform;
2481     const char *k_platform, *k_base_platform;
2482     const int n = sizeof(elf_addr_t);
2483
2484     sp = p;
2485
2486     /* Needs to be before we load the env/argc/...
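 *
 * (For reference, loader_build_fdpic_loadmap() above leaves this
 * layout on the guest stack:
 *
 *     sp + 0:  u16 version (0)
 *     sp + 2:  u16 nsegs
 *     sp + 4:  nsegs x { u32 addr, u32 p_vaddr, u32 p_memsz }.)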
*/ 2487 if (elf_is_fdpic(exec)) { 2488 /* Need 4 byte alignment for these structs */ 2489 sp &= ~3; 2490 sp = loader_build_fdpic_loadmap(info, sp); 2491 info->other_info = interp_info; 2492 if (interp_info) { 2493 interp_info->other_info = info; 2494 sp = loader_build_fdpic_loadmap(interp_info, sp); 2495 info->interpreter_loadmap_addr = interp_info->loadmap_addr; 2496 info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr; 2497 } else { 2498 info->interpreter_loadmap_addr = 0; 2499 info->interpreter_pt_dynamic_addr = 0; 2500 } 2501 } 2502 2503 u_base_platform = 0; 2504 k_base_platform = ELF_BASE_PLATFORM; 2505 if (k_base_platform) { 2506 size_t len = strlen(k_base_platform) + 1; 2507 if (STACK_GROWS_DOWN) { 2508 sp -= (len + n - 1) & ~(n - 1); 2509 u_base_platform = sp; 2510 /* FIXME - check return value of memcpy_to_target() for failure */ 2511 memcpy_to_target(sp, k_base_platform, len); 2512 } else { 2513 memcpy_to_target(sp, k_base_platform, len); 2514 u_base_platform = sp; 2515 sp += len + 1; 2516 } 2517 } 2518 2519 u_platform = 0; 2520 k_platform = ELF_PLATFORM; 2521 if (k_platform) { 2522 size_t len = strlen(k_platform) + 1; 2523 if (STACK_GROWS_DOWN) { 2524 sp -= (len + n - 1) & ~(n - 1); 2525 u_platform = sp; 2526 /* FIXME - check return value of memcpy_to_target() for failure */ 2527 memcpy_to_target(sp, k_platform, len); 2528 } else { 2529 memcpy_to_target(sp, k_platform, len); 2530 u_platform = sp; 2531 sp += len + 1; 2532 } 2533 } 2534 2535 /* Provide 16 byte alignment for the PRNG, and basic alignment for 2536 * the argv and envp pointers. 2537 */ 2538 if (STACK_GROWS_DOWN) { 2539 sp = QEMU_ALIGN_DOWN(sp, 16); 2540 } else { 2541 sp = QEMU_ALIGN_UP(sp, 16); 2542 } 2543 2544 /* 2545 * Generate 16 random bytes for userspace PRNG seeding. 2546 */ 2547 qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes)); 2548 if (STACK_GROWS_DOWN) { 2549 sp -= 16; 2550 u_rand_bytes = sp; 2551 /* FIXME - check return value of memcpy_to_target() for failure */ 2552 memcpy_to_target(sp, k_rand_bytes, 16); 2553 } else { 2554 memcpy_to_target(sp, k_rand_bytes, 16); 2555 u_rand_bytes = sp; 2556 sp += 16; 2557 } 2558 2559 size = (DLINFO_ITEMS + 1) * 2; 2560 if (k_base_platform) { 2561 size += 2; 2562 } 2563 if (k_platform) { 2564 size += 2; 2565 } 2566 if (vdso_info) { 2567 size += 2; 2568 } 2569 #ifdef DLINFO_ARCH_ITEMS 2570 size += DLINFO_ARCH_ITEMS * 2; 2571 #endif 2572 #ifdef ELF_HWCAP2 2573 size += 2; 2574 #endif 2575 info->auxv_len = size * n; 2576 2577 size += envc + argc + 2; 2578 size += 1; /* argc itself */ 2579 size *= n; 2580 2581 /* Allocate space and finalize stack alignment for entry now. */ 2582 if (STACK_GROWS_DOWN) { 2583 u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT); 2584 sp = u_argc; 2585 } else { 2586 u_argc = sp; 2587 sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT); 2588 } 2589 2590 u_argv = u_argc + n; 2591 u_envp = u_argv + (argc + 1) * n; 2592 u_auxv = u_envp + (envc + 1) * n; 2593 info->saved_auxv = u_auxv; 2594 info->argc = argc; 2595 info->envc = envc; 2596 info->argv = u_argv; 2597 info->envp = u_envp; 2598 2599 /* This is correct because Linux defines 2600 * elf_addr_t as Elf32_Off / Elf64_Off 2601 */ 2602 #define NEW_AUX_ENT(id, val) do { \ 2603 put_user_ual(id, u_auxv); u_auxv += n; \ 2604 put_user_ual(val, u_auxv); u_auxv += n; \ 2605 } while(0) 2606 2607 #ifdef ARCH_DLINFO 2608 /* 2609 * ARCH_DLINFO must come first so platform specific code can enforce 2610 * special alignment requirements on the AUXV if necessary (eg. PPC). 
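 *
 * Each NEW_AUX_ENT() below emits one (tag, value) pair of target
 * words; the finished auxv is the flat array that the guest libc
 * walks, e.g. (consumer-side sketch, not code in this file):
 *
 *     for (av = auxv; av[0] != AT_NULL; av += 2) {
 *         if (av[0] == AT_PHDR) {
 *             phdr = av[1];
 *         }
 *     }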
2611      */
2612     ARCH_DLINFO;
2613 #endif
2614     /* There must be exactly DLINFO_ITEMS entries here, or the assert
2615      * on info->auxv_len will trigger.
2616      */
2617     NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
2618     NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
2619     NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
2620     NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
2621     NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
2622     NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
2623     NEW_AUX_ENT(AT_ENTRY, info->entry);
2624     NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
2625     NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
2626     NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
2627     NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
2628     NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
2629     NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
2630     NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
2631     NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));
2632     NEW_AUX_ENT(AT_EXECFN, info->file_string);
2633
2634 #ifdef ELF_HWCAP2
2635     NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
2636 #endif
2637
2638     if (u_base_platform) {
2639         NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform);
2640     }
2641     if (u_platform) {
2642         NEW_AUX_ENT(AT_PLATFORM, u_platform);
2643     }
2644     if (vdso_info) {
2645         NEW_AUX_ENT(AT_SYSINFO_EHDR, vdso_info->load_addr);
2646     }
2647     NEW_AUX_ENT(AT_NULL, 0);
2648 #undef NEW_AUX_ENT
2649
2650     /* Check that our initial calculation of the auxv length matches how much
2651      * we actually put into it.
2652      */
2653     assert(info->auxv_len == u_auxv - info->saved_auxv);
2654
2655     put_user_ual(argc, u_argc);
2656
2657     p = info->arg_strings;
2658     for (i = 0; i < argc; ++i) {
2659         put_user_ual(p, u_argv);
2660         u_argv += n;
2661         p += target_strlen(p) + 1;
2662     }
2663     put_user_ual(0, u_argv);
2664
2665     p = info->env_strings;
2666     for (i = 0; i < envc; ++i) {
2667         put_user_ual(p, u_envp);
2668         u_envp += n;
2669         p += target_strlen(p) + 1;
2670     }
2671     put_user_ual(0, u_envp);
2672
2673     return sp;
2674 }
2675
2676 #if defined(HI_COMMPAGE)
2677 #define LO_COMMPAGE -1
2678 #elif defined(LO_COMMPAGE)
2679 #define HI_COMMPAGE 0
2680 #else
2681 #define HI_COMMPAGE 0
2682 #define LO_COMMPAGE -1
2683 #ifndef INIT_GUEST_COMMPAGE
2684 #define init_guest_commpage() true
2685 #endif
2686 #endif
2687
2688 /**
2689  * pgb_try_mmap:
2690  * @addr: host start address
2691  * @addr_last: host last address
2692  * @keep: do not unmap the probe region
2693  *
2694  * Return 1 if [@addr, @addr_last] is not mapped in the host,
2695  * return 0 if it is not available to map, and -1 on mmap error.
2696  * If @keep, the region is left mapped on success, otherwise unmapped.
2697  */
2698 static int pgb_try_mmap(uintptr_t addr, uintptr_t addr_last, bool keep)
2699 {
2700     size_t size = addr_last - addr + 1;
2701     void *p = mmap((void *)addr, size, PROT_NONE,
2702                    MAP_ANONYMOUS | MAP_PRIVATE |
2703                    MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0);
2704     int ret;
2705
2706     if (p == MAP_FAILED) {
2707         return errno == EEXIST ? 0 : -1;
2708     }
2709     ret = p == (void *)addr;
2710     if (!keep || !ret) {
2711         munmap(p, size);
2712     }
2713     return ret;
2714 }
2715
2716 /**
2717  * pgb_try_mmap_skip_brk:
2718  * @addr: host start address
2719  * @addr_last: host last address
2720  * @brk: host brk
2721  * @keep: do not unmap the probe region
2722  * Like pgb_try_mmap, but additionally reserve some memory following brk.
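 *
 * The brk avoidance below is the usual closed-interval intersection
 * test: [addr, addr_last] and [brk, brk_last] overlap iff
 *
 *     addr <= brk_last && brk <= addr_last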
2723  */
2724 static int pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t addr_last,
2725                                  uintptr_t brk, bool keep)
2726 {
2727     uintptr_t brk_last = brk + 16 * MiB - 1;
2728
2729     /* Do not map anything close to the host brk. */
2730     if (addr <= brk_last && brk <= addr_last) {
2731         return 0;
2732     }
2733     return pgb_try_mmap(addr, addr_last, keep);
2734 }
2735
2736 /**
2737  * pgb_try_mmap_set:
2738  * @ga: set of guest addrs
2739  * @base: guest_base
2740  * @brk: host brk
2741  *
2742  * Return true if all @ga can be mapped by the host at @base.
2743  * On success, retain the mapping at index 0 for reserved_va.
2744  */
2745
2746 typedef struct PGBAddrs {
2747     uintptr_t bounds[3][2]; /* start/last pairs */
2748     int nbounds;
2749 } PGBAddrs;
2750
2751 static bool pgb_try_mmap_set(const PGBAddrs *ga, uintptr_t base, uintptr_t brk)
2752 {
2753     for (int i = ga->nbounds - 1; i >= 0; --i) {
2754         if (pgb_try_mmap_skip_brk(ga->bounds[i][0] + base,
2755                                   ga->bounds[i][1] + base,
2756                                   brk, i == 0 && reserved_va) <= 0) {
2757             return false;
2758         }
2759     }
2760     return true;
2761 }
2762
2763 /**
2764  * pgb_addr_set:
2765  * @ga: output set of guest addrs
2766  * @guest_loaddr: guest image low address
2767  * @guest_hiaddr: guest image high address
2768  * @try_identity: create for identity mapping
2769  *
2770  * Fill in @ga with the image, COMMPAGE and NULL page.
2771  */
2772 static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr,
2773                          abi_ulong guest_hiaddr, bool try_identity)
2774 {
2775     int n;
2776
2777     /*
2778      * With a low commpage, or a guest mapped very low,
2779      * we may not be able to use the identity map.
2780      */
2781     if (try_identity) {
2782         if (LO_COMMPAGE != -1 && LO_COMMPAGE < mmap_min_addr) {
2783             return false;
2784         }
2785         if (guest_loaddr != 0 && guest_loaddr < mmap_min_addr) {
2786             return false;
2787         }
2788     }
2789
2790     memset(ga, 0, sizeof(*ga));
2791     n = 0;
2792
2793     if (reserved_va) {
2794         ga->bounds[n][0] = try_identity ? mmap_min_addr : 0;
2795         ga->bounds[n][1] = reserved_va;
2796         n++;
2797         /* LO_COMMPAGE and NULL handled by reserving from 0. */
2798     } else {
2799         /* Add any LO_COMMPAGE or NULL page. */
2800         if (LO_COMMPAGE != -1) {
2801             ga->bounds[n][0] = 0;
2802             ga->bounds[n][1] = LO_COMMPAGE + TARGET_PAGE_SIZE - 1;
2803             n++;
2804         } else if (!try_identity) {
2805             ga->bounds[n][0] = 0;
2806             ga->bounds[n][1] = TARGET_PAGE_SIZE - 1;
2807             n++;
2808         }
2809
2810         /* Add the guest image for ET_EXEC. */
2811         if (guest_loaddr) {
2812             ga->bounds[n][0] = guest_loaddr;
2813             ga->bounds[n][1] = guest_hiaddr;
2814             n++;
2815         }
2816     }
2817
2818     /*
2819      * Temporarily disable
2820      * "comparison is always false due to limited range of data type"
2821      * due to comparison between unsigned and (possibly) 0.
2822      */
2823 #pragma GCC diagnostic push
2824 #pragma GCC diagnostic ignored "-Wtype-limits"
2825
2826     /* Add any HI_COMMPAGE not covered by reserved_va.
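 * (For example, the 32-bit ARM target keeps its commpage, with the
 * kuser helpers, near the top of the 32-bit address space.)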
 */
2827     if (reserved_va < HI_COMMPAGE) {
2828         ga->bounds[n][0] = HI_COMMPAGE & qemu_real_host_page_mask();
2829         ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1;
2830         n++;
2831     }
2832
2833 #pragma GCC diagnostic pop
2834
2835     ga->nbounds = n;
2836     return true;
2837 }
2838
2839 static void pgb_fail_in_use(const char *image_name)
2840 {
2841     error_report("%s: requires virtual address space that is in use "
2842                  "(omit the -B option or choose a different value)",
2843                  image_name);
2844     exit(EXIT_FAILURE);
2845 }
2846
2847 static void pgb_fixed(const char *image_name, uintptr_t guest_loaddr,
2848                       uintptr_t guest_hiaddr, uintptr_t align)
2849 {
2850     PGBAddrs ga;
2851     uintptr_t brk = (uintptr_t)sbrk(0);
2852
2853     if (!QEMU_IS_ALIGNED(guest_base, align)) {
2854         fprintf(stderr, "Requested guest base %p does not satisfy "
2855                 "host minimum alignment (0x%" PRIxPTR ")\n",
2856                 (void *)guest_base, align);
2857         exit(EXIT_FAILURE);
2858     }
2859
2860     if (!pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, !guest_base)
2861         || !pgb_try_mmap_set(&ga, guest_base, brk)) {
2862         pgb_fail_in_use(image_name);
2863     }
2864 }
2865
2866 /**
2867  * pgb_find_fallback:
2868  *
2869  * This is a fallback method for finding holes in the host address space
2870  * if we don't have the benefit of being able to access /proc/self/maps.
2871  * It can potentially take a very long time as we can only dumbly iterate
2872  * up the host address space seeing if the allocation would work.
2873  */
2874 static uintptr_t pgb_find_fallback(const PGBAddrs *ga, uintptr_t align,
2875                                    uintptr_t brk)
2876 {
2877     /* TODO: come up with a better estimate of how much to skip. */
2878     uintptr_t skip = sizeof(uintptr_t) == 4 ? MiB : GiB;
2879
2880     for (uintptr_t base = skip; ; base += skip) {
2881         base = ROUND_UP(base, align);
2882         if (pgb_try_mmap_set(ga, base, brk)) {
2883             return base;
2884         }
2885         if (base >= -skip) {
2886             return -1;
2887         }
2888     }
2889 }
2890
2891 static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base,
2892                                IntervalTreeRoot *root)
2893 {
2894     for (int i = ga->nbounds - 1; i >= 0; --i) {
2895         uintptr_t s = base + ga->bounds[i][0];
2896         uintptr_t l = base + ga->bounds[i][1];
2897         IntervalTreeNode *n;
2898
2899         if (l < s) {
2900             /* Wraparound. Skip to advance S to mmap_min_addr. */
2901             return mmap_min_addr - s;
2902         }
2903
2904         n = interval_tree_iter_first(root, s, l);
2905         if (n != NULL) {
2906             /* Conflict. Skip to advance S to LAST + 1. */
2907             return n->last - s + 1;
2908         }
2909     }
2910     return 0; /* success */
2911 }
2912
2913 static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root,
2914                                 uintptr_t align, uintptr_t brk)
2915 {
2916     uintptr_t last = mmap_min_addr;
2917     uintptr_t base, skip;
2918
2919     while (true) {
2920         base = ROUND_UP(last, align);
2921         if (base < last) {
2922             return -1;
2923         }
2924
2925         skip = pgb_try_itree(ga, base, root);
2926         if (skip == 0) {
2927             break;
2928         }
2929
2930         last = base + skip;
2931         if (last < base) {
2932             return -1;
2933         }
2934     }
2935
2936     /*
2937      * We've chosen 'base' based on holes in the interval tree,
2938      * but we don't yet know if it is a valid host address.
2939      * Because it is the first matching hole, if the host addresses
2940      * are invalid we know there are no further matches.
2941      */
2942     return pgb_try_mmap_set(ga, base, brk) ?
base : -1; 2943 } 2944 2945 static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr, 2946 uintptr_t guest_hiaddr, uintptr_t align) 2947 { 2948 IntervalTreeRoot *root; 2949 uintptr_t brk, ret; 2950 PGBAddrs ga; 2951 2952 /* Try the identity map first. */ 2953 if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) { 2954 brk = (uintptr_t)sbrk(0); 2955 if (pgb_try_mmap_set(&ga, 0, brk)) { 2956 guest_base = 0; 2957 return; 2958 } 2959 } 2960 2961 /* 2962 * Rebuild the address set for non-identity map. 2963 * This differs in the mapping of the guest NULL page. 2964 */ 2965 pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, false); 2966 2967 root = read_self_maps(); 2968 2969 /* Read brk after we've read the maps, which will malloc. */ 2970 brk = (uintptr_t)sbrk(0); 2971 2972 if (!root) { 2973 ret = pgb_find_fallback(&ga, align, brk); 2974 } else { 2975 /* 2976 * Reserve the area close to the host brk. 2977 * This will be freed with the rest of the tree. 2978 */ 2979 IntervalTreeNode *b = g_new0(IntervalTreeNode, 1); 2980 b->start = brk; 2981 b->last = brk + 16 * MiB - 1; 2982 interval_tree_insert(b, root); 2983 2984 ret = pgb_find_itree(&ga, root, align, brk); 2985 free_self_maps(root); 2986 } 2987 2988 if (ret == -1) { 2989 int w = TARGET_LONG_BITS / 4; 2990 2991 error_report("%s: Unable to find a guest_base to satisfy all " 2992 "guest address mapping requirements", image_name); 2993 2994 for (int i = 0; i < ga.nbounds; ++i) { 2995 error_printf(" %0*" PRIx64 "-%0*" PRIx64 "\n", 2996 w, (uint64_t)ga.bounds[i][0], 2997 w, (uint64_t)ga.bounds[i][1]); 2998 } 2999 exit(EXIT_FAILURE); 3000 } 3001 guest_base = ret; 3002 } 3003 3004 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, 3005 abi_ulong guest_hiaddr) 3006 { 3007 /* In order to use host shmat, we must be able to honor SHMLBA. */ 3008 uintptr_t align = MAX(SHMLBA, TARGET_PAGE_SIZE); 3009 3010 /* Sanity check the guest binary. */ 3011 if (reserved_va) { 3012 if (guest_hiaddr > reserved_va) { 3013 error_report("%s: requires more than reserved virtual " 3014 "address space (0x%" PRIx64 " > 0x%lx)", 3015 image_name, (uint64_t)guest_hiaddr, reserved_va); 3016 exit(EXIT_FAILURE); 3017 } 3018 } else { 3019 if (guest_hiaddr != (uintptr_t)guest_hiaddr) { 3020 error_report("%s: requires more virtual address space " 3021 "than the host can provide (0x%" PRIx64 ")", 3022 image_name, (uint64_t)guest_hiaddr + 1); 3023 exit(EXIT_FAILURE); 3024 } 3025 } 3026 3027 if (have_guest_base) { 3028 pgb_fixed(image_name, guest_loaddr, guest_hiaddr, align); 3029 } else { 3030 pgb_dynamic(image_name, guest_loaddr, guest_hiaddr, align); 3031 } 3032 3033 /* Reserve and initialize the commpage. */ 3034 if (!init_guest_commpage()) { 3035 /* We have already probed for the commpage being free. */ 3036 g_assert_not_reached(); 3037 } 3038 3039 assert(QEMU_IS_ALIGNED(guest_base, align)); 3040 qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space " 3041 "@ 0x%" PRIx64 "\n", (uint64_t)guest_base); 3042 } 3043 3044 enum { 3045 /* The string "GNU\0" as a magic number. */ 3046 GNU0_MAGIC = const_le32('G' | 'N' << 8 | 'U' << 16), 3047 NOTE_DATA_SZ = 1 * KiB, 3048 NOTE_NAME_SZ = 4, 3049 ELF_GNU_PROPERTY_ALIGN = ELF_CLASS == ELFCLASS32 ? 4 : 8, 3050 }; 3051 3052 /* 3053 * Process a single gnu_property entry. 3054 * Return false for error. 
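 *
 * Each entry in the PT_GNU_PROPERTY data is laid out as
 *
 *     uint32_t pr_type;
 *     uint32_t pr_datasz;
 *     uint8_t  pr_data[pr_datasz];   (padded to ELF_GNU_PROPERTY_ALIGN)
 *
 * which is what the pr_type/pr_datasz/step arithmetic below walks.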
3055 */ 3056 static bool parse_elf_property(const uint32_t *data, int *off, int datasz, 3057 struct image_info *info, bool have_prev_type, 3058 uint32_t *prev_type, Error **errp) 3059 { 3060 uint32_t pr_type, pr_datasz, step; 3061 3062 if (*off > datasz || !QEMU_IS_ALIGNED(*off, ELF_GNU_PROPERTY_ALIGN)) { 3063 goto error_data; 3064 } 3065 datasz -= *off; 3066 data += *off / sizeof(uint32_t); 3067 3068 if (datasz < 2 * sizeof(uint32_t)) { 3069 goto error_data; 3070 } 3071 pr_type = data[0]; 3072 pr_datasz = data[1]; 3073 data += 2; 3074 datasz -= 2 * sizeof(uint32_t); 3075 step = ROUND_UP(pr_datasz, ELF_GNU_PROPERTY_ALIGN); 3076 if (step > datasz) { 3077 goto error_data; 3078 } 3079 3080 /* Properties are supposed to be unique and sorted on pr_type. */ 3081 if (have_prev_type && pr_type <= *prev_type) { 3082 if (pr_type == *prev_type) { 3083 error_setg(errp, "Duplicate property in PT_GNU_PROPERTY"); 3084 } else { 3085 error_setg(errp, "Unsorted property in PT_GNU_PROPERTY"); 3086 } 3087 return false; 3088 } 3089 *prev_type = pr_type; 3090 3091 if (!arch_parse_elf_property(pr_type, pr_datasz, data, info, errp)) { 3092 return false; 3093 } 3094 3095 *off += 2 * sizeof(uint32_t) + step; 3096 return true; 3097 3098 error_data: 3099 error_setg(errp, "Ill-formed property in PT_GNU_PROPERTY"); 3100 return false; 3101 } 3102 3103 /* Process NT_GNU_PROPERTY_TYPE_0. */ 3104 static bool parse_elf_properties(const ImageSource *src, 3105 struct image_info *info, 3106 const struct elf_phdr *phdr, 3107 Error **errp) 3108 { 3109 union { 3110 struct elf_note nhdr; 3111 uint32_t data[NOTE_DATA_SZ / sizeof(uint32_t)]; 3112 } note; 3113 3114 int n, off, datasz; 3115 bool have_prev_type; 3116 uint32_t prev_type; 3117 3118 /* Unless the arch requires properties, ignore them. */ 3119 if (!ARCH_USE_GNU_PROPERTY) { 3120 return true; 3121 } 3122 3123 /* If the properties are crazy large, that's too bad. */ 3124 n = phdr->p_filesz; 3125 if (n > sizeof(note)) { 3126 error_setg(errp, "PT_GNU_PROPERTY too large"); 3127 return false; 3128 } 3129 if (n < sizeof(note.nhdr)) { 3130 error_setg(errp, "PT_GNU_PROPERTY too small"); 3131 return false; 3132 } 3133 3134 if (!imgsrc_read(¬e, phdr->p_offset, n, src, errp)) { 3135 return false; 3136 } 3137 3138 /* 3139 * The contents of a valid PT_GNU_PROPERTY is a sequence 3140 * of uint32_t -- swap them all now. 3141 */ 3142 #ifdef BSWAP_NEEDED 3143 for (int i = 0; i < n / 4; i++) { 3144 bswap32s(note.data + i); 3145 } 3146 #endif 3147 3148 /* 3149 * Note that nhdr is 3 words, and that the "name" described by namesz 3150 * immediately follows nhdr and is thus at the 4th word. Further, all 3151 * of the inputs to the kernel's round_up are multiples of 4. 3152 */ 3153 if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 || 3154 note.nhdr.n_namesz != NOTE_NAME_SZ || 3155 note.data[3] != GNU0_MAGIC) { 3156 error_setg(errp, "Invalid note in PT_GNU_PROPERTY"); 3157 return false; 3158 } 3159 off = sizeof(note.nhdr) + NOTE_NAME_SZ; 3160 3161 datasz = note.nhdr.n_descsz + off; 3162 if (datasz > n) { 3163 error_setg(errp, "Invalid note size in PT_GNU_PROPERTY"); 3164 return false; 3165 } 3166 3167 have_prev_type = false; 3168 prev_type = 0; 3169 while (1) { 3170 if (off == datasz) { 3171 return true; /* end, exit ok */ 3172 } 3173 if (!parse_elf_property(note.data, &off, datasz, info, 3174 have_prev_type, &prev_type, errp)) { 3175 return false; 3176 } 3177 have_prev_type = true; 3178 } 3179 } 3180 3181 /** 3182 * load_elf_image: Load an ELF image into the address space. 
3183 * @image_name: the filename of the image, to use in error messages. 3184 * @src: the ImageSource from which to read. 3185 * @info: info collected from the loaded image. 3186 * @ehdr: the ELF header, not yet bswapped. 3187 * @pinterp_name: record any PT_INTERP string found. 3188 * 3189 * On return: @info values will be filled in, as necessary or available. 3190 */ 3191 3192 static void load_elf_image(const char *image_name, const ImageSource *src, 3193 struct image_info *info, struct elfhdr *ehdr, 3194 char **pinterp_name) 3195 { 3196 g_autofree struct elf_phdr *phdr = NULL; 3197 abi_ulong load_addr, load_bias, loaddr, hiaddr, error; 3198 int i, prot_exec; 3199 Error *err = NULL; 3200 3201 /* 3202 * First of all, some simple consistency checks. 3203 * Note that we rely on the bswapped ehdr staying in bprm_buf, 3204 * for later use by load_elf_binary and create_elf_tables. 3205 */ 3206 if (!imgsrc_read(ehdr, 0, sizeof(*ehdr), src, &err)) { 3207 goto exit_errmsg; 3208 } 3209 if (!elf_check_ident(ehdr)) { 3210 error_setg(&err, "Invalid ELF image for this architecture"); 3211 goto exit_errmsg; 3212 } 3213 bswap_ehdr(ehdr); 3214 if (!elf_check_ehdr(ehdr)) { 3215 error_setg(&err, "Invalid ELF image for this architecture"); 3216 goto exit_errmsg; 3217 } 3218 3219 phdr = imgsrc_read_alloc(ehdr->e_phoff, 3220 ehdr->e_phnum * sizeof(struct elf_phdr), 3221 src, &err); 3222 if (phdr == NULL) { 3223 goto exit_errmsg; 3224 } 3225 bswap_phdr(phdr, ehdr->e_phnum); 3226 3227 info->nsegs = 0; 3228 info->pt_dynamic_addr = 0; 3229 3230 mmap_lock(); 3231 3232 /* 3233 * Find the maximum size of the image and allocate an appropriate 3234 * amount of memory to handle that. Locate the interpreter, if any. 3235 */ 3236 loaddr = -1, hiaddr = 0; 3237 info->alignment = 0; 3238 info->exec_stack = EXSTACK_DEFAULT; 3239 for (i = 0; i < ehdr->e_phnum; ++i) { 3240 struct elf_phdr *eppnt = phdr + i; 3241 if (eppnt->p_type == PT_LOAD) { 3242 abi_ulong a = eppnt->p_vaddr & TARGET_PAGE_MASK; 3243 if (a < loaddr) { 3244 loaddr = a; 3245 } 3246 a = eppnt->p_vaddr + eppnt->p_memsz - 1; 3247 if (a > hiaddr) { 3248 hiaddr = a; 3249 } 3250 ++info->nsegs; 3251 info->alignment |= eppnt->p_align; 3252 } else if (eppnt->p_type == PT_INTERP && pinterp_name) { 3253 g_autofree char *interp_name = NULL; 3254 3255 if (*pinterp_name) { 3256 error_setg(&err, "Multiple PT_INTERP entries"); 3257 goto exit_errmsg; 3258 } 3259 3260 interp_name = imgsrc_read_alloc(eppnt->p_offset, eppnt->p_filesz, 3261 src, &err); 3262 if (interp_name == NULL) { 3263 goto exit_errmsg; 3264 } 3265 if (interp_name[eppnt->p_filesz - 1] != 0) { 3266 error_setg(&err, "Invalid PT_INTERP entry"); 3267 goto exit_errmsg; 3268 } 3269 *pinterp_name = g_steal_pointer(&interp_name); 3270 } else if (eppnt->p_type == PT_GNU_PROPERTY) { 3271 if (!parse_elf_properties(src, info, eppnt, &err)) { 3272 goto exit_errmsg; 3273 } 3274 } else if (eppnt->p_type == PT_GNU_STACK) { 3275 info->exec_stack = eppnt->p_flags & PF_X; 3276 } 3277 } 3278 3279 load_addr = loaddr; 3280 3281 if (pinterp_name != NULL) { 3282 if (ehdr->e_type == ET_EXEC) { 3283 /* 3284 * Make sure that the low address does not conflict with 3285 * MMAP_MIN_ADDR or the QEMU application itself. 3286 */ 3287 probe_guest_base(image_name, loaddr, hiaddr); 3288 } else { 3289 abi_ulong align; 3290 3291 /* 3292 * The binary is dynamic, but we still need to 3293 * select guest_base. In this case we pass a size. 
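 * (I.e. guest_loaddr is passed as 0, so only the span of the image
 * matters to probe_guest_base here, not its eventual placement.)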
3294 */ 3295 probe_guest_base(image_name, 0, hiaddr - loaddr); 3296 3297 /* 3298 * Avoid collision with the loader by providing a different 3299 * default load address. 3300 */ 3301 load_addr += elf_et_dyn_base; 3302 3303 /* 3304 * TODO: Better support for mmap alignment is desirable. 3305 * Since we do not have complete control over the guest 3306 * address space, we prefer the kernel to choose some address 3307 * rather than force the use of LOAD_ADDR via MAP_FIXED. 3308 * But without MAP_FIXED we cannot guarantee alignment, 3309 * only suggest it. 3310 */ 3311 align = pow2ceil(info->alignment); 3312 if (align) { 3313 load_addr &= -align; 3314 } 3315 } 3316 } 3317 3318 /* 3319 * Reserve address space for all of this. 3320 * 3321 * In the case of ET_EXEC, we supply MAP_FIXED_NOREPLACE so that we get 3322 * exactly the address range that is required. Without reserved_va, 3323 * the guest address space is not isolated. We have attempted to avoid 3324 * conflict with the host program itself via probe_guest_base, but using 3325 * MAP_FIXED_NOREPLACE instead of MAP_FIXED provides an extra check. 3326 * 3327 * Otherwise this is ET_DYN, and we are searching for a location 3328 * that can hold the memory space required. If the image is 3329 * pre-linked, LOAD_ADDR will be non-zero, and the kernel should 3330 * honor that address if it happens to be free. 3331 * 3332 * In both cases, we will overwrite pages in this range with mappings 3333 * from the executable. 3334 */ 3335 load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE, 3336 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | 3337 (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0), 3338 -1, 0); 3339 if (load_addr == -1) { 3340 goto exit_mmap; 3341 } 3342 load_bias = load_addr - loaddr; 3343 3344 if (elf_is_fdpic(ehdr)) { 3345 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs = 3346 g_malloc(sizeof(*loadsegs) * info->nsegs); 3347 3348 for (i = 0; i < ehdr->e_phnum; ++i) { 3349 switch (phdr[i].p_type) { 3350 case PT_DYNAMIC: 3351 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias; 3352 break; 3353 case PT_LOAD: 3354 loadsegs->addr = phdr[i].p_vaddr + load_bias; 3355 loadsegs->p_vaddr = phdr[i].p_vaddr; 3356 loadsegs->p_memsz = phdr[i].p_memsz; 3357 ++loadsegs; 3358 break; 3359 } 3360 } 3361 } 3362 3363 info->load_bias = load_bias; 3364 info->code_offset = load_bias; 3365 info->data_offset = load_bias; 3366 info->load_addr = load_addr; 3367 info->entry = ehdr->e_entry + load_bias; 3368 info->start_code = -1; 3369 info->end_code = 0; 3370 info->start_data = -1; 3371 info->end_data = 0; 3372 /* Usual start for brk is after all sections of the main executable. */ 3373 info->brk = TARGET_PAGE_ALIGN(hiaddr + load_bias); 3374 info->elf_flags = ehdr->e_flags; 3375 3376 prot_exec = PROT_EXEC; 3377 #ifdef TARGET_AARCH64 3378 /* 3379 * If the BTI feature is present, this indicates that the executable 3380 * pages of the startup binary should be mapped with PROT_BTI, so that 3381 * branch targets are enforced. 3382 * 3383 * The startup binary is either the interpreter or the static executable. 3384 * The interpreter is responsible for all pages of a dynamic executable. 3385 * 3386 * Elf notes are backward compatible to older cpus. 3387 * Do not enable BTI unless it is supported. 
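 *
 * For example, a binary built with -mbranch-protection=standard
 * (gcc or clang) carries GNU_PROPERTY_AARCH64_FEATURE_1_BTI in its
 * PT_GNU_PROPERTY; "readelf -n" reports it as "AArch64 feature: BTI".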
3388 */ 3389 if ((info->note_flags & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) 3390 && (pinterp_name == NULL || *pinterp_name == 0) 3391 && cpu_isar_feature(aa64_bti, ARM_CPU(thread_cpu))) { 3392 prot_exec |= TARGET_PROT_BTI; 3393 } 3394 #endif 3395 3396 for (i = 0; i < ehdr->e_phnum; i++) { 3397 struct elf_phdr *eppnt = phdr + i; 3398 if (eppnt->p_type == PT_LOAD) { 3399 abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em; 3400 int elf_prot = 0; 3401 3402 if (eppnt->p_flags & PF_R) { 3403 elf_prot |= PROT_READ; 3404 } 3405 if (eppnt->p_flags & PF_W) { 3406 elf_prot |= PROT_WRITE; 3407 } 3408 if (eppnt->p_flags & PF_X) { 3409 elf_prot |= prot_exec; 3410 } 3411 3412 vaddr = load_bias + eppnt->p_vaddr; 3413 vaddr_po = vaddr & ~TARGET_PAGE_MASK; 3414 vaddr_ps = vaddr & TARGET_PAGE_MASK; 3415 3416 vaddr_ef = vaddr + eppnt->p_filesz; 3417 vaddr_em = vaddr + eppnt->p_memsz; 3418 3419 /* 3420 * Some segments may be completely empty, with a non-zero p_memsz 3421 * but no backing file segment. 3422 */ 3423 if (eppnt->p_filesz != 0) { 3424 error = imgsrc_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po, 3425 elf_prot, MAP_PRIVATE | MAP_FIXED, 3426 src, eppnt->p_offset - vaddr_po); 3427 if (error == -1) { 3428 goto exit_mmap; 3429 } 3430 } 3431 3432 /* If the load segment requests extra zeros (e.g. bss), map it. */ 3433 if (vaddr_ef < vaddr_em && 3434 !zero_bss(vaddr_ef, vaddr_em, elf_prot, &err)) { 3435 goto exit_errmsg; 3436 } 3437 3438 /* Find the full program boundaries. */ 3439 if (elf_prot & PROT_EXEC) { 3440 if (vaddr < info->start_code) { 3441 info->start_code = vaddr; 3442 } 3443 if (vaddr_ef > info->end_code) { 3444 info->end_code = vaddr_ef; 3445 } 3446 } 3447 if (elf_prot & PROT_WRITE) { 3448 if (vaddr < info->start_data) { 3449 info->start_data = vaddr; 3450 } 3451 if (vaddr_ef > info->end_data) { 3452 info->end_data = vaddr_ef; 3453 } 3454 } 3455 #ifdef TARGET_MIPS 3456 } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) { 3457 Mips_elf_abiflags_v0 abiflags; 3458 3459 if (!imgsrc_read(&abiflags, eppnt->p_offset, sizeof(abiflags), 3460 src, &err)) { 3461 goto exit_errmsg; 3462 } 3463 bswap_mips_abiflags(&abiflags); 3464 info->fp_abi = abiflags.fp_abi; 3465 #endif 3466 } 3467 } 3468 3469 if (info->end_data == 0) { 3470 info->start_data = info->end_code; 3471 info->end_data = info->end_code; 3472 } 3473 3474 if (qemu_log_enabled()) { 3475 load_symbols(ehdr, src, load_bias); 3476 } 3477 3478 debuginfo_report_elf(image_name, src->fd, load_bias); 3479 3480 mmap_unlock(); 3481 3482 close(src->fd); 3483 return; 3484 3485 exit_mmap: 3486 error_setg_errno(&err, errno, "Error mapping file"); 3487 goto exit_errmsg; 3488 exit_errmsg: 3489 error_reportf_err(err, "%s: ", image_name); 3490 exit(-1); 3491 } 3492 3493 static void load_elf_interp(const char *filename, struct image_info *info, 3494 char bprm_buf[BPRM_BUF_SIZE]) 3495 { 3496 struct elfhdr ehdr; 3497 ImageSource src; 3498 int fd, retval; 3499 Error *err = NULL; 3500 3501 fd = open(path(filename), O_RDONLY); 3502 if (fd < 0) { 3503 error_setg_file_open(&err, errno, filename); 3504 error_report_err(err); 3505 exit(-1); 3506 } 3507 3508 retval = read(fd, bprm_buf, BPRM_BUF_SIZE); 3509 if (retval < 0) { 3510 error_setg_errno(&err, errno, "Error reading file header"); 3511 error_reportf_err(err, "%s: ", filename); 3512 exit(-1); 3513 } 3514 3515 src.fd = fd; 3516 src.cache = bprm_buf; 3517 src.cache_size = retval; 3518 3519 load_elf_image(filename, &src, info, &ehdr, NULL); 3520 } 3521 3522 #ifdef VDSO_HEADER 3523 #include VDSO_HEADER 3524 #define 
vdso_image_info() &vdso_image_info
3525 #else
3526 #define vdso_image_info() NULL
3527 #endif
3528
3529 static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso)
3530 {
3531     ImageSource src;
3532     struct elfhdr ehdr;
3533     abi_ulong load_bias, load_addr;
3534
3535     src.fd = -1;
3536     src.cache = vdso->image;
3537     src.cache_size = vdso->image_size;
3538
3539     load_elf_image("<internal-vdso>", &src, info, &ehdr, NULL);
3540     load_addr = info->load_addr;
3541     load_bias = info->load_bias;
3542
3543     /*
3544      * We need to relocate the VDSO image. The one built into the kernel
3545      * is built for a fixed address. The one built for QEMU is not, since
3546      * that requires close control of the guest address space.
3547      * We pre-processed the image to locate all of the addresses that need
3548      * to be updated.
3549      */
3550     for (unsigned i = 0, n = vdso->reloc_count; i < n; i++) {
3551         abi_ulong *addr = g2h_untagged(load_addr + vdso->relocs[i]);
3552         *addr = tswapal(tswapal(*addr) + load_bias);
3553     }
3554
3555     /* Install signal trampolines, if present. */
3556     if (vdso->sigreturn_ofs) {
3557         default_sigreturn = load_addr + vdso->sigreturn_ofs;
3558     }
3559     if (vdso->rt_sigreturn_ofs) {
3560         default_rt_sigreturn = load_addr + vdso->rt_sigreturn_ofs;
3561     }
3562
3563     /* Remove write from VDSO segment. */
3564     target_mprotect(info->start_data, info->end_data - info->start_data,
3565                     PROT_READ | PROT_EXEC);
3566 }
3567
3568 static int symfind(const void *s0, const void *s1)
3569 {
3570     struct elf_sym *sym = (struct elf_sym *)s1;
3571     __typeof(sym->st_value) addr = *(uint64_t *)s0;
3572     int result = 0;
3573
3574     if (addr < sym->st_value) {
3575         result = -1;
3576     } else if (addr >= sym->st_value + sym->st_size) {
3577         result = 1;
3578     }
3579     return result;
3580 }
3581
3582 static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr)
3583 {
3584 #if ELF_CLASS == ELFCLASS32
3585     struct elf_sym *syms = s->disas_symtab.elf32;
3586 #else
3587     struct elf_sym *syms = s->disas_symtab.elf64;
3588 #endif
3589
3590     /* binary search */
3591     struct elf_sym *sym;
3592
3593     sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
3594     if (sym != NULL) {
3595         return s->disas_strtab + sym->st_name;
3596     }
3597
3598     return "";
3599 }
3600
3601 /* FIXME: This should use elf_ops.h.inc */
3602 static int symcmp(const void *s0, const void *s1)
3603 {
3604     struct elf_sym *sym0 = (struct elf_sym *)s0;
3605     struct elf_sym *sym1 = (struct elf_sym *)s1;
3606     return (sym0->st_value < sym1->st_value)
3607         ? -1
3608         : ((sym0->st_value > sym1->st_value) ? 1 : 0);
3609 }
3610
3611 /* Best attempt to load symbols from this ELF object. */
3612 static void load_symbols(struct elfhdr *hdr, const ImageSource *src,
3613                          abi_ulong load_bias)
3614 {
3615     int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
3616     g_autofree struct elf_shdr *shdr = NULL;
3617     char *strings = NULL;
3618     struct elf_sym *syms = NULL;
3619     struct elf_sym *new_syms;
3620     uint64_t segsz;
3621
3622     shnum = hdr->e_shnum;
3623     shdr = imgsrc_read_alloc(hdr->e_shoff, shnum * sizeof(struct elf_shdr),
3624                              src, NULL);
3625     if (shdr == NULL) {
3626         return;
3627     }
3628
3629     bswap_shdr(shdr, shnum);
3630     for (i = 0; i < shnum; ++i) {
3631         if (shdr[i].sh_type == SHT_SYMTAB) {
3632             sym_idx = i;
3633             str_idx = shdr[i].sh_link;
3634             goto found;
3635         }
3636     }
3637
3638     /* There will be no symbol table if the file was stripped. */
3639     return;
3640
3641  found:
3642     /* Now we know where the strtab and symtab are. Snarf them.
 */
3643
3644     segsz = shdr[str_idx].sh_size;
3645     strings = g_try_malloc(segsz);
3646     if (!strings) {
3647         goto give_up;
3648     }
3649     if (!imgsrc_read(strings, shdr[str_idx].sh_offset, segsz, src, NULL)) {
3650         goto give_up;
3651     }
3652
3653     segsz = shdr[sym_idx].sh_size;
3654     if (segsz / sizeof(struct elf_sym) > INT_MAX) {
3655         /*
3656          * Implausibly large symbol table: give up rather than ploughing
3657          * on with the number of symbols calculation overflowing.
3658          */
3659         goto give_up;
3660     }
3661     nsyms = segsz / sizeof(struct elf_sym);
3662     syms = g_try_malloc(segsz);
3663     if (!syms) {
3664         goto give_up;
3665     }
3666     if (!imgsrc_read(syms, shdr[sym_idx].sh_offset, segsz, src, NULL)) {
3667         goto give_up;
3668     }
3669
3670     for (i = 0; i < nsyms; ) {
3671         bswap_sym(syms + i);
3672         /* Throw away entries which we do not need. */
3673         if (syms[i].st_shndx == SHN_UNDEF
3674             || syms[i].st_shndx >= SHN_LORESERVE
3675             || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
3676             if (i < --nsyms) {
3677                 syms[i] = syms[nsyms];
3678             }
3679         } else {
3680 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
3681             /* The bottom address bit marks a Thumb or MIPS16 symbol. */
3682             syms[i].st_value &= ~(target_ulong)1;
3683 #endif
3684             syms[i].st_value += load_bias;
3685             i++;
3686         }
3687     }
3688
3689     /* No "useful" symbol. */
3690     if (nsyms == 0) {
3691         goto give_up;
3692     }
3693
3694     /*
3695      * Attempt to free the storage associated with the local symbols
3696      * that we threw away. Whether or not this has any effect on the
3697      * memory allocation depends on the malloc implementation and how
3698      * many symbols we managed to discard.
3699      */
3700     new_syms = g_try_renew(struct elf_sym, syms, nsyms);
3701     if (new_syms == NULL) {
3702         goto give_up;
3703     }
3704     syms = new_syms;
3705
3706     qsort(syms, nsyms, sizeof(*syms), symcmp);
3707
3708     {
3709         struct syminfo *s = g_new(struct syminfo, 1);
3710
3711         s->disas_strtab = strings;
3712         s->disas_num_syms = nsyms;
3713 #if ELF_CLASS == ELFCLASS32
3714         s->disas_symtab.elf32 = syms;
3715 #else
3716         s->disas_symtab.elf64 = syms;
3717 #endif
3718         s->lookup_symbol = lookup_symbolxx;
3719         s->next = syminfos;
3720         syminfos = s;
3721     }
3722     return;
3723
3724  give_up:
3725     g_free(strings);
3726     g_free(syms);
3727 }
3728
3729 uint32_t get_elf_eflags(int fd)
3730 {
3731     struct elfhdr ehdr;
3732     off_t offset;
3733     int ret;
3734
3735     /* Read ELF header */
3736     offset = lseek(fd, 0, SEEK_SET);
3737     if (offset == (off_t) -1) {
3738         return 0;
3739     }
3740     ret = read(fd, &ehdr, sizeof(ehdr));
3741     if (ret < sizeof(ehdr)) {
3742         return 0;
3743     }
3744     offset = lseek(fd, offset, SEEK_SET);
3745     if (offset == (off_t) -1) {
3746         return 0;
3747     }
3748
3749     /* Check ELF signature */
3750     if (!elf_check_ident(&ehdr)) {
3751         return 0;
3752     }
3753
3754     /* check header */
3755     bswap_ehdr(&ehdr);
3756     if (!elf_check_ehdr(&ehdr)) {
3757         return 0;
3758     }
3759
3760     /* return architecture id */
3761     return ehdr.e_flags;
3762 }
3763
3764 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
3765 {
3766     /*
3767      * We need a copy of the elf header for passing to create_elf_tables.
3768      * We will have overwritten the original when we re-use bprm->buf
3769      * while loading the interpreter. Allocate the storage for this now
3770      * and let load_elf_image do any swapping that may be required.
     */
    struct elfhdr ehdr;
    struct image_info interp_info, vdso_info;
    char *elf_interpreter = NULL;
    char *scratch;

    memset(&interp_info, 0, sizeof(interp_info));
#ifdef TARGET_MIPS
    interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
#endif

    load_elf_image(bprm->filename, &bprm->src, info, &ehdr, &elf_interpreter);

    /*
     * Do this so that we can load the interpreter, if need be.
     * We will change some of these later.
     */
    bprm->p = setup_arg_pages(bprm, info);

    scratch = g_new0(char, TARGET_PAGE_SIZE);
    if (STACK_GROWS_DOWN) {
        bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
                                   bprm->p, info->stack_limit);
        info->file_string = bprm->p;
        bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
                                   bprm->p, info->stack_limit);
        info->env_strings = bprm->p;
        bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
                                   bprm->p, info->stack_limit);
        info->arg_strings = bprm->p;
    } else {
        info->arg_strings = bprm->p;
        bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
                                   bprm->p, info->stack_limit);
        info->env_strings = bprm->p;
        bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
                                   bprm->p, info->stack_limit);
        info->file_string = bprm->p;
        bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
                                   bprm->p, info->stack_limit);
    }

    g_free(scratch);

    if (!bprm->p) {
        fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
        exit(-1);
    }

    if (elf_interpreter) {
        load_elf_interp(elf_interpreter, &interp_info, bprm->buf);

        /*
         * While unusual because of ELF_ET_DYN_BASE, if we are unlucky
         * with the mappings, the interpreter can be loaded above but
         * near the main executable, which can leave very little room
         * for the heap.
         * If less than 16MB is left between the current brk and the
         * interpreter, move brk to the end of the interpreter instead.
         */
        if (interp_info.brk > info->brk &&
            interp_info.load_bias - info->brk < 16 * MiB) {
            info->brk = interp_info.brk;
        }

        /*
         * If the program interpreter is one of these two, then assume
         * an iBCS2 image.  Otherwise assume a native linux image.
         */
        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
            || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
            info->personality = PER_SVR4;

            /*
             * Why this, you ask???  Well SVr4 maps page 0 as read-only,
             * and some applications "depend" upon this behavior.  Since
             * we do not have the power to recompile these, we emulate
             * the SVr4 behavior.  Sigh.
             */
            target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC,
                        MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
        }
#ifdef TARGET_MIPS
        info->interp_fp_abi = interp_info.fp_abi;
#endif
    }

    /*
     * Load a vdso if available, which will amongst other things contain
     * the signal trampolines.  Otherwise, allocate a separate page for
     * them.
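     * If the vdso is absent but the target defines
     * TARGET_ARCH_HAS_SIGTRAMP_PAGE, a single anonymous page is mapped,
     * setup_sigtramp() writes the trampolines into it, and the page is
     * then made read-execute.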
3857 */ 3858 const VdsoImageInfo *vdso = vdso_image_info(); 3859 if (vdso) { 3860 load_elf_vdso(&vdso_info, vdso); 3861 info->vdso = vdso_info.load_bias; 3862 } else if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) { 3863 abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE, 3864 PROT_READ | PROT_WRITE, 3865 MAP_PRIVATE | MAP_ANON, -1, 0); 3866 if (tramp_page == -1) { 3867 return -errno; 3868 } 3869 3870 setup_sigtramp(tramp_page); 3871 target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC); 3872 } 3873 3874 bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &ehdr, info, 3875 elf_interpreter ? &interp_info : NULL, 3876 vdso ? &vdso_info : NULL); 3877 info->start_stack = bprm->p; 3878 3879 /* If we have an interpreter, set that as the program's entry point. 3880 Copy the load_bias as well, to help PPC64 interpret the entry 3881 point as a function descriptor. Do this after creating elf tables 3882 so that we copy the original program entry point into the AUXV. */ 3883 if (elf_interpreter) { 3884 info->load_bias = interp_info.load_bias; 3885 info->entry = interp_info.entry; 3886 g_free(elf_interpreter); 3887 } 3888 3889 #ifdef USE_ELF_CORE_DUMP 3890 bprm->core_dump = &elf_core_dump; 3891 #endif 3892 3893 return 0; 3894 } 3895 3896 #ifdef USE_ELF_CORE_DUMP 3897 #include "exec/translate-all.h" 3898 3899 /* 3900 * Definitions to generate Intel SVR4-like core files. 3901 * These mostly have the same names as the SVR4 types with "target_elf_" 3902 * tacked on the front to prevent clashes with linux definitions, 3903 * and the typedef forms have been avoided. This is mostly like 3904 * the SVR4 structure, but more Linuxy, with things that Linux does 3905 * not support and which gdb doesn't really use excluded. 3906 * 3907 * Fields we don't dump (their contents is zero) in linux-user qemu 3908 * are marked with XXX. 3909 * 3910 * Core dump code is copied from linux kernel (fs/binfmt_elf.c). 3911 * 3912 * Porting ELF coredump for target is (quite) simple process. First you 3913 * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for 3914 * the target resides): 3915 * 3916 * #define USE_ELF_CORE_DUMP 3917 * 3918 * Next you define type of register set used for dumping. ELF specification 3919 * says that it needs to be array of elf_greg_t that has size of ELF_NREG. 3920 * 3921 * typedef <target_regtype> target_elf_greg_t; 3922 * #define ELF_NREG <number of registers> 3923 * typedef taret_elf_greg_t target_elf_gregset_t[ELF_NREG]; 3924 * 3925 * Last step is to implement target specific function that copies registers 3926 * from given cpu into just specified register set. Prototype is: 3927 * 3928 * static void elf_core_copy_regs(taret_elf_gregset_t *regs, 3929 * const CPUArchState *env); 3930 * 3931 * Parameters: 3932 * regs - copy register values into here (allocated and zeroed by caller) 3933 * env - copy registers from here 3934 * 3935 * Example for ARM target is provided in this file. 
3936 */ 3937 3938 struct target_elf_siginfo { 3939 abi_int si_signo; /* signal number */ 3940 abi_int si_code; /* extra code */ 3941 abi_int si_errno; /* errno */ 3942 }; 3943 3944 struct target_elf_prstatus { 3945 struct target_elf_siginfo pr_info; /* Info associated with signal */ 3946 abi_short pr_cursig; /* Current signal */ 3947 abi_ulong pr_sigpend; /* XXX */ 3948 abi_ulong pr_sighold; /* XXX */ 3949 target_pid_t pr_pid; 3950 target_pid_t pr_ppid; 3951 target_pid_t pr_pgrp; 3952 target_pid_t pr_sid; 3953 struct target_timeval pr_utime; /* XXX User time */ 3954 struct target_timeval pr_stime; /* XXX System time */ 3955 struct target_timeval pr_cutime; /* XXX Cumulative user time */ 3956 struct target_timeval pr_cstime; /* XXX Cumulative system time */ 3957 target_elf_gregset_t pr_reg; /* GP registers */ 3958 abi_int pr_fpvalid; /* XXX */ 3959 }; 3960 3961 #define ELF_PRARGSZ (80) /* Number of chars for args */ 3962 3963 struct target_elf_prpsinfo { 3964 char pr_state; /* numeric process state */ 3965 char pr_sname; /* char for pr_state */ 3966 char pr_zomb; /* zombie */ 3967 char pr_nice; /* nice val */ 3968 abi_ulong pr_flag; /* flags */ 3969 target_uid_t pr_uid; 3970 target_gid_t pr_gid; 3971 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; 3972 /* Lots missing */ 3973 char pr_fname[16] QEMU_NONSTRING; /* filename of executable */ 3974 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ 3975 }; 3976 3977 #ifdef BSWAP_NEEDED 3978 static void bswap_prstatus(struct target_elf_prstatus *prstatus) 3979 { 3980 prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo); 3981 prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code); 3982 prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno); 3983 prstatus->pr_cursig = tswap16(prstatus->pr_cursig); 3984 prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend); 3985 prstatus->pr_sighold = tswapal(prstatus->pr_sighold); 3986 prstatus->pr_pid = tswap32(prstatus->pr_pid); 3987 prstatus->pr_ppid = tswap32(prstatus->pr_ppid); 3988 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp); 3989 prstatus->pr_sid = tswap32(prstatus->pr_sid); 3990 /* cpu times are not filled, so we skip them */ 3991 /* regs should be in correct format already */ 3992 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid); 3993 } 3994 3995 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo) 3996 { 3997 psinfo->pr_flag = tswapal(psinfo->pr_flag); 3998 psinfo->pr_uid = tswap16(psinfo->pr_uid); 3999 psinfo->pr_gid = tswap16(psinfo->pr_gid); 4000 psinfo->pr_pid = tswap32(psinfo->pr_pid); 4001 psinfo->pr_ppid = tswap32(psinfo->pr_ppid); 4002 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp); 4003 psinfo->pr_sid = tswap32(psinfo->pr_sid); 4004 } 4005 4006 static void bswap_note(struct elf_note *en) 4007 { 4008 bswap32s(&en->n_namesz); 4009 bswap32s(&en->n_descsz); 4010 bswap32s(&en->n_type); 4011 } 4012 #else 4013 static inline void bswap_prstatus(struct target_elf_prstatus *p) { } 4014 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {} 4015 static inline void bswap_note(struct elf_note *en) { } 4016 #endif /* BSWAP_NEEDED */ 4017 4018 /* 4019 * Calculate file (dump) size of given memory region. 4020 */ 4021 static size_t vma_dump_size(target_ulong start, target_ulong end, 4022 unsigned long flags) 4023 { 4024 /* The area must be readable. 
     */
    if (!(flags & PAGE_READ)) {
        return 0;
    }

    /*
     * Usually we don't dump executable pages, as they contain
     * non-writable code that a debugger can read directly from the
     * target libraries etc.  If the region does not start with an ELF
     * header, we dump it.
     */
    if (!(flags & PAGE_WRITE_ORG) &&
        (flags & PAGE_EXEC) &&
        memcmp(g2h_untagged(start), ELFMAG, SELFMAG) == 0) {
        return 0;
    }

    return end - start;
}

static size_t size_note(const char *name, size_t datasz)
{
    size_t namesz = strlen(name) + 1;

    namesz = ROUND_UP(namesz, 4);
    datasz = ROUND_UP(datasz, 4);

    return sizeof(struct elf_note) + namesz + datasz;
}

static void *fill_note(void **pptr, int type, const char *name, size_t datasz)
{
    void *ptr = *pptr;
    struct elf_note *n = ptr;
    size_t namesz = strlen(name) + 1;

    n->n_namesz = namesz;
    n->n_descsz = datasz;
    n->n_type = type;
    bswap_note(n);

    ptr += sizeof(*n);
    memcpy(ptr, name, namesz);

    namesz = ROUND_UP(namesz, 4);
    datasz = ROUND_UP(datasz, 4);

    *pptr = ptr + namesz + datasz;
    return ptr + namesz;
}

static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
                            uint32_t flags)
{
    memcpy(elf->e_ident, ELFMAG, SELFMAG);

    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;

    elf->e_type = ET_CORE;
    elf->e_machine = machine;
    elf->e_version = EV_CURRENT;
    elf->e_phoff = sizeof(struct elfhdr);
    elf->e_flags = flags;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;

    bswap_ehdr(elf);
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_filesz = sz;

    bswap_phdr(phdr, 1);
}

static void fill_prstatus_note(void *data, CPUState *cpu, int signr)
{
    /*
     * Because note memory is only aligned to 4, and target_elf_prstatus
     * may well have higher alignment requirements, fill locally and
     * memcpy to the destination afterward.
     */
    struct target_elf_prstatus prstatus = {
        .pr_info.si_signo = signr,
        .pr_cursig = signr,
        .pr_pid = get_task_state(cpu)->ts_tid,
        .pr_ppid = getppid(),
        .pr_pgrp = getpgrp(),
        .pr_sid = getsid(0),
    };

    elf_core_copy_regs(&prstatus.pr_reg, cpu_env(cpu));
    bswap_prstatus(&prstatus);
    memcpy(data, &prstatus, sizeof(prstatus));
}

static void fill_prpsinfo_note(void *data, const TaskState *ts)
{
    /*
     * Because note memory is only aligned to 4, and target_elf_prpsinfo
     * may well have higher alignment requirements, fill locally and
     * memcpy to the destination afterward.
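     * (The ELF gABI only guarantees 4-byte alignment for note payloads,
     * while this structure contains abi_ulong members that require
     * 8-byte alignment on 64-bit targets.)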
4132 */ 4133 struct target_elf_prpsinfo psinfo = { 4134 .pr_pid = getpid(), 4135 .pr_ppid = getppid(), 4136 .pr_pgrp = getpgrp(), 4137 .pr_sid = getsid(0), 4138 .pr_uid = getuid(), 4139 .pr_gid = getgid(), 4140 }; 4141 char *base_filename; 4142 size_t len; 4143 4144 len = ts->info->env_strings - ts->info->arg_strings; 4145 len = MIN(len, ELF_PRARGSZ); 4146 memcpy(&psinfo.pr_psargs, g2h_untagged(ts->info->arg_strings), len); 4147 for (size_t i = 0; i < len; i++) { 4148 if (psinfo.pr_psargs[i] == 0) { 4149 psinfo.pr_psargs[i] = ' '; 4150 } 4151 } 4152 4153 base_filename = g_path_get_basename(ts->bprm->filename); 4154 /* 4155 * Using strncpy here is fine: at max-length, 4156 * this field is not NUL-terminated. 4157 */ 4158 strncpy(psinfo.pr_fname, base_filename, sizeof(psinfo.pr_fname)); 4159 g_free(base_filename); 4160 4161 bswap_psinfo(&psinfo); 4162 memcpy(data, &psinfo, sizeof(psinfo)); 4163 } 4164 4165 static void fill_auxv_note(void *data, const TaskState *ts) 4166 { 4167 memcpy(data, g2h_untagged(ts->info->saved_auxv), ts->info->auxv_len); 4168 } 4169 4170 /* 4171 * Constructs name of coredump file. We have following convention 4172 * for the name: 4173 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core 4174 * 4175 * Returns the filename 4176 */ 4177 static char *core_dump_filename(const TaskState *ts) 4178 { 4179 g_autoptr(GDateTime) now = g_date_time_new_now_local(); 4180 g_autofree char *nowstr = g_date_time_format(now, "%Y%m%d-%H%M%S"); 4181 g_autofree char *base_filename = g_path_get_basename(ts->bprm->filename); 4182 4183 return g_strdup_printf("qemu_%s_%s_%d.core", 4184 base_filename, nowstr, (int)getpid()); 4185 } 4186 4187 static int dump_write(int fd, const void *ptr, size_t size) 4188 { 4189 const char *bufp = (const char *)ptr; 4190 ssize_t bytes_written, bytes_left; 4191 4192 bytes_written = 0; 4193 bytes_left = size; 4194 4195 /* 4196 * In normal conditions, single write(2) should do but 4197 * in case of socket etc. this mechanism is more portable. 
4198 */ 4199 do { 4200 bytes_written = write(fd, bufp, bytes_left); 4201 if (bytes_written < 0) { 4202 if (errno == EINTR) 4203 continue; 4204 return (-1); 4205 } else if (bytes_written == 0) { /* eof */ 4206 return (-1); 4207 } 4208 bufp += bytes_written; 4209 bytes_left -= bytes_written; 4210 } while (bytes_left > 0); 4211 4212 return (0); 4213 } 4214 4215 static int wmr_page_unprotect_regions(void *opaque, target_ulong start, 4216 target_ulong end, unsigned long flags) 4217 { 4218 if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) { 4219 size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size()); 4220 4221 while (1) { 4222 page_unprotect(start, 0); 4223 if (end - start <= step) { 4224 break; 4225 } 4226 start += step; 4227 } 4228 } 4229 return 0; 4230 } 4231 4232 typedef struct { 4233 unsigned count; 4234 size_t size; 4235 } CountAndSizeRegions; 4236 4237 static int wmr_count_and_size_regions(void *opaque, target_ulong start, 4238 target_ulong end, unsigned long flags) 4239 { 4240 CountAndSizeRegions *css = opaque; 4241 4242 css->count++; 4243 css->size += vma_dump_size(start, end, flags); 4244 return 0; 4245 } 4246 4247 typedef struct { 4248 struct elf_phdr *phdr; 4249 off_t offset; 4250 } FillRegionPhdr; 4251 4252 static int wmr_fill_region_phdr(void *opaque, target_ulong start, 4253 target_ulong end, unsigned long flags) 4254 { 4255 FillRegionPhdr *d = opaque; 4256 struct elf_phdr *phdr = d->phdr; 4257 4258 phdr->p_type = PT_LOAD; 4259 phdr->p_vaddr = start; 4260 phdr->p_paddr = 0; 4261 phdr->p_filesz = vma_dump_size(start, end, flags); 4262 phdr->p_offset = d->offset; 4263 d->offset += phdr->p_filesz; 4264 phdr->p_memsz = end - start; 4265 phdr->p_flags = (flags & PAGE_READ ? PF_R : 0) 4266 | (flags & PAGE_WRITE_ORG ? PF_W : 0) 4267 | (flags & PAGE_EXEC ? PF_X : 0); 4268 phdr->p_align = ELF_EXEC_PAGESIZE; 4269 4270 bswap_phdr(phdr, 1); 4271 d->phdr = phdr + 1; 4272 return 0; 4273 } 4274 4275 static int wmr_write_region(void *opaque, target_ulong start, 4276 target_ulong end, unsigned long flags) 4277 { 4278 int fd = *(int *)opaque; 4279 size_t size = vma_dump_size(start, end, flags); 4280 4281 if (!size) { 4282 return 0; 4283 } 4284 return dump_write(fd, g2h_untagged(start), size); 4285 } 4286 4287 /* 4288 * Write out ELF coredump. 4289 * 4290 * See documentation of ELF object file format in: 4291 * http://www.caldera.com/developers/devspecs/gabi41.pdf 4292 * 4293 * Coredump format in linux is following: 4294 * 4295 * 0 +----------------------+ \ 4296 * | ELF header | ET_CORE | 4297 * +----------------------+ | 4298 * | ELF program headers | |--- headers 4299 * | - NOTE section | | 4300 * | - PT_LOAD sections | | 4301 * +----------------------+ / 4302 * | NOTEs: | 4303 * | - NT_PRSTATUS | 4304 * | - NT_PRSINFO | 4305 * | - NT_AUXV | 4306 * +----------------------+ <-- aligned to target page 4307 * | Process memory dump | 4308 * : : 4309 * . . 4310 * : : 4311 * | | 4312 * +----------------------+ 4313 * 4314 * NT_PRSTATUS -> struct elf_prstatus (per thread) 4315 * NT_PRSINFO -> struct elf_prpsinfo 4316 * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()). 4317 * 4318 * Format follows System V format as close as possible. Current 4319 * version limitations are as follows: 4320 * - no floating point registers are dumped 4321 * 4322 * Function returns 0 in case of success, negative errno otherwise. 4323 * 4324 * TODO: make this work also during runtime: it should be 4325 * possible to force coredump from running process and then 4326 * continue processing. 
 * For example, qemu could set up a SIGUSR2 handler (provided that the
 * target process hasn't registered a handler for that signal) that
 * performs the dump when the signal is received.
 */
static int elf_core_dump(int signr, const CPUArchState *env)
{
    const CPUState *cpu = env_cpu((CPUArchState *)env);
    const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
    struct rlimit dumpsize;
    CountAndSizeRegions css;
    off_t offset, note_offset, data_offset;
    size_t note_size;
    int cpus, ret;
    int fd = -1;
    CPUState *cpu_iter;

    if (prctl(PR_GET_DUMPABLE) == 0) {
        return 0;
    }

    if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) {
        return 0;
    }

    cpu_list_lock();
    mmap_lock();

    /* By unprotecting, we merge vmas that might be split. */
    walk_memory_regions(NULL, wmr_page_unprotect_regions);

    /*
     * Walk through the target process memory mappings and
     * set up the structure containing this information.
     */
    memset(&css, 0, sizeof(css));
    walk_memory_regions(&css, wmr_count_and_size_regions);

    cpus = 0;
    CPU_FOREACH(cpu_iter) {
        cpus++;
    }

    offset = sizeof(struct elfhdr);
    offset += (css.count + 1) * sizeof(struct elf_phdr);
    note_offset = offset;

    offset += size_note("CORE", ts->info->auxv_len);
    offset += size_note("CORE", sizeof(struct target_elf_prpsinfo));
    offset += size_note("CORE", sizeof(struct target_elf_prstatus)) * cpus;
    note_size = offset - note_offset;
    data_offset = ROUND_UP(offset, ELF_EXEC_PAGESIZE);

    /* Do not dump if the corefile size exceeds the limit. */
    if (dumpsize.rlim_cur != RLIM_INFINITY
        && dumpsize.rlim_cur < data_offset + css.size) {
        errno = 0;
        goto out;
    }

    {
        g_autofree char *corefile = core_dump_filename(ts);
        fd = open(corefile, O_WRONLY | O_CREAT | O_TRUNC,
                  S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
    }
    if (fd < 0) {
        goto out;
    }

    /*
     * There is a fair amount of alignment padding within the notes
     * as well as preceding the process memory.  Allocate a zeroed
     * block to hold it all.  Write all of the headers directly into
     * this buffer and then write it out as a block.
     */
    {
        g_autofree void *header = g_malloc0(data_offset);
        FillRegionPhdr frp;
        void *hptr, *dptr;

        /* Create the elf file header. */
        hptr = header;
        fill_elf_header(hptr, css.count + 1, ELF_MACHINE, 0);
        hptr += sizeof(struct elfhdr);

        /* Create the elf program headers. */
        fill_elf_note_phdr(hptr, note_size, note_offset);
        hptr += sizeof(struct elf_phdr);

        frp.phdr = hptr;
        frp.offset = data_offset;
        walk_memory_regions(&frp, wmr_fill_region_phdr);
        hptr = frp.phdr;

        /* Create the notes. */
        dptr = fill_note(&hptr, NT_AUXV, "CORE", ts->info->auxv_len);
        fill_auxv_note(dptr, ts);

        dptr = fill_note(&hptr, NT_PRPSINFO, "CORE",
                         sizeof(struct target_elf_prpsinfo));
        fill_prpsinfo_note(dptr, ts);

        CPU_FOREACH(cpu_iter) {
            dptr = fill_note(&hptr, NT_PRSTATUS, "CORE",
                             sizeof(struct target_elf_prstatus));
            fill_prstatus_note(dptr, cpu_iter, cpu_iter == cpu ? signr : 0);
        }

        if (dump_write(fd, header, data_offset) < 0) {
            goto out;
        }
    }

    /*
     * Finally, write the process memory into the corefile as well.
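     * walk_memory_regions() visits the regions in the same order as
     * when the program headers were filled in above, so each PT_LOAD
     * segment lands at the p_offset recorded for it.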
4440 */ 4441 if (walk_memory_regions(&fd, wmr_write_region) < 0) { 4442 goto out; 4443 } 4444 errno = 0; 4445 4446 out: 4447 ret = -errno; 4448 mmap_unlock(); 4449 cpu_list_unlock(); 4450 if (fd >= 0) { 4451 close(fd); 4452 } 4453 return ret; 4454 } 4455 #endif /* USE_ELF_CORE_DUMP */ 4456 4457 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop) 4458 { 4459 init_thread(regs, infop); 4460 } 4461